diff --git a/BUILD.gn b/BUILD.gn index fee6ad88af7026b60e5ae697a343153985905969..e5a514c74baee27cbfe7c835e2b3660e26920274 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -145,7 +145,10 @@ group("ark_unittest") { ] if (!run_with_asan) { if (!(ark_standalone_build && current_os == "ohos")) { - deps += [ "ecmascript/compiler/tests:host_unittest" ] + deps += [ + "ecmascript/compiler/tests:host_unittest", + "ecmascript/ohos/tests:host_unittest", + ] } } } @@ -182,13 +185,22 @@ group("ark_runtime_host_unittest") { deps = [] if (host_os != "mac") { # js unittest - deps += [ ":ark_unittest" ] + deps += [ + ":ark_unittest", + "$js_root/test/pgotypeinfer:ark_pgotypeinfer_test", + ] # js bytecode test deps += [ "$js_root/test/moduletest:ark_js_moduletest" ] # quickfix test deps += [ "$js_root/test/quickfix:ark_quickfix_test" ] + if (!ark_standalone_build && run_regress_test) { + deps += [ "$js_root/test/regresstest:ark_regress_test" ] + } + + # execution test + deps += [ "$js_root/test/executiontest:ark_execution_test" ] # ts aot test and asm test if (!run_with_asan) { @@ -376,8 +388,11 @@ config("ark_jsruntime_common_config") { } } - if (!is_mac && target_os != "ios" && !use_libfuzzer && - !(ark_standalone_build && !enable_lto)) { + if (is_ohos) { + defines += [ "PANDA_TARGET_OHOS" ] + } + + if (!is_mac && target_os != "ios" && !use_libfuzzer && !enable_lto_O0) { cflags_cc += [ "-flto=thin" ] ldflags += [ "-flto=thin" ] } @@ -454,7 +469,7 @@ config("ark_jsruntime_common_config") { } } - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines += [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } @@ -466,12 +481,27 @@ config("ark_jsruntime_common_config") { defines += [ "HOOK_ENABLE" ] } } + + # is_asan: asan config from skynet; run_with_asan: asan config enabled by ets_runtime + if (is_asan) { + defines += [ "ECMASCRIPT_ENABLE_DFX_CONFIG" ] + } + + code_encrypto_enable = true + if (defined(global_parts_info) && + !defined(global_parts_info.security_code_crypto_metadata_process)) { + code_encrypto_enable = false + } + if (code_encrypto_enable) { + defines += [ "CODE_ENCRYPTION_ENABLE" ] + } } # ecmascript unit testcase config config("ecma_test_config") { visibility = [ "./ecmascript/*", + "./test/executiontest/*", "./test/fuzztest/*", ] @@ -480,7 +510,7 @@ config("ecma_test_config") { "$js_root:ark_jsruntime_common_config", ] - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines = [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } @@ -510,9 +540,12 @@ ecma_source = [ "ecmascript/base/atomic_helper.cpp", "ecmascript/base/builtins_base.cpp", "ecmascript/base/error_helper.cpp", + "ecmascript/base/fast_json_stringifier.cpp", + "ecmascript/base/json_helper.cpp", "ecmascript/base/json_parser.cpp", "ecmascript/base/json_stringifier.cpp", "ecmascript/base/number_helper.cpp", + "ecmascript/base/path_helper.cpp", "ecmascript/base/string_helper.cpp", "ecmascript/base/typed_array_helper.cpp", "ecmascript/base/utf_helper.cpp", @@ -557,6 +590,7 @@ ecma_source = [ "ecmascript/builtins/builtins_weak_ref.cpp", "ecmascript/builtins/builtins_weak_set.cpp", "ecmascript/byte_array.cpp", + "ecmascript/ohos/code_decrypt.cpp", "ecmascript/compiler/aot_file/elf_builder.cpp", "ecmascript/compiler/aot_file/elf_reader.cpp", "ecmascript/compiler/aot_file/an_file_data_manager.cpp", @@ -566,7 +600,14 @@ ecma_source = [ "ecmascript/compiler/aot_file/binary_buffer_parser.cpp", "ecmascript/compiler/aot_file/module_section_des.cpp", "ecmascript/compiler/aot_file/aot_file_manager.cpp", +
"ecmascript/compiler/aot_snapshot/aot_snapshot.cpp", + "ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.cpp", + "ecmascript/compiler/aot_snapshot/snapshot_global_data.cpp", "ecmascript/compiler/pgo_bc_info.cpp", + "ecmascript/compiler/pgo_type/pgo_hclass_generator.cpp", + "ecmascript/compiler/pgo_type/pgo_type_manager.cpp", + "ecmascript/compiler/pgo_type/pgo_type_parser.cpp", + "ecmascript/compiler/pgo_type/pgo_type_recorder.cpp", "ecmascript/containers/containers_arraylist.cpp", "ecmascript/containers/containers_deque.cpp", "ecmascript/containers/containers_errors.cpp", @@ -598,6 +639,7 @@ ecma_source = [ "ecmascript/ecma_string.cpp", "ecmascript/ecma_string_table.cpp", "ecmascript/ecma_vm.cpp", + "ecmascript/elements.cpp", "ecmascript/frames.cpp", "ecmascript/free_object.cpp", "ecmascript/generator_helper.cpp", @@ -615,6 +657,8 @@ ecma_source = [ "ecmascript/interpreter/interpreter_assembly.cpp", "ecmascript/interpreter/slow_runtime_stub.cpp", "ecmascript/intl/locale_helper.cpp", + "ecmascript/jit/jit.cpp", + "ecmascript/jit/jit_task.cpp", "ecmascript/jobs/micro_job_queue.cpp", "ecmascript/jspandafile/js_pandafile.cpp", "ecmascript/jspandafile/js_pandafile_manager.cpp", @@ -708,12 +752,14 @@ ecma_source = [ "ecmascript/mem/heap_region_allocator.cpp", "ecmascript/mem/incremental_marker.cpp", "ecmascript/mem/linear_space.cpp", + "ecmascript/mem/machine_code.cpp", "ecmascript/mem/mem_controller.cpp", "ecmascript/mem/mem_map_allocator.cpp", "ecmascript/mem/native_area_allocator.cpp", "ecmascript/mem/parallel_evacuator.cpp", "ecmascript/mem/parallel_marker.cpp", "ecmascript/mem/partial_gc.cpp", + "ecmascript/mem/regexp_cached_chunk.cpp", "ecmascript/mem/stw_young_gc.cpp", "ecmascript/mem/space.cpp", "ecmascript/mem/sparse_space.cpp", @@ -725,21 +771,30 @@ ecma_source = [ "ecmascript/module/js_module_namespace.cpp", "ecmascript/module/js_module_record.cpp", "ecmascript/module/js_module_source_text.cpp", + "ecmascript/module/js_module_deregister.cpp", "ecmascript/module/module_data_extractor.cpp", + "ecmascript/module/module_path_helper.cpp", "ecmascript/napi/jsnapi.cpp", "ecmascript/object_factory.cpp", "ecmascript/object_operator.cpp", "ecmascript/patch/patch_loader.cpp", "ecmascript/patch/quick_fix_manager.cpp", + "ecmascript/pgo_profiler/ap_file/pgo_file_info.cpp", "ecmascript/pgo_profiler/pgo_profiler.cpp", "ecmascript/pgo_profiler/pgo_profiler_decoder.cpp", "ecmascript/pgo_profiler/pgo_profiler_encoder.cpp", "ecmascript/pgo_profiler/pgo_profiler_info.cpp", "ecmascript/pgo_profiler/pgo_profiler_layout.cpp", + "ecmascript/pgo_profiler/pgo_profiler_manager.cpp", + "ecmascript/pgo_profiler/pgo_utils.cpp", + "ecmascript/pgo_profiler/ap_file/pgo_method_type_set.cpp", + "ecmascript/pgo_profiler/types/pgo_profile_type.cpp", + "ecmascript/property_accessor.cpp", "ecmascript/stackmap/ark_stackmap_builder.cpp", "ecmascript/stackmap/ark_stackmap_parser.cpp", "ecmascript/stackmap/llvm_stackmap_parser.cpp", "ecmascript/stackmap/llvm_stackmap_type.cpp", + "ecmascript/stackmap/litecg_stackmap_type.cpp", "ecmascript/subtyping_operator.cpp", "ecmascript/taskpool/taskpool.cpp", "ecmascript/taskpool/runner.cpp", @@ -784,6 +839,7 @@ if (is_ohos && is_standard_system && enable_hitrace) { ecma_debugger_source = [ "ecmascript/debugger/debugger_api.cpp", "ecmascript/debugger/js_debugger.cpp", + "ecmascript/debugger/dropframe_manager.cpp", "ecmascript/debugger/hot_reload_manager.cpp", ] @@ -815,9 +871,24 @@ ecma_profiler_source += [ "ecmascript/snapshot/mem/snapshot_processor.cpp", ] 
+ecma_stackinfo_source = [] + +ecma_stackinfo_source = [ + "ecmascript/extractortool/src/extractor.cpp", + "ecmascript/extractortool/src/file_mapper.cpp", + "ecmascript/extractortool/src/file_path_utils.cpp", + "ecmascript/extractortool/src/zip_file_reader_io.cpp", + "ecmascript/extractortool/src/zip_file_reader_mem.cpp", + "ecmascript/extractortool/src/zip_file_reader.cpp", + "ecmascript/extractortool/src/zip_file.cpp", +] + ecma_platform_source = [] -ecma_platform_source += [ "ecmascript/platform/common/map.cpp" ] +ecma_platform_source += [ + "ecmascript/platform/common/map.cpp", + "ecmascript/platform/common/mutex.cpp", +] config("include_llvm") { if (compile_llvm_online) { @@ -866,6 +937,7 @@ if (is_mingw) { "ecmascript/platform/windows/file.cpp", "ecmascript/platform/windows/os.cpp", "ecmascript/platform/windows/time.cpp", + "ecmascript/platform/windows/log.cpp", ] } else { ecma_platform_source += [ @@ -873,20 +945,29 @@ if (is_mingw) { "ecmascript/platform/unix/map.cpp", "ecmascript/platform/unix/time.cpp", ] - if (is_mac || target_os == "ios") { + if (is_mac) { ecma_platform_source += [ "ecmascript/platform/unix/mac/os.cpp", "ecmascript/platform/unix/mac/backtrace.cpp", + "ecmascript/platform/unix/mac/log.cpp", + ] + } else if (target_os == "ios") { + ecma_platform_source += [ + "ecmascript/platform/unix/mac/os.cpp", + "ecmascript/platform/unix/mac/backtrace.cpp", + "ecmascript/platform/common/log.cpp", ] } else if (is_ohos || target_os == "android") { ecma_platform_source += [ "ecmascript/platform/unix/linux/os.cpp", "ecmascript/platform/unix/ohos/backtrace.cpp", + "ecmascript/platform/common/log.cpp", ] } else if (is_linux) { ecma_platform_source += [ "ecmascript/platform/unix/linux/os.cpp", "ecmascript/platform/unix/linux/backtrace.cpp", + "ecmascript/platform/common/log.cpp", ] } else { print("Invalid os!") @@ -915,12 +996,20 @@ ohos_source_set("libark_jsruntime_set") { "/system/lib64/${arkcompiler_relative_lib_path}/lib_ark_builtins.d.abc" defines += [ "TARGET_BUILTINS_DTS_PATH=\"${target_builtins_dts_path}\"" ] + if (current_cpu == "arm64") { + defines += [ "ENABLE_POSTFORK_FORCEEXPAND" ] + } + sources = ecma_source sources += ecma_profiler_source sources += ecma_debugger_source sources += hitrace_scope_source sources += ecma_platform_source + if (is_ohos) { + sources += ecma_stackinfo_source + } + public_configs = [ "$js_root:ark_jsruntime_public_config" ] public_configs += [ ":include_llvm" ] @@ -936,12 +1025,18 @@ ohos_source_set("libark_jsruntime_set") { external_deps = [] deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps" ] } else { deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] } + if (is_ohos && is_standard_system && !is_arkui_x && + defined(global_parts_info) && defined(global_parts_info.qos_manager)) { + defines += [ "ENABLE_QOS" ] + external_deps += [ "qos_manager:qos" ] + } + if (enable_target_compilation) { external_deps += [ "c_utils:utils" ] } @@ -975,7 +1070,7 @@ ohos_source_set("libark_js_intl_set") { external_deps = [] deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps" ] } else { deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] @@ -1003,6 +1098,11 @@ ohos_source_set("libark_jsruntime_test_set") { sources += ecma_platform_source defines = [ "OHOS_UNIT_TEST" ] + + if (is_ohos) { + sources += ecma_stackinfo_source + } + if (is_ohos && is_standard_system) { stub_an_file_path = 
"/system/lib64/${arkcompiler_relative_lib_path}/stub.an" } else { @@ -1019,7 +1119,7 @@ ohos_source_set("libark_jsruntime_test_set") { "$ark_third_party_root/icu/icu4c:shared_icuuc", ] external_deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps" ] } else { deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] @@ -1038,7 +1138,7 @@ ohos_source_set("libark_jsruntime_test_set") { deps += [ "$js_root/ecmascript/compiler:libark_mock_stub_set" ] } - if (enable_leak_check) { + if (enable_leak_check || is_asan) { defines += [ "ECMASCRIPT_ENABLE_HANDLE_LEAK_CHECK" ] } @@ -1070,7 +1170,7 @@ ohos_source_set("libark_jsruntime_static") { ":libark_jsruntime_set", ] external_deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps" ] } else { deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] @@ -1097,7 +1197,7 @@ ohos_shared_library("libark_jsruntime") { sdk_libc_secshared_dep, ] external_deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:libarkfile_static" ] } else { deps += [ "$ark_root/libpandafile:libarkfile_static" ] @@ -1171,3 +1271,12 @@ ohos_shared_library("libark_jsruntime_test") { } subsystem_name = "test" } + +ohos_prebuilt_etc("app_aot_white_list") { + relative_install_dir = "ark" + source = "$js_root/ecmascript/ohos/app_aot_white_list.conf" + + # Set the subsystem name + part_name = "ets_runtime" + subsystem_name = "arkcompiler" +} diff --git a/README_zh.md b/README_zh.md index 5cf324c0323ca4672643a49786f39c8527aa811d..2f3e5112f0bdce3ec3017f4d2b10fe102687ab04 100644 --- a/README_zh.md +++ b/README_zh.md @@ -69,7 +69,7 @@ NAPI接口说明参考[NAPI部件](https://gitee.com/openharmony/arkui_napi/blob ### 使用说明 -ArkTS生成字节码参考[方舟eTS编译器](docs/using-the-toolchain-zh.md) +ArkTS生成字节码参考[方舟eTS编译器]( https://gitee.com/openharmony/arkcompiler_ets_frontend/blob/master/README_zh.md#%E4%BD%BF%E7%94%A8%E8%AF%B4%E6%98%8E) 字节码执行: ``` diff --git a/bundle.json b/bundle.json index c7af643cffcee1348797ce3be7d19a7394264455..b98bd71a4fdd2e62a4e30beebb649f0e5c0ae751 100644 --- a/bundle.json +++ b/bundle.json @@ -25,6 +25,7 @@ "faultloggerd", "hitrace", "hilog", + "qos_manager", "runtime_core", "c_utils", "code_signature" @@ -37,6 +38,7 @@ }, "build": { "sub_component": [ + "//arkcompiler/ets_runtime:app_aot_white_list", "//arkcompiler/ets_runtime:ark_js_packages" ], "inner_kits": [ diff --git a/docs/README_zh.md b/docs/README_zh.md index 037e4173ea5c8db25e61fbfcb653668e65dd96ed..a7bf762b8c4d55f22ac459954167ac5bb74339d6 100644 --- a/docs/README_zh.md +++ b/docs/README_zh.md @@ -106,17 +106,9 @@ print("Hello World!!!"); 1. 通过方舟前端生成hello-world.abc文件,编译命令: ``` - node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js hello-world.js + /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc hello-world.js ``` - **注意**:使用node编译abc过程遇到ENOENT错误,运行如下命令进行修复 - - ``` - npm cache clean --force - cd /your_code_path/arkcompiler/ets_frontend/ts2panda - npm install - cd /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build - npm install - ``` + 2. 执行hello-world.abc文件: 1. 设置搜索路径: @@ -400,7 +392,7 @@ print('Hello World!!!') 1. 通过方舟前端生成hello-world.abc文件,编译命令: ``` - node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js -m --merge-abc test1/test.ts + /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc --module --merge-abc test1/test.ts ``` 2. 
Run the hello-world.abc file: @@ -439,136 +431,151 @@ print('Hello World!!!') Build and compile: ``` -$ ./build.sh --product-name rk3568 --build-target ark_ts2abc_build +$ ./build.sh --product-name rk3568 --build-target ets_frontend_build ``` -After installing `node` and `npm`, use the frontend tool: - ``` -$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/build -$ npm install -$ node --expose-gc src/index.js [options] file.js +$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/ +$ ./es2abc [options] file.js ```
-| Option | Abbreviation | Description |
-| ------- | ------- | ------- |
-| --modules | -m | Compile as a module |
-| --debug-log | -l | Enable log output |
-| --dump-assembly | -a | Output the bytecode file in readable text form |
-| --debug | -d | Carry debug information |
-| --show-statistics | -s | Show bytecode-related statistics |
-| --output | -o | Output file path |
-| --timeout | -t | Timeout threshold |
-| --help | -h | Show help |
-| --bc-version | -v | Output the current bytecode version |
-| --bc-min-version |   | Output the minimum supported bytecode version |
+| Option | Description | Value range | Default |
+| ------- | ------- | ------- | ------- |
+| --debug-info | Carry debug information | - | - |
+| --debugger-evaluate-expression | Evaluate a base64-encoded expression under the debugger | - | - |
+| --dump-assembly | Output an assembly file | - | - |
+| --dump-ast | Print the parsed AST (abstract syntax tree) | - | - |
+| --dump-debug-info | Print debug information | - | - |
+| --dump-literal-buffer | Print the contents of the literal buffer | - | - |
+| --dump-size-stat | Show bytecode-related statistics | - | - |
+| --extension | Specify the input type | ['js', 'ts', 'as'] | - |
+| --help | Show help | - | - |
+| --module | Compile in ESM mode | - | - |
+| --opt-level | Specify the compiler optimization level | ['0', '1', '2'] | 0 |
+| --output | Output file path | - | - |
+| --parse-only | Only parse the input file | - | - |
+| --thread | Number of threads used to generate bytecode | 0 to the number of threads the machine supports | 0 |
diff --git a/docs/development-example.md index 4fc6e27681fcaf0f7d541c05706645bcde2ff848..c8ee078d03e02cc03cb720233b94e7ae1a988f6c 100644 --- a/docs/development-example.md +++ b/docs/development-example.md @@ -48,7 +48,7 @@ Run the **hello-world.js** file. 1. Use the ARK frontend to create the **hello-world.abc** file. ``` - node --expose-gc /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/build/src/index.js hello-world.js + /your_code_path/out/rk3568/clang_x64/arkcompiler/ets_frontend/es2abc hello-world.js ``` 2. Run the **hello-world.abc** file. diff --git a/docs/using-the-toolchain.md index d487322f813bd9890ac8a9b75f25af36e467eef1..2b0f65c3dfe5d80e4eeb96bc105aef73c121747b 100644 --- a/docs/using-the-toolchain.md +++ b/docs/using-the-toolchain.md @@ -9,137 +9,153 @@ Front-end tools, converting JS source code into ARK bytecode, can be built by sp Build tools: ``` -$ $ ./build.sh --product-name rk3568 --build-target ark_ts2abc_build +$ ./build.sh --product-name rk3568 --build-target ets_frontend_build ``` -Install `node` and `npm`, then use tools: - ``` -$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/build -$ npm install -$ node --expose-gc src/index.js [option] file.js +$ cd out/rk3568/clang_x64/arkcompiler/ets_frontend/ +$ ./es2abc [options] file.js ```

-| Option | Abbreviation | Description |
-| ------- | ------- | ------- |
-| --modules | -m | Compile as a module |
-| --debug-log | -l | Enable log output |
-| --dump-assembly | -a | Output the bytecode file in readable text form |
-| --debug | -d | Carry debug information |
-| --show-statistics | -s | Show bytecode-related statistics |
-| --output | -o | Output file path |
-| --timeout | -t | Timeout threshold |
-| --help | -h | Show help |
-| --bc-version | -v | Output the current bytecode version |
-| --bc-min-version |   | Output the minimum supported bytecode version |
+| Option | Description | Value range | Default |
+| ------- | ------- | ------- | ------- |
+| --debug-info | Carry debug information | - | - |
+| --debugger-evaluate-expression | Evaluate a base64-encoded expression under the debugger | - | - |
+| --dump-assembly | Output an assembly file | - | - |
+| --dump-ast | Print the parsed AST (abstract syntax tree) | - | - |
+| --dump-debug-info | Print debug information | - | - |
+| --dump-literal-buffer | Print the contents of the literal buffer | - | - |
+| --dump-size-stat | Show bytecode-related statistics | - | - |
+| --extension | Specify the input type | ['js', 'ts', 'as'] | - |
+| --help | Show help | - | - |
+| --module | Compile in ESM mode | - | - |
+| --opt-level | Specify the compiler optimization level | ['0', '1', '2'] | 0 |
+| --output | Output file path | - | - |
+| --parse-only | Only parse the input file | - | - |
+| --thread | Number of threads used to generate bytecode | 0 to the number of threads the machine supports | 0 |

+If no parameter is specified for **\[options\]**, an ARK binary file is generated by default. diff --git a/ecmascript/base/array_helper.cpp b/ecmascript/base/array_helper.cpp index 68fe58c692b56d0886fa3f07851d500696037065..f9ec72a50b3f332d417bac52ca28376889158a55 100644 --- a/ecmascript/base/array_helper.cpp +++ b/ecmascript/base/array_helper.cpp @@ -24,8 +24,104 @@ #include "ecmascript/js_hclass.h" #include "ecmascript/js_tagged_number.h" #include "ecmascript/js_tagged_value-inl.h" +#include "ecmascript/object_fast_operator-inl.h" namespace panda::ecmascript::base { +int64_t ArrayHelper::GetStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length) +{ + // Common procedure to clamp fromIndexValue to the range [0, length]. + // For the integer case, conditional selection instructions (csel in ARM, cmov in x86, etc.) + // may be utilized by the compiler to minimize branching. + auto doClamp = [length](auto fromIndexValue) -> int64_t { + if (LIKELY(fromIndexValue >= 0)) { + // Including the case where fromIndexValue == Infinity + return (fromIndexValue >= length) ? length : static_cast(fromIndexValue); + } + auto plusLength = fromIndexValue + length; + if (plusLength >= 0) { + return static_cast(plusLength); + } + return 0; // Including the case where fromIndexValue == -Infinity + }; + if (LIKELY(startIndexHandle->IsInt())) { + // Fast path: startIndexHandle is tagged int32. + return doClamp(startIndexHandle->GetInt()); + } + // Slow path: startIndexHandle is tagged double, or type conversion is involved. + JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, startIndexHandle); + if (UNLIKELY(thread->HasPendingException())) { + return length; + } + double fromIndexValue = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); // NaN -> 0 + return doClamp(fromIndexValue); +} + +int64_t ArrayHelper::GetStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length) +{ + uint32_t argc = argv->GetArgsNumber(); + if (argc <= argIndex) { + return 0; + } + JSHandle arg = base::BuiltinsBase::GetCallArg(argv, argIndex); + return GetStartIndex(thread, arg, length); +} + +int64_t ArrayHelper::GetLastStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length) +{ + // Common procedure to clamp fromIndexValue to the range [-1, length-1]. + auto doClamp = [length](auto fromIndexValue) -> int64_t { + if (LIKELY(fromIndexValue >= 0)) { + // Including the case where fromIndexValue == Infinity + return (length - 1 < fromIndexValue) ? (length - 1) : static_cast(fromIndexValue); + } + auto plusLength = fromIndexValue + length; + if (plusLength >= 0) { + return static_cast(plusLength); + } + return -1; // Including the case where fromIndexValue == -Infinity + }; + if (LIKELY(startIndexHandle->IsInt())) { + // Fast path: startIndexHandle is tagged int32. + return doClamp(startIndexHandle->GetInt()); + } + // Slow path: startIndexHandle is tagged double, or type conversion is involved.
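+    // Illustrative values for the clamp above (length == 10): fromIndexValue 3 -> 3;
+    // 42 -> 9, capped at length - 1; -4 -> 6, counted back from the end; -42 -> -1,
+    // so the search reports "not found"; a NaN input truncates to 0 below and stays 0.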
+ JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, startIndexHandle); + if (UNLIKELY(thread->HasPendingException())) { + return -1; + } + double fromIndexValue = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); // NaN -> 0 + return doClamp(fromIndexValue); +} + +int64_t ArrayHelper::GetLastStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length) +{ + uint32_t argc = argv->GetArgsNumber(); + if (argc <= argIndex) { + return length - 1; + } + JSHandle arg = base::BuiltinsBase::GetCallArg(argv, argIndex); + return GetLastStartIndex(thread, arg, length); +} + +bool ArrayHelper::ElementIsStrictEqualTo(JSThread *thread, const JSHandle &thisObjVal, + const JSHandle &keyHandle, + const JSHandle &target) +{ + bool exists = thisObjVal->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, keyHandle); + if (thread->HasPendingException() || !exists) { + return false; + } + JSHandle valueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, keyHandle); + if (thread->HasPendingException()) { + return false; + } + return JSTaggedValue::StrictEqual(thread, target, valueHandle); +} + bool ArrayHelper::IsConcatSpreadable(JSThread *thread, const JSHandle &obj) { // 1. If Type(O) is not Object, return false. @@ -37,19 +133,22 @@ bool ArrayHelper::IsConcatSpreadable(JSThread *thread, const JSHandleGetEcmaVM(); JSHandle env = ecmaVm->GetGlobalEnv(); JSHandle isConcatsprKey = env->GetIsConcatSpreadableSymbol(); - JSHandle spreadable = JSTaggedValue::GetProperty(thread, obj, isConcatsprKey).GetValue(); + JSTaggedValue spreadable = ObjectFastOperator::FastGetPropertyByValue(thread, obj.GetTaggedValue(), + isConcatsprKey.GetTaggedValue()); // 3. ReturnIfAbrupt(spreadable). RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, false); // 4. If spreadable is not undefined, return ToBoolean(spreadable). - if (!spreadable->IsUndefined()) { - return spreadable->ToBoolean(); + if (!spreadable.IsUndefined()) { + return spreadable.ToBoolean(); } // 5. Return IsArray(O). return obj->IsArray(thread); } +// Must use 'double' as the return type: the comparator result may itself be a double, e.g. +// let arr = [1,2,3,4,5,6]; arr.sort(() => Math.random() - 0.5); double ArrayHelper::SortCompare(JSThread *thread, const JSHandle &callbackfnHandle, const JSHandle &valueX, const JSHandle &valueY) { @@ -86,10 +185,10 @@ double ArrayHelper::SortCompare(JSThread *thread, const JSHandle RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); info->SetCallArg(valueX.GetTaggedValue(), valueY.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); if (callResult.IsInt()) { return callResult.GetInt(); } - RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); JSHandle testResult(thread, callResult); JSTaggedNumber v = JSTaggedValue::ToNumber(thread, testResult); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); @@ -106,12 +205,45 @@ double ArrayHelper::SortCompare(JSThread *thread, const JSHandle // 9. If xString < yString, return -1. // 10. If xString > yString, return 1. // 11. Return +0.
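+    // Illustrative note: the two fast paths below keep spec steps 9-11 observable while
+    // skipping the generic ToString slow path. Int pairs go through IntLexicographicCompare,
+    // which orders the numbers by their decimal string forms, and string pairs are
+    // compared directly via EcmaStringAccessor::Compare.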
+ if (valueX->IsInt() && valueY->IsInt()) { + return JSTaggedValue::IntLexicographicCompare(valueX.GetTaggedValue(), valueY.GetTaggedValue()); + } + if (valueX->IsString() && valueY->IsString()) { + return EcmaStringAccessor::Compare(thread->GetEcmaVM(), + JSHandle(valueX), JSHandle(valueY)); + } JSHandle xValueHandle(JSTaggedValue::ToString(thread, valueX)); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); JSHandle yValueHandle(JSTaggedValue::ToString(thread, valueY)); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, 0); ComparisonResult compareResult = JSTaggedValue::Compare(thread, xValueHandle, yValueHandle); - return compareResult == ComparisonResult::GREAT ? 1 : 0; + if (compareResult == ComparisonResult::GREAT) { + return 1; + } + if (compareResult == ComparisonResult::LESS) { + return -1; + } + return 0; +} + +double ArrayHelper::StringSortCompare(JSThread *thread, const JSHandle &valueX, + const JSHandle &valueY) +{ + ASSERT(valueX->IsString()); + ASSERT(valueY->IsString()); + // 9. If xString < yString, return -1. + // 10. If xString > yString, return 1. + // 11. Return +0. + auto xHandle = JSHandle(valueX); + auto yHandle = JSHandle(valueY); + int result = EcmaStringAccessor::Compare(thread->GetEcmaVM(), xHandle, yHandle); + if (result < 0) { + return -1; + } + if (result > 0) { + return 1; + } + return 0; } int64_t ArrayHelper::GetLength(JSThread *thread, const JSHandle &thisHandle) @@ -187,6 +319,7 @@ JSTaggedValue ArrayHelper::FlattenIntoArray(JSThread *thread, const JSHandle ArrayHelper::SortIndexedProperties(JSThread *thread, const JSHandle &thisObj, + int64_t len, const JSHandle &callbackFnHandle, + HolesType holes) +{ + // 1. Let items be a new empty List. + JSHandle items(thread->GetEcmaVM()->GetFactory()->NewTaggedArray(len)); + // 2. Let k be 0. + int64_t k = 0; + // 3. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If holes is skip-holes, then + // i. Let kRead be ? HasProperty(obj, Pk). + // c. Else, + // i. Assert: holes is read-through-holes. + // ii. Let kRead be true. + // d. If kRead is true, then + // i. Let kValue be ? Get(obj, Pk). + // ii. Append kValue to items. + // e. Set k to k + 1. + bool kRead = false; + JSMutableHandle pk(thread, JSTaggedValue::Undefined()); + + int64_t index = 0; + while (k < len) { + if (holes == HolesType::SKIP_HOLES) { + pk.Update(JSTaggedValue(k)); + kRead = JSTaggedValue::HasProperty(thread, thisObj, pk); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, items); + } else { + ASSERT(holes == HolesType::READ_THROUGH_HOLES); + kRead = true; + } + if (kRead) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObj, k); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, items); + items->Set(thread, index++, kValue.GetTaggedValue()); + } + ++k; + } + if (index < k) { + items->Trim(thread, index); + } + // 4. Sort items using an implementation-defined sequence of calls to SortCompare. + // If any such call returns an abrupt completion, + // stop before performing any further calls to SortCompare and return that Completion Record. + JSArray::SortElements(thread, items, callbackFnHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, items); + // 5. Return items. 
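+    // Illustrative note: with holes == SKIP_HOLES the result may hold fewer than len
+    // entries, which is why Trim() shrinks the array above; with READ_THROUGH_HOLES every
+    // index is read, so missing elements come back as undefined (or a prototype value).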
+ return items; +} } // namespace panda::ecmascript::base diff --git a/ecmascript/base/array_helper.h b/ecmascript/base/array_helper.h index 1461c246cd9d96ff6829d1cdd799e26fe4402377..7417bbd0cfbb76e00a26b8511a4e4af225957c50 100644 --- a/ecmascript/base/array_helper.h +++ b/ecmascript/base/array_helper.h @@ -19,6 +19,8 @@ #include #include "ecmascript/base/builtins_base.h" +#include "ecmascript/ecma_runtime_call_info.h" +#include "ecmascript/js_tagged_value.h" namespace panda::ecmascript::base { struct FlattenArgs { @@ -26,17 +28,51 @@ struct FlattenArgs { int64_t start = 0; double depth = 0; }; + +enum class HolesType { + SKIP_HOLES, + READ_THROUGH_HOLES, +}; class ArrayHelper { public: + // Common subprocedure for Array.prototype.at, Array.prototype.indexOf, Array.prototype.slice, etc. + // Gets start index that falls in range [0, length]. + // length is returned on pending exception. + static int64_t GetStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length); + // If argIndex is out of range [0, argc), then start index = 0 by default. + // Otherwise, let startIndexHandle = GetCallArg(argv, argIndex) and call GetStartIndex. + static int64_t GetStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length); + // Common subprocedure for Array.prototype.lastIndexOf, etc. + // Gets last start index that falls in range [-1, length - 1]. + // -1 is returned on pending exception. + static int64_t GetLastStartIndex(JSThread *thread, const JSHandle &startIndexHandle, + int64_t length); + // If argIndex is out of range [0, argc), then start index = length - 1 by default. + // Otherwise, let startIndexHandle = GetCallArg(argv, argIndex) and call GetLastStartIndex. + static int64_t GetLastStartIndexFromArgs(JSThread *thread, EcmaRuntimeCallInfo *argv, + uint32_t argIndex, int64_t length); + // Let thisHandle be the array object. Checks whether array[key] (if exists) is strictly equal to target. + // Returns false on pending exception. 
+ static bool ElementIsStrictEqualTo(JSThread *thread, const JSHandle &thisHandle, + const JSHandle &keyHandle, + const JSHandle &target); + static bool IsConcatSpreadable(JSThread *thread, const JSHandle &obj); static double SortCompare(JSThread *thread, const JSHandle &callbackfnHandle, const JSHandle &valueX, const JSHandle &valueY); + static double StringSortCompare(JSThread *thread, const JSHandle &valueX, + const JSHandle &valueY); static int64_t GetLength(JSThread *thread, const JSHandle &thisHandle); static int64_t GetArrayLength(JSThread *thread, const JSHandle &thisHandle); static JSTaggedValue FlattenIntoArray(JSThread *thread, const JSHandle &newArrayHandle, const JSHandle &thisObjVal, const FlattenArgs &args, const JSHandle &mapperFunctionHandle, const JSHandle &thisArg); + static JSHandle SortIndexedProperties(JSThread *thread, const JSHandle &thisObj, + int64_t len, const JSHandle &callbackFnHandle, + HolesType holes); }; } // namespace panda::ecmascript::base diff --git a/ecmascript/base/atomic_helper.h b/ecmascript/base/atomic_helper.h index bc9ef981f7f3f9877ae7539033370eb24266fe6a..c180bc7e3bdfeb98b94e01cd9c6064e725578d3c 100644 --- a/ecmascript/base/atomic_helper.h +++ b/ecmascript/base/atomic_helper.h @@ -19,7 +19,7 @@ #include "ecmascript/js_dataview.h" namespace panda::ecmascript::base { -enum class BytesSize : int32_t {ONEBYTES = 1, TWOBYTES = 2, FOURBYTES = 4, EIGHTBYTES = 8}; +enum class BytesSize : uint32_t {ONEBYTES = 1, TWOBYTES = 2, FOURBYTES = 4, EIGHTBYTES = 8}; class AtomicHelper final { public: @@ -101,4 +101,4 @@ public: }; } // namespace panda::ecmascript::base -#endif // ECMASCRIPT_BASE_ATOMIC_HELPER_H \ No newline at end of file +#endif // ECMASCRIPT_BASE_ATOMIC_HELPER_H diff --git a/ecmascript/base/bit_helper.h b/ecmascript/base/bit_helper.h index 1bc058b0981b8e4297937afcd481feb810033884..fe9ffbeb10865a14984f7ec88ad96fe478c0e33e 100644 --- a/ecmascript/base/bit_helper.h +++ b/ecmascript/base/bit_helper.h @@ -22,6 +22,7 @@ #include namespace panda::ecmascript::base { +constexpr uint64_t pureNaN = 0x7FF8ULL << 48U; // Make sure the returned NaN is the safe (quiet) one. template union Data { S src; diff --git a/ecmascript/base/builtins_base.h b/ecmascript/base/builtins_base.h index be50bb83cc00234aa705531d52f3a8d29a487dd1..baf84b1858ea4f49d48274e3d575a73587eb921e 100644 --- a/ecmascript/base/builtins_base.h +++ b/ecmascript/base/builtins_base.h @@ -30,6 +30,96 @@ namespace panda::ecmascript { class JSArray; namespace base { +class BuiltinConstantEntry { +public: + constexpr BuiltinConstantEntry(std::string_view name, JSTaggedValue value) + : name_(name), rawTaggedValue_(value.GetRawData()) {} + + static constexpr BuiltinConstantEntry Create(std::string_view name, JSTaggedValue value) + { + return BuiltinConstantEntry(name, value); + } + + constexpr std::string_view GetName() const + { + return name_; + } + + constexpr JSTaggedValue GetTaggedValue() const + { + return JSTaggedValue(rawTaggedValue_); + } + +private: + std::string_view name_; + JSTaggedType rawTaggedValue_; +}; + +class BuiltinFunctionEntry { +public: + static constexpr int LENGTH_BITS_SIZE = 8; + static constexpr int BUILTIN_ID_BITS_SIZE = 8; + // Ensures the bits are enough to represent all builtin stubs.
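+    // Field layout of the 64-bit bitfield, low bits first, as encoded by Create() below:
+    //   [ LengthBits : 8 ][ BuiltinIdBits : 8 ][ IsConstructorBit : 1 ][ IsAccessorBit : 1 ]
+    // The static_assert below guards BuiltinIdBits against overflow: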
+ static_assert(kungfu::BuiltinsStubCSigns::NUM_OF_BUILTINS_STUBS <= (1u << BUILTIN_ID_BITS_SIZE)); + + using LengthBits = panda::BitField; + using BuiltinIdBits = LengthBits::NextField; + using IsConstructorBit = BuiltinIdBits::NextFlag; + using IsAccessorBit = IsConstructorBit::NextFlag; + + template + static constexpr BuiltinFunctionEntry Create(std::string_view name, EcmaEntrypoint entrypoint, + int length, kungfu::BuiltinsStubCSigns::ID builtinId) + { + static_assert((std::is_same_v && ...), + "Only 1-bit fields are available in BitFieldArgs"); + uint64_t bitfield = 0; + bitfield |= LengthBits::Encode(length); + bitfield |= BuiltinIdBits::Encode(builtinId); + // Traverses BitFieldArgs (IsConstructorBit, IsAccessorBit, etc.) + ((bitfield |= BitFieldArgs::Encode(true)), ...); + return BuiltinFunctionEntry(name, entrypoint, bitfield); + } + + constexpr std::string_view GetName() const + { + return name_; + } + + constexpr EcmaEntrypoint GetEntrypoint() const + { + return entrypoint_; + } + + constexpr int GetLength() const + { + return LengthBits::Decode(bitfield_); + } + + constexpr kungfu::BuiltinsStubCSigns::ID GetBuiltinStubId() const + { + return BuiltinIdBits::Decode(bitfield_); + } + + constexpr bool IsConstructor() const + { + return IsConstructorBit::Decode(bitfield_); + } + + constexpr bool IsAccessor() const + { + return IsAccessorBit::Decode(bitfield_); + } + +private: + std::string_view name_; + EcmaEntrypoint entrypoint_; + uint64_t bitfield_; + + constexpr BuiltinFunctionEntry(std::string_view name, EcmaEntrypoint entrypoint, uint64_t bitfield) + : name_(name), entrypoint_(entrypoint), bitfield_(bitfield) {} +}; + class BuiltinsBase { public: enum ArgsPosition : uint32_t { FIRST = 0, SECOND, THIRD, FOURTH, FIFTH }; @@ -63,6 +153,11 @@ public: return JSTaggedValue(value); } + static inline JSTaggedValue GetTaggedInt64(int64_t value) + { + return JSTaggedValue(value); + } + static inline JSTaggedValue GetTaggedDouble(double value) { return JSTaggedValue(value); diff --git a/ecmascript/base/config.h b/ecmascript/base/config.h index 0dc57ae708d531f90a8d068366a75f103fc10793..0f9baeb51b3489697bd87cb10744b3777a505ae6 100644 --- a/ecmascript/base/config.h +++ b/ecmascript/base/config.h @@ -64,6 +64,15 @@ namespace panda::ecmascript { #define ECMASCRIPT_ENABLE_HEAP_VERIFY 1 #define ECMASCRIPT_ENABLE_BARRIER_CHECK 1 #define ECMASCRIPT_ENABLE_NAPI_SPECIAL_CHECK 1 +#elif defined(ECMASCRIPT_ENABLE_DFX_CONFIG) + #define ECMASCRIPT_ENABLE_IC 1 + #define ECMASCRIPT_ENABLE_ZAP_MEM 0 + #define ECMASCRIPT_SWITCH_GC_MODE_TO_FULL_GC 0 + #define ECMASCRIPT_ENABLE_CAST_CHECK 0 + #define ECMASCRIPT_ENABLE_NEW_HANDLE_CHECK 0 + #define ECMASCRIPT_ENABLE_HEAP_VERIFY 1 + #define ECMASCRIPT_ENABLE_BARRIER_CHECK 0 + #define ECMASCRIPT_ENABLE_NAPI_SPECIAL_CHECK 1 #else #define ECMASCRIPT_ENABLE_IC 1 #define ECMASCRIPT_ENABLE_ZAP_MEM 0 diff --git a/ecmascript/base/error_helper.cpp b/ecmascript/base/error_helper.cpp index 78a0422f16d797efe39bff410f97d9565b904aa3..58197f6ac753f44874f67a3e74fb364636ddc48a 100644 --- a/ecmascript/base/error_helper.cpp +++ b/ecmascript/base/error_helper.cpp @@ -118,6 +118,9 @@ JSHandle ErrorHelper::GetErrorName(JSThread *thread, const JSHand case ErrorType::OOM_ERROR: errorKey = globalConst->GetHandledOOMErrorString(); break; + case ErrorType::TERMINATION_ERROR: + errorKey = globalConst->GetHandledTerminationErrorString(); + break; default: errorKey = globalConst->GetHandledErrorString(); break; @@ -128,7 +131,7 @@ JSHandle ErrorHelper::GetErrorName(JSThread *thread, const 
JSHand } JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, - [[maybe_unused]] const ErrorType &errorType) + const ErrorType &errorType) { JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); @@ -166,7 +169,23 @@ JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, msgKey, msgDesc); ASSERT_PRINT(status == true, "return result exception!"); } - + // InstallErrorCause + JSHandle options = BuiltinsBase::GetCallArg(argv, 1); + // If options is an Object and ? HasProperty(options, "cause") is true, then + // a. Let cause be ? Get(options, "cause"). + // b. Perform CreateNonEnumerableDataPropertyOrThrow(O, "cause", cause). + if (options->IsECMAObject()) { + JSHandle causeKey = globalConst->GetHandledCauseString(); + bool causePresent = JSTaggedValue::HasProperty(thread, options, causeKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (causePresent) { + JSHandle cause = JSObject::GetProperty(thread, options, causeKey).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + PropertyDescriptor causeDesc(thread, cause, true, false, true); + [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, causeKey, causeDesc); + ASSERT_PRINT(status == true, "return result exception!"); + } + } JSHandle errorFunc = GetErrorJSFunction(thread); if (!errorFunc->IsUndefined()) { JSHandle errorFunckey = globalConst->GetHandledErrorFuncString(); @@ -176,7 +195,8 @@ JSTaggedValue ErrorHelper::ErrorCommonConstructor(EcmaRuntimeCallInfo *argv, ASSERT_PRINT(status == true, "return result exception!"); } - JSHandle handleStack = BuildEcmaStackTrace(thread); + bool isOOMError = errorType == ErrorType::OOM_ERROR; + JSHandle handleStack = BuildEcmaStackTrace(thread, isOOMError); JSHandle stackkey = globalConst->GetHandledStackString(); PropertyDescriptor stackDesc(thread, JSHandle::Cast(handleStack), true, false, true); [[maybe_unused]] bool status = JSObject::DefineOwnProperty(thread, nativeInstanceObj, stackkey, stackDesc); @@ -205,9 +225,12 @@ JSHandle ErrorHelper::GetErrorJSFunction(JSThread *thread) return thread->GlobalConstants()->GetHandledUndefined(); } -JSHandle ErrorHelper::BuildEcmaStackTrace(JSThread *thread) +JSHandle ErrorHelper::BuildEcmaStackTrace(JSThread *thread, bool isOOMError) { std::string data = JsStackInfo::BuildJsStackTrace(thread, false); + if (isOOMError) { + data = data.substr(0, MAX_ERROR_SIZE); + } ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); LOG_ECMA(DEBUG) << data; return factory->NewFromStdString(data); diff --git a/ecmascript/base/error_helper.h b/ecmascript/base/error_helper.h index 5a1deeab3ade84bede2f86ff0bcbb16ea4c7d55b..aedf378e641f6844fa52921971da955da3aa5f7f 100644 --- a/ecmascript/base/error_helper.h +++ b/ecmascript/base/error_helper.h @@ -35,10 +35,11 @@ public: private: static JSHandle GetErrorJSFunction(JSThread *thread); - static JSHandle BuildEcmaStackTrace(JSThread *thread); + static JSHandle BuildEcmaStackTrace(JSThread *thread, bool isOOMError); static JSHandle GetErrorName(JSThread *thread, const JSHandle &name, const ErrorType &errorType); + static constexpr uint32_t MAX_ERROR_SIZE = 128_KB; }; } // namespace panda::ecmascript::base diff --git a/ecmascript/base/error_type.h b/ecmascript/base/error_type.h index 7905c0614b88a9fb96ab45f5f8a2e3478beb95ed..ce5acacf3c26c0810c980dd35b3a33f9440a032b 100644 --- a/ecmascript/base/error_type.h +++ 
b/ecmascript/base/error_type.h @@ -29,6 +29,7 @@ enum class ErrorType : uint8_t { URI_ERROR, AGGREGATE_ERROR, OOM_ERROR, + TERMINATION_ERROR, }; } // namespace panda::ecmascript::base diff --git a/ecmascript/base/fast_json_stringifier.cpp b/ecmascript/base/fast_json_stringifier.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6a7cd607547205dfe090acd7bda72b4535cccfbc --- /dev/null +++ b/ecmascript/base/fast_json_stringifier.cpp @@ -0,0 +1,939 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/base/fast_json_stringifier.h" + +#include "ecmascript/base/builtins_base.h" +#include "ecmascript/base/json_helper.h" +#include "ecmascript/base/number_helper.h" +#include "ecmascript/builtins/builtins_errors.h" +#include "ecmascript/ecma_runtime_call_info.h" +#include "ecmascript/ecma_string-inl.h" +#include "ecmascript/ecma_vm.h" +#include "ecmascript/global_dictionary-inl.h" +#include "ecmascript/js_array.h" +#include "ecmascript/js_function.h" +#include "ecmascript/js_handle.h" +#include "ecmascript/js_object-inl.h" +#include "ecmascript/js_primitive_ref.h" +#include "ecmascript/js_tagged_value-inl.h" +#include "ecmascript/js_tagged_value.h" +#include "ecmascript/object_fast_operator-inl.h" + +namespace panda::ecmascript::base { +JSHandle FastJsonStringifier::Stringify(const JSHandle &value) +{ + factory_ = thread_->GetEcmaVM()->GetFactory(); + JSHandle jsonCache = thread_->GetEcmaVM()->GetGlobalEnv()->GetJsonObjectHclassCache(); + if (jsonCache->IsHole()) { + hclassCache_ = factory_->NewTaggedArray(JSON_CACHE_SIZE); + } else { + hclassCache_ = JSHandle::Cast(jsonCache); + } + JSTaggedValue tagValue = value.GetTaggedValue(); + handleValue_ = JSMutableHandle(thread_, tagValue); + handleKey_ = JSMutableHandle(thread_, factory_->GetEmptyString()); + + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread_); + handleValue_.Update(serializeValue); + } + + JSTaggedValue result = SerializeJSONProperty(handleValue_); + + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread_); + if (!result.IsUndefined()) { + return JSHandle( + factory_->NewFromUtf8Literal(reinterpret_cast(result_.c_str()), result_.size())); + } + return thread_->GlobalConstants()->GetHandledUndefined(); +} + +JSTaggedValue FastJsonStringifier::GetSerializeValue(const JSHandle &key, + const JSHandle &value) +{ + JSTaggedValue tagValue = value.GetTaggedValue(); + JSHandle undefined = thread_->GlobalConstants()->GetHandledUndefined(); + // a. Let toJSON be Get(value, "toJSON"). + JSHandle toJson = thread_->GlobalConstants()->GetHandledToJsonString(); + JSHandle toJsonFun( + thread_, ObjectFastOperator::FastGetPropertyByValue(thread_, tagValue, toJson.GetTaggedValue())); + // b. ReturnIfAbrupt(toJSON). 
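+    // Illustrative example of the toJSON hook: a Date value takes the IsCallable branch
+    // below, so the serialized output is whatever string Date.prototype.toJSON returns.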
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + tagValue = value.GetTaggedValue(); + // c. If IsCallable(toJSON) is true + if (UNLIKELY(toJsonFun->IsCallable())) { + // Let value be Call(toJSON, value, «key»). + EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread_, toJsonFun, value, undefined, 1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + info->SetCallArg(key.GetTaggedValue()); + tagValue = JSFunction::Call(info); + // ii. ReturnIfAbrupt(value). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } + return tagValue; +} + +JSTaggedValue FastJsonStringifier::SerializeJSONProperty(const JSHandle &value) +{ + JSTaggedValue tagValue = value.GetTaggedValue(); + if (!tagValue.IsHeapObject()) { + JSTaggedType type = tagValue.GetRawData(); + switch (type) { + // If value is false, return "false". + case JSTaggedValue::VALUE_FALSE: + result_ += "false"; + return tagValue; + // If value is true, return "true". + case JSTaggedValue::VALUE_TRUE: + result_ += "true"; + return tagValue; + // If value is null, return "null". + case JSTaggedValue::VALUE_NULL: + result_ += "null"; + return tagValue; + default: + // If Type(value) is Number, then + if (tagValue.IsNumber()) { + // a. If value is finite, return ToString(value). + if (std::isfinite(tagValue.GetNumber())) { + result_ += ConvertToString(*base::NumberHelper::NumberToString(thread_, tagValue)); + } else { + // b. Else, return "null". + result_ += "null"; + } + return tagValue; + } + } + } else { + JSType jsType = tagValue.GetTaggedObject()->GetClass()->GetObjectType(); + JSHandle valHandle(thread_, tagValue); + switch (jsType) { + case JSType::JS_ARRAY: { + SerializeJSArray(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + return tagValue; + } + // If Type(value) is String, return QuoteJSONString(value). 
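+            // Illustrative note: LINE_STRING, CONSTANT_STRING, TREE_STRING and SLICED_STRING
+            // are the engine's internal string representations; Flatten() below normalizes
+            // any of them to a flat string before quoting.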
+ case JSType::LINE_STRING: + case JSType::CONSTANT_STRING: + case JSType::TREE_STRING: + case JSType::SLICED_STRING: { + JSHandle strHandle = JSHandle(valHandle); + auto string = JSHandle(thread_, + EcmaStringAccessor::Flatten(thread_->GetEcmaVM(), strHandle)); + CString str = ConvertToString(*string, StringConvertedUsage::LOGICOPERATION); + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + return tagValue; + } + case JSType::JS_PRIMITIVE_REF: { + SerializePrimitiveRef(valHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, JSTaggedValue::Exception()); + return tagValue; + } + case JSType::SYMBOL: + return JSTaggedValue::Undefined(); + case JSType::BIGINT: { + THROW_TYPE_ERROR_AND_RETURN(thread_, "cannot serialize a BigInt", JSTaggedValue::Exception()); + } + default: { + if (!tagValue.IsCallable()) { + JSHClass *jsHclass = tagValue.GetTaggedObject()->GetClass(); + if (UNLIKELY(jsHclass->IsJSProxy() && + JSProxy::Cast(tagValue.GetTaggedObject())->IsArray(thread_))) { + SerializeJSProxy(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } else { + SerializeJSONObject(valHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); + } + return tagValue; + } + } + } + } + return JSTaggedValue::Undefined(); +} + +CString FastJsonStringifier::SerializeObjectKey(const JSHandle &key, bool hasContent) +{ + if (hasContent) { + result_ += ","; + } + + CString str; + if (key->IsString()) { + str = ConvertToString(EcmaString::Cast(key->GetTaggedObject()), StringConvertedUsage::LOGICOPERATION); + } else if (key->IsInt()) { + str = NumberHelper::IntToString(static_cast(key->GetInt())); + } else { + str = ConvertToString(*JSTaggedValue::ToString(thread_, key), StringConvertedUsage::LOGICOPERATION); + } + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + result_ += ":"; + + return str; +} + +bool FastJsonStringifier::PushValue(const JSHandle &value) +{ + uint32_t thisLen = stack_.size(); + + for (uint32_t i = 0; i < thisLen; i++) { + bool equal = JSTaggedValue::SameValue(stack_[i].GetTaggedValue(), value.GetTaggedValue()); + if (equal) { + return true; + } + } + + stack_.emplace_back(value); + return false; +} + +void FastJsonStringifier::PopValue() +{ + stack_.pop_back(); +} + +bool FastJsonStringifier::SerializeJSONObject(const JSHandle &value) +{ + bool isContain = PushValue(value); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "{"; + bool hasContent = false; + + ASSERT(!value->IsAccessor()); + JSHandle obj(value); + if (UNLIKELY(value->IsJSProxy() || value->IsTypedArray())) { // serialize proxy and typedArray + JSHandle propertyArray = JSObject::EnumerableOwnNames(thread_, obj); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + uint32_t arrLength = propertyArray->GetLength(); + for (uint32_t i = 0; i < arrLength; i++) { + handleKey_.Update(propertyArray->Get(i)); + JSHandle valueHandle = JSTaggedValue::GetProperty(thread_, value, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(valueHandle->IsECMAObject() || valueHandle->IsBigInt())) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, valueHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + continue; + } + handleValue_.Update(serializeValue); + } else { + handleValue_.Update(valueHandle); + } + SerializeObjectKey(handleKey_, 
hasContent); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + hasContent = true; + } + } + } else { + uint32_t numOfKeys = obj->GetNumberOfKeys(); + uint32_t numOfElements = obj->GetNumberOfElements(); + if (numOfKeys + numOfElements < CACHE_MINIMUN_SIZIE || !cacheable_) { + if (numOfElements > 0) { + hasContent = DefaultSerializeElements(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = DefaultSerializeKeys(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } else { + JSHClass *jsHclass = value->GetTaggedObject()->GetClass(); + int32_t index = FindCache(jsHclass, numOfKeys + numOfElements); + if (index != INVALID_INDEX) { + auto strCache = thread_->GetCurrentEcmaContext()->GetJsonStringifyCache(index); + uint32_t cacheIndex = 0; + if (numOfElements > 0) { + hasContent = SerializeElementsWithCache(obj, hasContent, strCache, cacheIndex, numOfElements); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = SerializeKeysWithCache(obj, hasContent, strCache, cacheIndex); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } else { + CVector> strCache; + if (numOfElements > 0) { + hasContent = TryCacheSerializeElements(obj, hasContent, strCache); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = TryCacheSerializeKeys(obj, hasContent, strCache); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (cacheable_) { + SetCache(value->GetTaggedObject()->GetClass(), numOfElements + numOfKeys, strCache); + } + } + } + } + + result_ += "}"; + PopValue(); + return true; +} + +bool FastJsonStringifier::SerializeJSProxy(const JSHandle &object) +{ + bool isContain = PushValue(object); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "["; + JSHandle proxy(object); + JSHandle lengthKey = thread_->GlobalConstants()->GetHandledLengthString(); + JSHandle lenghHandle = JSProxy::GetProperty(thread_, proxy, lengthKey).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + JSTaggedNumber lenNumber = JSTaggedValue::ToLength(thread_, lenghHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + uint32_t length = lenNumber.ToUint32(); + for (uint32_t i = 0; i < length; i++) { + handleKey_.Update(JSTaggedValue(i)); + JSHandle valHandle = JSProxy::GetProperty(thread_, proxy, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (i > 0) { + result_ += ","; + } + if (UNLIKELY(valHandle->IsECMAObject() || valHandle->IsBigInt())) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, valHandle); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + handleValue_.Update(serializeValue); + } else { + handleValue_.Update(valHandle); + } + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (res.IsUndefined()) { + result_ += "null"; + } + } + + result_ += "]"; + PopValue(); + return true; +} + +bool FastJsonStringifier::SerializeJSArray(const JSHandle &value) +{ + // If state.[[Stack]] contains value, throw a TypeError exception because the structure is cyclical. 
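+    // Illustrative example: let a = []; a[0] = a; JSON.stringify(a) reaches this check
+    // with `a` already on the open-value stack and must throw a TypeError.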
+ bool isContain = PushValue(value); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "["; + JSHandle jsArr(value); + uint32_t len = jsArr->GetArrayLength(); + if (len > 0) { + for (uint32_t i = 0; i < len; i++) { + JSTaggedValue tagVal = ObjectFastOperator::FastGetPropertyByIndex(thread_, value.GetTaggedValue(), i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(tagVal.IsAccessor())) { + tagVal = JSObject::CallGetter(thread_, AccessorData::Cast(tagVal.GetTaggedObject()), value); + } + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(tagVal); + + if (i > 0) { + result_ += ","; + } + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + handleValue_.Update(serializeValue); + } + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (res.IsUndefined()) { + result_ += "null"; + } + } + } + + result_ += "]"; + PopValue(); + return true; +} + +void FastJsonStringifier::SerializePrimitiveRef(const JSHandle &primitiveRef) +{ + JSTaggedValue primitive = JSPrimitiveRef::Cast(primitiveRef.GetTaggedValue().GetTaggedObject())->GetValue(); + if (primitive.IsString()) { + auto priStr = JSTaggedValue::ToString(thread_, primitiveRef); + RETURN_IF_ABRUPT_COMPLETION(thread_); + CString str = ConvertToString(*priStr, StringConvertedUsage::LOGICOPERATION); + str = JsonHelper::ValueToQuotedString(str); + result_ += str; + } else if (primitive.IsNumber()) { + auto priNum = JSTaggedValue::ToNumber(thread_, primitiveRef); + RETURN_IF_ABRUPT_COMPLETION(thread_); + if (std::isfinite(priNum.GetNumber())) { + result_ += ConvertToString(*base::NumberHelper::NumberToString(thread_, priNum)); + } else { + result_ += "null"; + } + } else if (primitive.IsBoolean()) { + result_ += primitive.IsTrue() ? 
"true" : "false"; + } else if (primitive.IsBigInt()) { + THROW_TYPE_ERROR(thread_, "cannot serialize a BigInt"); + } +} + +bool FastJsonStringifier::TryCacheSerializeElements(const JSHandle &obj, bool hasContent, + CVector> &strCache) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(elementsArr->Get(i)); + hasContent = AppendJsonString(hasContent, strCache, i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + CVector> sortArr; + int size = numberDic->Size(); + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = numberDic->GetKey(hashIndex); + if (!key.IsUndefined() && !key.IsHole()) { + PropertyAttributes attr = numberDic->GetAttributes(hashIndex); + if (attr.IsEnumerable()) { + JSTaggedValue numberKey = JSTaggedValue(static_cast(key.GetInt())); + sortArr.emplace_back(JSHandle(thread_, numberKey)); + } + } + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = numberDic->FindEntry(entryKey); + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::SerializeElementsWithCache(const JSHandle &obj, bool hasContent, + CVector> &strCache, uint32_t &cacheIndex, uint32_t elementSize) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + CString key = strCache[cacheIndex++].first; + handleValue_.Update(elementsArr->Get(i)); + hasContent = FastAppendJsonString(hasContent, key); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + for (; cacheIndex < elementSize; cacheIndex++) { + CString key = strCache[cacheIndex].first; + int index = strCache[cacheIndex].second; + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, key); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::TryCacheSerializeKeys(const JSHandle &obj, bool hasContent, + CVector> &strCache) +{ + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSTaggedValue enumCache = jsHclass->GetEnumCache(); + if (JSObject::GetEnumCacheKind(thread_, enumCache) == EnumCacheKind::ONLY_OWN_KEYS) { + JSHandle cache(thread_, enumCache); + uint32_t length = cache->GetLength(); + for (uint32_t i = 0; i < length; i++) { + JSTaggedValue key = cache->Get(i); + if (!key.IsString()) { + continue; + } + handleKey_.Update(key); + 
JSTaggedValue value; + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + int end = static_cast(jsHclass->NumberOfProps()); + if (end <= 0) { + return hasContent; + } + for (int i = 0; i < end; i++) { + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + JSTaggedValue key = layoutInfo->GetKey(i); + if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { + handleKey_.Update(key); + JSTaggedValue value; + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + int size = globalDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = globalDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = globalDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = globalDic->FindEntry(entryKey); + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + int size = nameDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = nameDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = nameDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + 
std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = nameDic->FindEntry(entryKey); + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent, strCache, index); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; +} + +bool FastJsonStringifier::SerializeKeysWithCache(const JSHandle &obj, bool hasContent, + CVector> &strCache, uint32_t &cacheIndex) +{ + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + JSTaggedValue value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + for (; cacheIndex < strCache.size(); cacheIndex++) { + auto cacheValue = strCache[cacheIndex]; + CString str = cacheValue.first; + int index = cacheValue.second; + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = FastAppendJsonString(hasContent, str); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; +} + +bool FastJsonStringifier::AppendJsonString(bool hasContent, CVector> &strCache, int index) +{ + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + return hasContent; + } + handleValue_.Update(serializeValue); + } + CString keyStr = SerializeObjectKey(handleKey_, hasContent); + strCache.emplace_back(std::pair(keyStr, index)); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + 
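// Illustrative sketch, not part of the patch: AppendJsonString and
// FastAppendJsonString write the "key": prefix eagerly and undo it with
// EraseKeyString when the value turns out to be unserializable
// (undefined/symbol/callable). The same append-then-rollback discipline on a
// plain buffer, with assumed semantics:
#include <string>

static bool AppendMember(std::string &out, const std::string &escapedKey,
                         const std::string *serializedValue, bool hasContent)
{
    size_t mark = out.size();          // rollback point, before ",", key and ":"
    if (hasContent) {
        out += ",";
    }
    out += escapedKey;
    out += ":";
    if (serializedValue == nullptr) {  // serializer skipped this value
        out.resize(mark);              // equivalent to EraseKeyString(key, hasContent)
        return hasContent;             // unchanged: nothing was emitted
    }
    out += *serializedValue;
    return true;                       // a member was emitted
}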
RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + return true; + } + EraseKeyString(keyStr, hasContent); + return hasContent; +} + +bool FastJsonStringifier::FastAppendJsonString(bool hasContent, CString &key) +{ + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + return hasContent; + } + handleValue_.Update(serializeValue); + } + FastSerializeObjectKey(key, hasContent); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + return true; + } + EraseKeyString(key, hasContent); + return hasContent; +} + +bool FastJsonStringifier::DefaultSerializeElements(const JSHandle &obj, bool hasContent) +{ + JSHandle elementsArr(thread_, obj->GetElements()); + if (!elementsArr->IsDictionaryMode()) { + uint32_t elementsLen = elementsArr->GetLength(); + for (uint32_t i = 0; i < elementsLen; ++i) { + if (!elementsArr->Get(i).IsHole()) { + handleKey_.Update(JSTaggedValue(i)); + handleValue_.Update(elementsArr->Get(i)); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + } else { + JSHandle numberDic(elementsArr); + CVector> sortArr; + int size = numberDic->Size(); + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = numberDic->GetKey(hashIndex); + if (!key.IsUndefined() && !key.IsHole()) { + PropertyAttributes attr = numberDic->GetAttributes(hashIndex); + if (attr.IsEnumerable()) { + JSTaggedValue numberKey = JSTaggedValue(static_cast(key.GetInt())); + sortArr.emplace_back(JSHandle(thread_, numberKey)); + } + } + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = numberDic->FindEntry(entryKey); + JSTaggedValue value = numberDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; +} + +bool FastJsonStringifier::DefaultSerializeKeys(const JSHandle &obj, bool hasContent) +{ + JSHandle propertiesArr(thread_, obj->GetProperties()); + if (!propertiesArr->IsDictionaryMode()) { + JSHandle jsHclass(thread_, obj->GetJSHClass()); + JSTaggedValue enumCache = jsHclass->GetEnumCache(); + if (JSObject::GetEnumCacheKind(thread_, enumCache) == EnumCacheKind::ONLY_OWN_KEYS) { + JSHandle cache(thread_, enumCache); + uint32_t length = cache->GetLength(); + for (uint32_t i = 0; i < length; i++) { + JSTaggedValue key = cache->Get(i); + if (!key.IsString()) { + continue; + } + handleKey_.Update(key); + JSTaggedValue value; + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? 
obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + int end = static_cast(jsHclass->NumberOfProps()); + if (end <= 0) { + return hasContent; + } + for (int i = 0; i < end; i++) { + LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); + JSTaggedValue key = layoutInfo->GetKey(i); + if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { + handleKey_.Update(key); + JSTaggedValue value; + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + } + return hasContent; + } + if (obj->IsJSGlobalObject()) { + JSHandle globalDic(propertiesArr); + int size = globalDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = globalDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = globalDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = globalDic->FindEntry(entryKey); + JSTaggedValue value = globalDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; + } + JSHandle nameDic(propertiesArr); + int size = nameDic->Size(); + CVector, PropertyAttributes>> sortArr; + for (int hashIndex = 0; hashIndex < size; hashIndex++) { + JSTaggedValue key = nameDic->GetKey(hashIndex); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = nameDic->GetAttributes(hashIndex); + if (!attr.IsEnumerable()) { + continue; + } + std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); + sortArr.emplace_back(pair); + } + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); + for (const auto &entry : sortArr) { + JSTaggedValue entryKey = entry.first.GetTaggedValue(); + handleKey_.Update(entryKey); + int index = nameDic->FindEntry(entryKey); + JSTaggedValue value = nameDic->GetValue(index); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, 
AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = AppendJsonString(hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + return hasContent; +} + +bool FastJsonStringifier::AppendJsonString(bool hasContent) +{ + if (handleValue_->IsECMAObject() || handleValue_->IsBigInt()) { + JSTaggedValue serializeValue = GetSerializeValue(handleKey_, handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || + (serializeValue.IsECMAObject() && serializeValue.IsCallable()))) { + return hasContent; + } + handleValue_.Update(serializeValue); + } + CString keyStr = SerializeObjectKey(handleKey_, hasContent); + JSTaggedValue res = SerializeJSONProperty(handleValue_); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + if (!res.IsUndefined()) { + return true; + } + EraseKeyString(keyStr, hasContent); + return hasContent; +} + +bool FastJsonStringifier::DefaultSerializeObject(const JSTaggedValue &object, uint32_t numOfKeys, + uint32_t numOfElements) +{ + JSHandle value(thread_, object); + bool isContain = PushValue(value); + if (isContain) { + THROW_TYPE_ERROR_AND_RETURN(thread_, "stack contains value", true); + } + + result_ += "{"; + bool hasContent = false; + + JSHandle obj(value); + if (numOfElements > 0) { + hasContent = DefaultSerializeElements(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + if (numOfKeys > 0) { + hasContent = DefaultSerializeKeys(obj, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } + + result_ += "}"; + PopValue(); + return true; +} +} // namespace panda::ecmascript::base diff --git a/ecmascript/base/fast_json_stringifier.h b/ecmascript/base/fast_json_stringifier.h new file mode 100644 index 0000000000000000000000000000000000000000..32528ed402fc4d81e486f9917e96e7417196514d --- /dev/null +++ b/ecmascript/base/fast_json_stringifier.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
+#define ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
+
+#include "ecmascript/js_tagged_value.h"
+#include "ecmascript/js_handle.h"
+#include "ecmascript/object_factory.h"
+#include "ecmascript/global_env.h"
+#include "ecmascript/mem/c_containers.h"
+
+namespace panda::ecmascript::base {
+class FastJsonStringifier {
+public:
+    static constexpr int32_t INVALID_INDEX = -1;
+    static constexpr int32_t JSON_CACHE_MASK = 62;
+    static constexpr int32_t JSON_CACHE_SIZE = 64;
+    static constexpr int32_t CACHE_MINIMUN_SIZIE = 5;
+    FastJsonStringifier() = default;
+
+    explicit FastJsonStringifier(JSThread *thread) : thread_(thread) {}
+
+    ~FastJsonStringifier() = default;
+    NO_COPY_SEMANTIC(FastJsonStringifier);
+    NO_MOVE_SEMANTIC(FastJsonStringifier);
+
+    JSHandle<JSTaggedValue> Stringify(const JSHandle<JSTaggedValue> &value);
+
+private:
+    JSTaggedValue SerializeJSONProperty(const JSHandle<JSTaggedValue> &value);
+    JSTaggedValue GetSerializeValue(const JSHandle<JSTaggedValue> &key, const JSHandle<JSTaggedValue> &value);
+    CString SerializeObjectKey(const JSHandle<JSTaggedValue> &key, bool hasContent);
+
+    bool SerializeJSONObject(const JSHandle<JSTaggedValue> &value);
+
+    bool SerializeJSArray(const JSHandle<JSTaggedValue> &value);
+    bool SerializeJSProxy(const JSHandle<JSTaggedValue> &object);
+
+    void SerializePrimitiveRef(const JSHandle<JSTaggedValue> &primitiveRef);
+
+    bool PushValue(const JSHandle<JSTaggedValue> &value);
+
+    void PopValue();
+
+    bool AppendJsonString(bool hasContent, CVector<std::pair<CString, int>> &strCache, int index);
+    bool FastAppendJsonString(bool hasContent, CString &key);
+    bool TryCacheSerializeElements(const JSHandle<JSObject> &obj, bool hasContent,
+                                   CVector<std::pair<CString, int>> &strCache);
+    bool SerializeElementsWithCache(const JSHandle<JSObject> &obj, bool hasContent,
+                                    CVector<std::pair<CString, int>> &strCache, uint32_t &cacheIndex,
+                                    uint32_t elementSize);
+    bool TryCacheSerializeKeys(const JSHandle<JSObject> &obj, bool hasContent,
+                               CVector<std::pair<CString, int>> &strCache);
+    bool SerializeKeysWithCache(const JSHandle<JSObject> &obj, bool hasContent,
+                                CVector<std::pair<CString, int>> &strCache, uint32_t &cacheIndex);
+    bool AppendJsonString(bool hasContent);
+    bool DefaultSerializeKeys(const JSHandle<JSObject> &obj, bool hasContent);
+    bool DefaultSerializeElements(const JSHandle<JSObject> &obj, bool hasContent);
+    bool DefaultSerializeObject(const JSTaggedValue &object, uint32_t numOfKeys, uint32_t numOfElements);
+
+    inline void EraseKeyString(CString &keyStr, bool hasContent)
+    {
+        size_t keyLength = keyStr.length() + (hasContent ? 1 : 0) + 1;
+        result_.erase(result_.end() - keyLength, result_.end());
+    }
+
+    inline void FastSerializeObjectKey(CString &key, bool hasContent)
+    {
+        if (hasContent) {
+            result_ += ",";
+        }
+
+        result_ += key;
+        result_ += ":";
+    }
+
+    inline int32_t FindCache(JSHClass *hclass, size_t numOfKeys)
+    {
+        size_t index = GetHash(hclass, numOfKeys);
+        JSTaggedValue cacheHclass = hclassCache_->Get(index);
+        if (cacheHclass != JSTaggedValue::Hole()) {
+            if (JSHClass::Cast(cacheHclass.GetTaggedObject()) == hclass) {
+                return static_cast<int32_t>(index);
+            }
+            cacheHclass = hclassCache_->Get(++index);
+            if (JSHClass::Cast(cacheHclass.GetTaggedObject()) == hclass) {
+                return static_cast<int32_t>(index);
+            }
+        }
+        return INVALID_INDEX;
+    }
+
+    inline void SetCache(JSHClass *hclass, size_t numOfKeys, CVector<std::pair<CString, int>> &value)
+    {
+        size_t index = GetHash(hclass, numOfKeys);
+        JSTaggedValue cacheHclass = hclassCache_->Get(index);
+        if (cacheHclass != JSTaggedValue::Hole()) {
+            cacheHclass = hclassCache_->Get(++index);
+            if (cacheHclass != JSTaggedValue::Hole()) {
+                --index;
+            }
+        }
+        hclassCache_->Set(thread_, index, JSTaggedValue(hclass));
+        thread_->GetCurrentEcmaContext()->SetJsonStringifyCache(index, value);
+    }
+
+    inline size_t GetHash(JSHClass *hclass, size_t numOfKeys)
+    {
+        uintptr_t ptr = reinterpret_cast<uintptr_t>(hclass);
+        size_t hash = (ptr + numOfKeys) & JSON_CACHE_MASK;
+        return hash;
+    }
+
+    CString result_;
+    JSThread *thread_ {nullptr};
+    ObjectFactory *factory_ {nullptr};
+    CVector<JSHandle<JSTaggedValue>> stack_;
+    JSMutableHandle<JSTaggedValue> handleKey_ {};
+    JSMutableHandle<JSTaggedValue> handleValue_ {};
+    bool cacheable_ {true};
+    JSHandle<TaggedArray> hclassCache_ {};
+};
+} // namespace panda::ecmascript::base
+#endif // ECMASCRIPT_BASE_FAST_JSON_STRINGIFY_H
diff --git a/ecmascript/base/file_header.h b/ecmascript/base/file_header.h
index a34ce20ead1f27bbd9926656135e228b97d2961e..1d70e938419f522e5834eed87c6d05d13ca1100d 100644
--- a/ecmascript/base/file_header.h
+++ b/ecmascript/base/file_header.h
@@ -19,15 +19,18 @@
 #include "ecmascript/base/string_helper.h"
 #include "ecmascript/log_wrapper.h"
 #include "utils/bit_utils.h"
+#include "zlib.h"
+
 #include <array>
 #include <cstddef>
 #include <cstdint>
 
 namespace panda::ecmascript::base {
-class FileHeader {
+class FileHeaderBase {
 public:
     static constexpr size_t MAGIC_SIZE = 8;
     static constexpr size_t VERSION_SIZE = 4;
+    static constexpr uint32_t CHECKSUM_END_OFFSET = MAGIC_SIZE + VERSION_SIZE + sizeof(uint32_t);
     static constexpr std::array<uint8_t, MAGIC_SIZE> MAGIC = {'P', 'A', 'N', 'D', 'A', '\0', '\0', '\0'};
     using VersionType = std::array<uint8_t, VERSION_SIZE>;
@@ -59,23 +62,7 @@ public:
         return ret;
     }
 
-protected:
-    explicit FileHeader(const VersionType &lastVersion) : magic_(MAGIC), version_(lastVersion) {}
-
-    static bool VerifyVersion(const char *fileDesc, const VersionType &currVersion, const VersionType &lastVersion,
-                              bool strictMatch)
-    {
-        bool matched = strictMatch ? currVersion == lastVersion : currVersion <= lastVersion;
-        if (!matched) {
-            LOG_HOST_TOOL_ERROR << fileDesc << " version error, expected version should be "
-                                << (strictMatch ? 
"equal to " : "less or equal than ") << ConvToStr(lastVersion) - << ", but got " << ConvToStr(currVersion); - return false; - } - return true; - } - - bool InternalVerify(const char *fileDesc, const VersionType &lastVersion, bool strictMatch) const + bool VerifyVersion(const char *fileDesc, const VersionType &lastVersion, bool strictMatch) const { if (magic_ != MAGIC) { LOG_HOST_TOOL_ERROR << "Magic mismatch, please make sure " << fileDesc @@ -91,11 +78,37 @@ protected: return true; } - bool InternalVerifyVersion(const VersionType &expectVersion) const + bool CompatibleVerify(const VersionType &expectVersion) const { return version_ >= expectVersion; } + VersionType GetVersion() const + { + return version_; + } + + void SetVersion(VersionType version) + { + version_ = version; + } + +protected: + explicit FileHeaderBase(const VersionType &lastVersion) : magic_(MAGIC), version_(lastVersion) {} + + static bool VerifyVersion(const char *fileDesc, const VersionType &currVersion, const VersionType &lastVersion, + bool strictMatch) + { + bool matched = strictMatch ? (currVersion == lastVersion) : (currVersion <= lastVersion); + if (!matched) { + LOG_HOST_TOOL_ERROR << fileDesc << " version error, expected version should be " + << (strictMatch ? "equal to " : "less or equal than ") << ConvToStr(lastVersion) + << ", but got " << ConvToStr(currVersion); + return false; + } + return true; + } + std::string InternalGetVersion() const { return ConvToStr(version_); @@ -124,5 +137,52 @@ private: VersionType version_; }; +class FileHeaderElastic : public FileHeaderBase { +public: + static constexpr uint32_t ENDIAN_VALUE = 0x12345678; + void SetChecksum(uint32_t checksum) + { + checksum_ = checksum; + } + + uint32_t GetChecksum() const + { + return checksum_; + } + + void SetHeaderSize(uint32_t size) + { + headerSize_ = size; + } + + uint32_t GetHeaderSize() const + { + return headerSize_; + } + + void SetFileSize(uint32_t size) + { + fileSize_ = size; + } + + uint32_t GetFileSize() const + { + return fileSize_; + } + + uint32_t GetEndianTag() const + { + return endianTag_; + } + +protected: + explicit FileHeaderElastic(const VersionType &lastVersion) : FileHeaderBase(lastVersion) {} + +private: + uint32_t checksum_ {0}; + uint32_t fileSize_ {0}; + uint32_t headerSize_ {0}; + uint32_t endianTag_ {ENDIAN_VALUE}; +}; } // namespace panda::ecmascript::base #endif // ECMASCRIPT_BASE_FILE_HEADER_H diff --git a/ecmascript/base/json_helper.cpp b/ecmascript/base/json_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0fb8de8bd83dc65c3d352c95bfd24d0ced7449d --- /dev/null +++ b/ecmascript/base/json_helper.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "ecmascript/base/json_helper.h"
+
+#include <cstring>
+#include <iomanip>
+#include <sstream>
+
+namespace panda::ecmascript::base {
+constexpr unsigned char CODE_SPACE = 0x20;
+constexpr int FOUR_HEX = 4;
+constexpr char ZERO_FIRST = static_cast<char>(0xc0); // \u0000 => c0 80
+
+bool JsonHelper::IsFastValueToQuotedString(const char *value)
+{
+    if (strpbrk(value, "\"\\\b\f\n\r\t") != nullptr) {
+        return false;
+    }
+    while (*value != '\0') {
+        if ((*value > 0 && *value < CODE_SPACE) || *value == ZERO_FIRST) {
+            return false;
+        }
+        value++;
+    }
+    return true;
+}
+
+CString JsonHelper::ValueToQuotedString(CString str)
+{
+    CString product;
+    const char *value = str.c_str();
+    // fast mode
+    bool isFast = IsFastValueToQuotedString(value);
+    if (isFast) {
+        product += "\"";
+        product += str;
+        product += "\"";
+        return product;
+    }
+    // 1. Let product be code unit 0x0022 (QUOTATION MARK).
+    product += "\"";
+    // 2. For each code unit C in value
+    for (const char *c = value; *c != 0; ++c) {
+        switch (*c) {
+            /*
+             * a. If C is 0x0022 (QUOTATION MARK) or 0x005C (REVERSE SOLIDUS), then
+             *    i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+             *    ii. Let product be the concatenation of product and C.
+             */
+            case '\"':
+                product += "\\\"";
+                break;
+            case '\\':
+                product += "\\\\";
+                break;
+            /*
+             * b. Else if C is 0x0008 (BACKSPACE), 0x000C (FORM FEED), 0x000A (LINE FEED), 0x000D (CARRIAGE RETURN),
+             *    or 0x000B (LINE TABULATION), then
+             *    i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+             *    ii. Let abbrev be the String value corresponding to the value of C as follows:
+             *        BACKSPACE "b"
+             *        FORM FEED (FF) "f"
+             *        LINE FEED (LF) "n"
+             *        CARRIAGE RETURN (CR) "r"
+             *        LINE TABULATION "t"
+             *    iii. Let product be the concatenation of product and abbrev.
+             */
+            case '\b':
+                product += "\\b";
+                break;
+            case '\f':
+                product += "\\f";
+                break;
+            case '\n':
+                product += "\\n";
+                break;
+            case '\r':
+                product += "\\r";
+                break;
+            case '\t':
+                product += "\\t";
+                break;
+            case ZERO_FIRST:
+                product += "\\u0000";
+                ++c;
+                break;
+            default:
+                // c. Else if C has a code unit value less than 0x0020 (SPACE), then
+                if (*c > 0 && *c < CODE_SPACE) {
+                    /*
+                     * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS).
+                     * ii. Let product be the concatenation of product and "u".
+                     * iii. Let hex be the string result of converting the numeric code unit value of C to a String of
+                     *      four hexadecimal digits. Alphabetic hexadecimal digits are presented as lowercase Latin
+                     *      letters.
+                     * iv. Let product be the concatenation of product and hex.
+                     */
+                    std::ostringstream oss;
+                    oss << "\\u" << std::hex << std::setfill('0') << std::setw(FOUR_HEX) << static_cast<int>(*c);
+                    product += oss.str();
+                } else {
+                    // Else,
+                    // i. Let product be the concatenation of product and C.
+                    product += *c;
+                }
+        }
+    }
+    // 3. Let product be the concatenation of product and code unit 0x0022 (QUOTATION MARK).
+    product += "\"";
+    // Return product.
+    return product;
+}
+} // namespace panda::ecmascript::base
\ No newline at end of file
diff --git a/ecmascript/base/json_helper.h b/ecmascript/base/json_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..7cd093f9c5c82a50df600704058b09af1e2e5e34
--- /dev/null
+++ b/ecmascript/base/json_helper.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_BASE_JSON_HELPER_H
+#define ECMASCRIPT_BASE_JSON_HELPER_H
+
+#include "ecmascript/js_handle.h"
+#include "ecmascript/mem/c_string.h"
+#include "ecmascript/property_attributes.h"
+
+namespace panda::ecmascript::base {
+
+class JsonHelper {
+public:
+    static CString ValueToQuotedString(CString str);
+
+    static bool IsFastValueToQuotedString(const char *value);
+
+    static inline bool CompareKey(const std::pair<JSHandle<JSTaggedValue>, PropertyAttributes> &a,
+                                  const std::pair<JSHandle<JSTaggedValue>, PropertyAttributes> &b)
+    {
+        return a.second.GetDictionaryOrder() < b.second.GetDictionaryOrder();
+    }
+
+    static inline bool CompareNumber(const JSHandle<JSTaggedValue> &a, const JSHandle<JSTaggedValue> &b)
+    {
+        return a->GetNumber() < b->GetNumber();
+    }
+};
+
+} // namespace panda::ecmascript::base
+
+#endif // ECMASCRIPT_BASE_JSON_HELPER_H
\ No newline at end of file
diff --git a/ecmascript/base/json_parser.cpp b/ecmascript/base/json_parser.cpp
index 69dbf3405e6f4a5a46f76f64072f10b80e5a2d5a..6cd134cbfa696edc3e58138cf0c2e15c92727d78 100644
--- a/ecmascript/base/json_parser.cpp
+++ b/ecmascript/base/json_parser.cpp
@@ -22,9 +22,11 @@ JSHandle<JSTaggedValue> Internalize::InternalizeJsonProperty(JSThread *thread, c
 {
     JSHandle<JSTaggedValue> objHandle(holder);
     JSHandle<JSTaggedValue> val = JSTaggedValue::GetProperty(thread, objHandle, name).GetValue();
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     JSHandle<JSTaggedValue> lengthKey = thread->GlobalConstants()->GetHandledLengthString();
     if (val->IsECMAObject()) {
         JSHandle<JSObject> obj = JSTaggedValue::ToObject(thread, val);
+        RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
         bool isArray = val->IsArray(thread);
         if (isArray) {
             JSHandle<JSTaggedValue> lenResult = JSTaggedValue::GetProperty(thread, val, lengthKey).GetValue();
@@ -38,6 +40,7 @@
             // Let prop be ! ToString((I)).
             keyUnknow.Update(JSTaggedValue(i));
             keyName.Update(JSTaggedValue::ToString(thread, keyUnknow).GetTaggedValue());
+            RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
             RecurseAndApply(thread, obj, keyName, receiver);
         }
     } else {
@@ -54,12 +57,13 @@
     }
     // Return ? Call(receiver, holder, « name, val »).
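// Illustrative sketch, not part of the patch: the InternalizeJsonProperty
// hunks above insert a pending-exception check after every operation that can
// re-enter JS (GetProperty, ToObject, ToString, Call). The shape of that
// "call, then bail out if an exception is now pending" pattern, with a
// hypothetical minimal thread type:
#include <optional>
#include <string>

struct MiniThread {
    bool hasPendingException = false;
};

static std::optional<std::string> GetPropertyChecked(MiniThread &thread, const std::string &name)
{
    std::string value = "value-of-" + name;  // stands in for a call that may throw
    if (thread.hasPendingException) {
        return std::nullopt;                 // mirrors RETURN_HANDLE_IF_ABRUPT_COMPLETION
    }
    return value;
}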
-    const int32_t argsLength = 2; // 2: « name, val »
+    const uint32_t argsLength = 2; // 2: « name, val »
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
     EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, receiver, objHandle, undefined, argsLength);
     RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     info->SetCallArg(name.GetTaggedValue(), val.GetTaggedValue());
     JSTaggedValue result = JSFunction::Call(info);
+    RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread);
     return JSHandle<JSTaggedValue>(thread, result);
 }
diff --git a/ecmascript/base/json_parser.h b/ecmascript/base/json_parser.h
index ef23776a6ca2c736e94bd1fe8f9d47bce1ed24fc..9ca84a1c80674295b118e9f869446b080a66f306 100644
--- a/ecmascript/base/json_parser.h
+++ b/ecmascript/base/json_parser.h
@@ -16,6 +16,8 @@
 #ifndef ECMASCRIPT_BASE_JSON_PARSE_INL_H
 #define ECMASCRIPT_BASE_JSON_PARSE_INL_H
 
+#include <cerrno>
+
 #include "ecmascript/base/json_parser.h"
 #include "ecmascript/base/builtins_base.h"
 #include "ecmascript/base/number_helper.h"
@@ -52,16 +54,24 @@ enum class Tokens : uint8_t {
 
 template<typename T>
 class JsonParser {
-public:
+protected:
     using Text = const T *;
+    // Instantiation of the base class is prohibited
     JsonParser() = default;
     explicit JsonParser(JSThread *thread) : thread_(thread) {}
     ~JsonParser() = default;
     NO_COPY_SEMANTIC(JsonParser);
     NO_MOVE_SEMANTIC(JsonParser);
-    JSHandle<JSTaggedValue> Parse(Text begin, Text end)
+
+    JSHandle<JSTaggedValue> Launch(Text begin, Text end)
     {
-        end_ = (end == begin) ? end : end - 1;
+        // check empty
+        if (UNLIKELY(begin == end)) {
+            return JSHandle<JSTaggedValue>(thread_, [&]() -> JSTaggedValue {
+                THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception());
+            }());
+        }
+        end_ = end - 1;
         current_ = begin;
 
         auto vm = thread_->GetEcmaVM();
@@ -70,62 +80,37 @@
         SkipEndWhiteSpace();
         range_ = end_;
-        JSTaggedValue result = ParseJSONText<false>();
+        JSTaggedValue result = ParseJSONText();
         return JSHandle<JSTaggedValue>(thread_, result);
     }
 
-    JSHandle<JSTaggedValue> ParseUtf8(EcmaString *str)
-    {
-        ASSERT(str != nullptr);
-        isAsciiString_ = true;
-        uint32_t len = EcmaStringAccessor(str).GetLength();
-        ASSERT(len != UINT32_MAX);
-        CVector<T> buf(len + 1); // 1 means add '\0' in the end of buf
-        EcmaStringAccessor(str).WriteToFlatUtf8(buf.data(), len + 1);
-        Text begin = buf.data();
-        return Parse(begin, begin + len);
-    }
-
-    JSHandle<JSTaggedValue> ParseUtf16(EcmaString *str)
-    {
-        ASSERT(str != nullptr);
-        uint32_t len = EcmaStringAccessor(str).GetLength();
-        CVector<T> buf(len);
-        EcmaStringAccessor(str).WriteToFlatUtf16(buf.data(), len);
-        Text begin = buf.data();
-        return Parse(begin, begin + len);
-    }
-
-private:
-    template<bool inObjOrArr = false>
-    JSTaggedValue ParseJSONText()
+    JSTaggedValue ParseJSONText(bool inObjorArr = false)
     {
         SkipStartWhiteSpace();
         Tokens token = ParseToken();
         switch (token) {
             case Tokens::OBJECT:
-                return ParseObject<inObjOrArr>();
+                return ParseObject(inObjorArr);
             case Tokens::ARRAY:
-                return ParseArray<inObjOrArr>();
+                return ParseArray(inObjorArr);
             case Tokens::LITERAL_TRUE:
-                return ParseLiteral("true", Tokens::LITERAL_TRUE);
+                return ParseLiteralTrue();
            case Tokens::LITERAL_FALSE:
-                return ParseLiteral("false", Tokens::LITERAL_FALSE);
+                return ParseLiteralFalse();
             case Tokens::LITERAL_NULL:
-                return ParseLiteral("null", Tokens::LITERAL_NULL);
+                return ParseLiteralNull();
             case Tokens::NUMBER:
-                return ParseNumber<inObjOrArr>();
+                return ParseNumber(inObjorArr);
             case Tokens::STRING:
-                return ParseString<inObjOrArr>();
+                return ParseString(inObjorArr);
             default:
                 THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception());
         }
     }
 
-    template<bool inObjOrArr = false>
-
JSTaggedValue ParseNumber() + JSTaggedValue ParseNumber(bool inObjorArr = false) { - if (inObjOrArr) { + if (inObjorArr) { bool isFast = true; bool isNumber = ReadNumberRange(isFast); if (!isNumber) { @@ -134,7 +119,13 @@ private: if (isFast) { std::string strNum(current_, end_ + 1); current_ = end_; - double v = std::stod(strNum); + errno = 0; // reset errno to 0 to avoid errno has been changed + double v = std::strtod(strNum.c_str(), nullptr); + if (errno == ERANGE) { + errno = 0; + return v > 0 ? JSTaggedValue(base::POSITIVE_INFINITY): JSTaggedValue(-base::POSITIVE_INFINITY); + } + errno = 0; return JSTaggedValue::TryCastDoubleToInt32(v); } } @@ -160,66 +151,52 @@ private: std::string strNum(current, end_ + 1); current_ = end_; - double v = std::stod(strNum); - return JSTaggedValue::TryCastDoubleToInt32(v); - } - - bool ReadJsonStringRange(bool &isFastString, bool &isAscii) - { - current_++; - if (isAsciiString_) { - return ReadAsciiStringRange(isFastString); + errno = 0; // reset errno to 0 to avoid errno has been changed + double v = std::strtod(strNum.c_str(), nullptr); + if (errno == ERANGE) { + errno = 0; + return v > 0 ? JSTaggedValue(base::POSITIVE_INFINITY): JSTaggedValue(-base::POSITIVE_INFINITY); } - return ReadStringRange(isFastString, isAscii); - } - - bool IsFastParseJsonString(bool &isFastString, bool &isAscii) - { - current_++; - if (isAsciiString_) { - return IsFastParseAsciiString(isFastString); - } - return IsFastParseString(isFastString, isAscii); + errno = 0; + return JSTaggedValue::TryCastDoubleToInt32(v); } - bool ParseBackslash(CString &res) + bool ParseBackslash(std::string &res) { if (current_ == end_) { return false; } - current_++; + Advance(); switch (*current_) { case '\"': - res += "\""; + res += '\"'; break; case '\\': - res += "\\"; + res += '\\'; break; case '/': - res += "/"; + res += '/'; break; case 'b': - res += "\b"; + res += '\b'; break; case 'f': - res += "\f"; + res += '\f'; break; case 'n': - res += "\n"; + res += '\n'; break; case 'r': - res += "\r"; + res += '\r'; break; case 't': - res += "\t"; + res += '\t'; break; case 'u': { - CVector vec; - if (UNLIKELY(!ConvertStringUnicode(vec))) { + std::u16string u16Str; + if (UNLIKELY(!ConvertStringUnicode(u16Str))) { return false; } - std::u16string u16Str; - u16Str.assign(vec.begin(), vec.end()); res += base::StringHelper::U16stringToString(u16Str); break; } @@ -232,93 +209,40 @@ private: JSTaggedValue SlowParseString() { end_--; - CString res; + std::string res; + res.reserve(end_ - current_); while (current_ <= end_) { if (*current_ == '\\') { bool isLegalChar = ParseBackslash(res); if (!isLegalChar) { THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected string in JSON", JSTaggedValue::Exception()); } - current_++; - } else if (UNLIKELY(*current_ > ASCII_END)) { - if (UNLIKELY(*current_ > utf_helper::DECODE_LEAD_LOW && *current_ < utf_helper::DECODE_LEAD_HIGH && - *(current_ + 1) > utf_helper::DECODE_TRAIL_LOW && - *(current_ + 1) < utf_helper::DECODE_TRAIL_HIGH)) { - std::u16string str(current_, current_ + 2); // 2 means twice as many bytes as normal u16string - res += ConvertToString(StringHelper::U16stringToString(str)); - current_ += 2; // 2 means twice as many bytes as normal u16string - } else { - std::u16string str(current_, current_ + 1); - res += ConvertToString(StringHelper::U16stringToString(str)); - current_++; - } + Advance(); } else { - res += *current_; - current_++; + Text nextCurrent = current_; + while (nextCurrent <= end_ && *nextCurrent != '\\') { + ++nextCurrent; + } + 
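// Illustrative sketch, not part of the patch: ParseNumber above swaps
// std::stod (which throws std::out_of_range on overflow) for std::strtod plus
// an errno/ERANGE check, clamping overflowed literals to +/-Infinity. Note
// that strtod may also report underflow via ERANGE (implementation-defined)
// with a near-zero result, which this check would map to Infinity as well;
// the sketch simply mirrors the patch's behavior.
#include <cerrno>
#include <cstdlib>
#include <limits>

static double ParseJsonNumber(const char *text)
{
    errno = 0;  // strtod only sets errno, it never clears a stale value
    double v = std::strtod(text, nullptr);
    if (errno == ERANGE) {
        errno = 0;
        return v > 0 ? std::numeric_limits<double>::infinity()
                     : -std::numeric_limits<double>::infinity();
    }
    return v;
}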
ParticalParseString(res, current_, nextCurrent); + current_ = nextCurrent; } } - ASSERT(res.length() <= static_cast(UINT32_MAX)); - return factory_->NewFromUtf8Literal(reinterpret_cast(res.c_str()), res.length()) + ASSERT(res.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf8Literal(reinterpret_cast(res.c_str()), res.size()) .GetTaggedValue(); } - template - JSTaggedValue ParseString() - { - bool isFastString = true; - bool isAscii = true; - bool isLegal = true; - if (inObjorArr) { - isLegal = ReadJsonStringRange(isFastString, isAscii); - if (!isLegal) { - THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); - } - if (isFastString) { - if (isAscii) { - CString value(current_, end_); - current_ = end_; - ASSERT(value.length() <= static_cast(UINT32_MAX)); - return factory_->NewFromUtf8LiteralCompress( - reinterpret_cast(value.c_str()), value.length()).GetTaggedValue(); - } - std::u16string value(current_, end_); - current_ = end_; - ASSERT(value.length() <= static_cast(UINT32_MAX)); - return factory_->NewFromUtf16LiteralNotCompress( - reinterpret_cast(value.c_str()), value.length()).GetTaggedValue(); - } - } else { - if (*end_ != '"' || current_ == end_) { - THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); - } - isLegal = IsFastParseJsonString(isFastString, isAscii); - if (!isLegal) { - THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); - } - if (LIKELY(isFastString)) { - if (isAscii) { - CString value(current_, end_); - ASSERT(value.length() <= static_cast(UINT32_MAX)); - return factory_->NewFromUtf8LiteralCompress( - reinterpret_cast(value.c_str()), value.length()).GetTaggedValue(); - } - std::u16string value(current_, end_); - ASSERT(value.length() <= static_cast(UINT32_MAX)); - return factory_->NewFromUtf16LiteralNotCompress( - reinterpret_cast(value.c_str()), value.length()).GetTaggedValue(); - } - } - return SlowParseString(); - } + virtual void ParticalParseString(std::string& str, Text current, Text nextCurrent) = 0; - template - JSTaggedValue ParseArray() + virtual JSTaggedValue ParseString(bool inObjorArr = false) = 0; + + JSTaggedValue ParseArray(bool inObjorArr = false) { if (UNLIKELY(*range_ != ']' && !inObjorArr)) { THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Array in JSON", JSTaggedValue::Exception()); } - current_++; + Advance(); SkipStartWhiteSpace(); JSHandle arr = factory_->NewJSArray(); if (*current_ == ']') { @@ -328,12 +252,12 @@ private: JSTaggedValue value; uint32_t index = 0; while (current_ <= range_) { - value = ParseJSONText(); + value = ParseJSONText(true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); ObjectFastOperator::SetPropertyByIndex(thread_, arr.GetTaggedValue(), index++, value); GetNextNonSpaceChar(); if (*current_ == ',') { - current_++; + Advance(); } else if (*current_ == ']') { if (inObjorArr || current_ == range_) { return arr.GetTaggedValue(); @@ -345,8 +269,7 @@ private: THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Array in JSON", JSTaggedValue::Exception()); } - template - JSTaggedValue ParseObject() + JSTaggedValue ParseObject(bool inObjorArr = false) { if (UNLIKELY(*range_ != '}' && !inObjorArr)) { THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Object in JSON", JSTaggedValue::Exception()); @@ -354,7 +277,7 @@ private: JSHandle proto(env_->GetObjectFunction()); JSHandle result = factory_->NewJSObjectByConstructor(proto); - current_++; + Advance(); if (*current_ == '}') 
{ return result.GetTaggedValue(); } @@ -364,7 +287,8 @@ private: while (current_ <= range_) { SkipStartWhiteSpace(); if (*current_ == '"') { - keyHandle.Update(ParseString()); + keyHandle.Update(ParseString(true)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); } else { if (*current_ == '}' && (inObjorArr || current_ == range_)) { return result.GetTaggedValue(); @@ -373,11 +297,12 @@ private: } GetNextNonSpaceChar(); if (*current_ == ':') { - current_++; + Advance(); } else { THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Object in JSON", JSTaggedValue::Exception()); } - value = ParseJSONText(); + value = ParseJSONText(true); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); // fast path JSTaggedValue res = ObjectFastOperator::SetPropertyByValue(thread_, result.GetTaggedValue(), keyHandle.GetTaggedValue(), value); @@ -385,10 +310,11 @@ private: // slow path JSTaggedValue::SetProperty(thread_, JSHandle(result), keyHandle, JSHandle(thread_, value), true); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); } GetNextNonSpaceChar(); if (*current_ == ',') { - current_++; + Advance(); } else if (*current_ == '}') { if (inObjorArr || current_ == range_) { return result.GetTaggedValue(); @@ -415,7 +341,7 @@ private: { while (current_ != end_) { if (*current_ == ' ' || *current_ == '\r' || *current_ == '\n' || *current_ == '\t') { - current_++; + Advance(); } else { break; } @@ -424,7 +350,7 @@ private: void GetNextNonSpaceChar() { - current_++; + Advance(); SkipStartWhiteSpace(); } @@ -460,41 +386,55 @@ private: } } - JSTaggedValue ParseLiteral(CString str, Tokens literalToken) + JSTaggedValue ParseLiteralTrue() { - ASSERT((str.size() - 1) <= static_cast(UINT32_MAX)); - uint32_t strLen = str.size() - 1; + static const char literalTrue[] = "true"; uint32_t remainingLength = range_ - current_; - if (UNLIKELY(remainingLength < strLen)) { + if (UNLIKELY(remainingLength < 3)) { // 3: literalTrue length - 1 THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); } + bool isMatch = MatchText(literalTrue, 3); // 3: literalTrue length - 1 + if (LIKELY(isMatch)) { + return JSTaggedValue::True(); + } + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); + } - bool isMatch = MatchText(str, strLen); + JSTaggedValue ParseLiteralFalse() + { + static const char literalFalse[] = "false"; + uint32_t remainingLength = range_ - current_; + if (UNLIKELY(remainingLength < 4)) { // 4: literalFalse length - 1 + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); + } + bool isMatch = MatchText(literalFalse, 4); // 4: literalFalse length - 1 if (LIKELY(isMatch)) { - switch (literalToken) { - case Tokens::LITERAL_TRUE: - return JSTaggedValue::True(); - case Tokens::LITERAL_FALSE: - return JSTaggedValue::False(); - case Tokens::LITERAL_NULL: - return JSTaggedValue::Null(); - default: - LOG_ECMA(FATAL) << "this branch is unreachable"; - UNREACHABLE(); - } + return JSTaggedValue::False(); + } + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); + } + + JSTaggedValue ParseLiteralNull() + { + static const char literalNull[] = "null"; + uint32_t remainingLength = range_ - current_; + if (UNLIKELY(remainingLength < 3)) { // 3: literalNull length - 1 + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); + } + bool isMatch = MatchText(literalNull, 3); // 3: literalNull length - 1 + if (LIKELY(isMatch)) { + return 
JSTaggedValue::Null(); } THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected Text in JSON", JSTaggedValue::Exception()); } - bool MatchText(CString str, uint32_t matchLen) + bool MatchText(const char *str, uint32_t matchLen) { - const char *text = str.c_str(); - uint32_t pos = 1; - while (pos <= matchLen) { - if (current_[pos] != text[pos]) { + // first char is already matched + for (uint32_t pos = 1; pos <= matchLen; ++pos) { + if (current_[pos] != str[pos]) { return false; } - pos++; } current_ += matchLen; return true; @@ -565,7 +505,7 @@ private: if (current_ == end_) { return false; } - current_++; + Advance(); if (IsNumberCharacter(*current_)) { return true; } @@ -580,14 +520,14 @@ private: } while (current_ != end_) { - current_++; + Advance(); if (IsNumberCharacter(*current_)) { continue; } else if (*current_ == 'e' || *current_ == 'E') { if (hasExponent || current_ == end_) { return false; } - current_++; + Advance(); if (!IsExponentNumber()) { return false; } @@ -604,7 +544,7 @@ private: if (hasExponent || current_ == end_) { return false; } - current_++; + Advance(); if (!IsExponentNumber()) { return false; } @@ -612,111 +552,46 @@ private: if (!IsNumberCharacter(*current_)) { return false; } - current_++; + Advance(); } return true; } - bool ReadStringRange(bool &isFast, bool &isAscii) + bool ConvertStringUnicode(std::u16string &u16Str) { - T c = 0; - Text current = current_; - - while (current != range_) { - c = *current; - if (c == '"') { - end_ = current; - return true; - } else if (UNLIKELY(c == '\\')) { - current++; - isFast = false; - } - if (!IsLegalAsciiCharacter(c, isAscii)) { + do { + uint32_t remainingLength = end_ - current_; + if (remainingLength < UNICODE_DIGIT_LENGTH) { return false; } - current++; - } - return false; - } - - bool ReadAsciiStringRange(bool &isFast) - { - T c = 0; - Text current = current_; - - while (current != range_) { - c = *current; - if (c == '"') { - end_ = current; - return true; - } else if (UNLIKELY(c == '\\')) { - current++; - isFast = false; - } else if (UNLIKELY(c < CODE_SPACE)) { - return false; + uint16_t res = 0; + for (uint32_t pos = 0; pos < UNICODE_DIGIT_LENGTH; pos++) { + Advance(); + if (*current_ >= '0' && *current_ <= '9') { + res *= NUMBER_SIXTEEN; + res += (*current_ - '0'); + } else if (*current_ >= 'a' && *current_ <= 'f') { + res *= NUMBER_SIXTEEN; + res += (*current_ - 'a' + NUMBER_TEN); + } else if (*current_ >= 'A' && *current_ <= 'F') { + res *= NUMBER_SIXTEEN; + res += (*current_ - 'A' + NUMBER_TEN); + } else { + return false; + } } - current++; - } - return false; - } - - bool IsFastParseString(bool &isFast, bool &isAscii) - { - Text current = current_; - while (current != end_) { - if (!IsLegalAsciiCharacter(*current, isAscii)) { + u16Str.push_back(res); + } while ([&]() -> bool { + static const int unicodePrefixLen = 2; + if (end_ - current_ < unicodePrefixLen) { return false; } - if (*current == '\\') { - isFast = false; - } - current++; - } - return true; - } - - bool IsFastParseAsciiString(bool &isFast) - { - Text current = current_; - while (current != end_) { - if (*current < CODE_SPACE) { - return false; - } else if (*current == '\\') { - isFast = false; + if (*(current_ + 1) == '\\' && *(current_ + unicodePrefixLen) == 'u') { + AdvanceMultiStep(unicodePrefixLen); + return true; } - current++; - } - return true; - } - - bool ConvertStringUnicode(CVector &vec) - { - uint32_t remainingLength = end_ - current_; - if (remainingLength < UNICODE_DIGIT_LENGTH) { return false; - } - uint16_t res = 0; - uint32_t 
exponent = UNICODE_DIGIT_LENGTH; - for (uint32_t pos = 0; pos < UNICODE_DIGIT_LENGTH; pos++) { - current_++; - exponent--; - if (*current_ >= '0' && *current_ <= '9') { - res += (*current_ - '0') * pow(NUMBER_SIXTEEN, exponent); - } else if (*current_ >= 'a' && *current_ <= 'f') { - res += (*current_ - 'a' + NUMBER_TEN) * pow(NUMBER_SIXTEEN, exponent); - } else if (*current_ >= 'A' && *current_ <= 'F') { - res += (*current_ - 'A' + NUMBER_TEN) * pow(NUMBER_SIXTEEN, exponent); - } else { - return false; - } - } - - vec.emplace_back(res); - - if (*(current_ + 1) == '\\' && *(current_ + 2) == 'u') { // 2: next two chars - current_ += 2; // 2: point moves backwards by two digits - return ConvertStringUnicode(vec); - } + }()); return true; } @@ -741,7 +616,7 @@ private: bool CheckNonZeroBeginNumber(bool &hasExponent) { while (current_ != end_) { - current_++; + Advance(); if (IsNumberCharacter(*current_)) { continue; } else if (*current_ == '.') { @@ -759,19 +634,16 @@ private: return true; } - bool IsLegalAsciiCharacter(T c, bool &isAscii) + inline void Advance() { - if (c <= ASCII_END) { - if (c >= CODE_SPACE) { - return true; - } - return false; - } - isAscii = false; - return true; + ++current_; + } + + inline void AdvanceMultiStep(int step) + { + current_ += step; } - bool isAsciiString_ {false}; Text end_ {nullptr}; Text current_ {nullptr}; Text range_ {nullptr}; @@ -780,6 +652,214 @@ private: GlobalEnv *env_ {nullptr}; }; +class Utf8JsonParser : public JsonParser { +public: + Utf8JsonParser() = default; + explicit Utf8JsonParser(JSThread *thread) : JsonParser(thread) {} + ~Utf8JsonParser() = default; + NO_COPY_SEMANTIC(Utf8JsonParser); + NO_MOVE_SEMANTIC(Utf8JsonParser); + + JSHandle Parse(EcmaString *str) + { + ASSERT(str != nullptr); + uint32_t len = EcmaStringAccessor(str).GetLength(); + ASSERT(len != UINT32_MAX); + CVector buf(len + 1); + EcmaStringAccessor(str).WriteToFlatUtf8(buf.data(), len); + Text begin = buf.data(); + return Launch(begin, begin + len); + } + +private: + void ParticalParseString(std::string& str, Text current, Text nextCurrent) override + { + str += std::string_view(reinterpret_cast(current), nextCurrent - current); + } + + JSTaggedValue ParseString(bool inObjorArr = false) override + { + bool isFastString = true; + bool isLegal = true; + if (inObjorArr) { + isLegal = ReadJsonStringRange(isFastString); + if (!isLegal) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + if (isFastString) { + std::string_view value(reinterpret_cast(current_), end_ - current_); + current_ = end_; + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf8LiteralCompress( + reinterpret_cast(value.data()), value.size()).GetTaggedValue(); + } + } else { + if (*end_ != '"' || current_ == end_) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + isLegal = IsFastParseJsonString(isFastString); + if (!isLegal) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + if (LIKELY(isFastString)) { + std::string_view value(reinterpret_cast(current_), end_ - current_); + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf8LiteralCompress( + reinterpret_cast(value.data()), value.size()).GetTaggedValue(); + } + } + return SlowParseString(); + } + + bool ReadJsonStringRange(bool &isFastString) + { + Advance(); + // chars are within Ascii + for (Text current = current_; current != range_; 
++current) { + uint8_t c = *current; + if (c == '"') { + end_ = current; + return true; + } else if (UNLIKELY(c == '\\')) { + current++; + isFastString = false; + } else if (UNLIKELY(c < CODE_SPACE)) { + return false; + } + } + return false; + } + + bool IsFastParseJsonString(bool &isFastString) + { + Advance(); + // chars are within Ascii + for (Text current = current_; current != end_; ++current) { + if (*current < CODE_SPACE) { + return false; + } else if (*current == '\\') { + isFastString = false; + } + } + return true; + } +}; + +class Utf16JsonParser : public JsonParser { +public: + Utf16JsonParser() = default; + explicit Utf16JsonParser(JSThread *thread) : JsonParser(thread) {} + ~Utf16JsonParser() = default; + NO_COPY_SEMANTIC(Utf16JsonParser); + NO_MOVE_SEMANTIC(Utf16JsonParser); + + JSHandle Parse(EcmaString *str) + { + ASSERT(str != nullptr); + uint32_t len = EcmaStringAccessor(str).GetLength(); + CVector buf(len); + EcmaStringAccessor(str).WriteToFlatUtf16(buf.data(), len); + Text begin = buf.data(); + return Launch(begin, begin + len); + } + +private: + void ParticalParseString(std::string& str, Text current, Text nextCurrent) override + { + str += StringHelper::U16stringToString(std::u16string(current, nextCurrent)); + } + + JSTaggedValue ParseString(bool inObjorArr = false) override + { + bool isFastString = true; + bool isAscii = true; + bool isLegal = true; + if (inObjorArr) { + isLegal = ReadJsonStringRange(isFastString, isAscii); + if (!isLegal) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + if (isFastString) { + if (isAscii) { + std::string value(current_, end_); // from uint16_t* to std::string, can't use std::string_view + current_ = end_; + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf8LiteralCompress( + reinterpret_cast(value.c_str()), value.size()).GetTaggedValue(); + } + std::u16string_view value(reinterpret_cast(current_), end_ - current_); + current_ = end_; + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf16LiteralNotCompress( + reinterpret_cast(value.data()), value.size()).GetTaggedValue(); + } + } else { + if (*end_ != '"' || current_ == end_) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + isLegal = IsFastParseJsonString(isFastString, isAscii); + if (!isLegal) { + THROW_SYNTAX_ERROR_AND_RETURN(thread_, "Unexpected end Text in JSON", JSTaggedValue::Exception()); + } + if (LIKELY(isFastString)) { + if (isAscii) { + std::string value(current_, end_); // from uint16_t* to std::string, can't use std::string_view + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf8LiteralCompress( + reinterpret_cast(value.c_str()), value.size()).GetTaggedValue(); + } + std::u16string_view value(reinterpret_cast(current_), end_ - current_); + ASSERT(value.size() <= static_cast(UINT32_MAX)); + return factory_->NewFromUtf16LiteralNotCompress( + reinterpret_cast(value.data()), value.size()).GetTaggedValue(); + } + } + return SlowParseString(); + } + + bool ReadJsonStringRange(bool &isFastString, bool &isAscii) + { + Advance(); + for (Text current = current_; current != range_; ++current) { + uint16_t c = *current; + if (c == '"') { + end_ = current; + return true; + } else if (UNLIKELY(c == '\\')) { + ++current; + isFastString = false; + } + if (!IsLegalAsciiCharacter(c, isAscii)) { + return false; + } + } + return false; + } + + bool IsFastParseJsonString(bool 
&isFastString, bool &isAscii) + { + Advance(); + for (Text current = current_; current != end_; ++current) { + if (!IsLegalAsciiCharacter(*current, isAscii)) { + return false; + } + if (*current == '\\') { + isFastString = false; + } + } + return true; + } + + bool IsLegalAsciiCharacter(uint16_t c, bool &isAscii) + { + if (c <= ASCII_END) { + return c >= CODE_SPACE ? true : false; + } + isAscii = false; + return true; + } +}; + class Internalize { public: static JSHandle InternalizeJsonProperty(JSThread *thread, const JSHandle &holder, diff --git a/ecmascript/base/json_stringifier.cpp b/ecmascript/base/json_stringifier.cpp index 133f91feaa8b9258f5ec7fcd66445138f642424e..b0fce8f5dad9fdf8894db5c208bd2febf4f41cb7 100644 --- a/ecmascript/base/json_stringifier.cpp +++ b/ecmascript/base/json_stringifier.cpp @@ -15,11 +15,8 @@ #include "ecmascript/base/json_stringifier.h" -#include -#include -#include - #include "ecmascript/base/builtins_base.h" +#include "ecmascript/base/json_helper.h" #include "ecmascript/base/number_helper.h" #include "ecmascript/builtins/builtins_errors.h" #include "ecmascript/ecma_runtime_call_info.h" @@ -36,109 +33,7 @@ #include "ecmascript/object_fast_operator-inl.h" namespace panda::ecmascript::base { -constexpr unsigned char CODE_SPACE = 0x20; constexpr int GAP_MAX_LEN = 10; -constexpr int FOUR_HEX = 4; -constexpr char ZERO_FIRST = static_cast(0xc0); // \u0000 => c0 80 - -bool JsonStringifier::IsFastValueToQuotedString(const char *value) -{ - if (strpbrk(value, "\"\\\b\f\n\r\t") != nullptr) { - return false; - } - while (*value != '\0') { - if ((*value > 0 && *value < CODE_SPACE) || *value == ZERO_FIRST) { - return false; - } - value++; - } - return true; -} - -CString JsonStringifier::ValueToQuotedString(CString str) -{ - CString product; - const char *value = str.c_str(); - // fast mode - bool isFast = IsFastValueToQuotedString(value); - if (isFast) { - product += "\""; - product += str; - product += "\""; - return product; - } - // 1. Let product be code unit 0x0022 (QUOTATION MARK). - product += "\""; - // 2. For each code unit C in value - for (const char *c = value; *c != 0; ++c) { - switch (*c) { - /* - * a. If C is 0x0022 (QUOTATION MARK) or 0x005C (REVERSE SOLIDUS), then - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let product be the concatenation of product and C. - */ - case '\"': - product += "\\\""; - break; - case '\\': - product += "\\\\"; - break; - /* - * b. Else if C is 0x0008 (BACKSPACE), 0x000C (FORM FEED), 0x000A (LINE FEED), 0x000D (CARRIAGE RETURN), - * or 0x000B (LINE TABULATION), then - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let abbrev be the String value corresponding to the value of C as follows: - * BACKSPACE "b" - * FORM FEED (FF) "f" - * LINE FEED (LF) "n" - * CARRIAGE RETURN (CR) "r" - * LINE TABULATION "t" - * iii. Let product be the concatenation of product and abbrev. - */ - case '\b': - product += "\\b"; - break; - case '\f': - product += "\\f"; - break; - case '\n': - product += "\\n"; - break; - case '\r': - product += "\\r"; - break; - case '\t': - product += "\\t"; - break; - case ZERO_FIRST: - product += "\\u0000"; - ++c; - break; - default: - // c. Else if C has a code unit value less than 0x0020 (SPACE), then - if (*c > 0 && *c < CODE_SPACE) { - /* - * i. Let product be the concatenation of product and code unit 0x005C (REVERSE SOLIDUS). - * ii. Let product be the concatenation of product and "u". - * iii. 
Let hex be the string result of converting the numeric code unit value of C to a String of - * four hexadecimal digits. Alphabetic hexadecimal digits are presented as lowercase Latin letters. - * iv. Let product be the concatenation of product and hex. - */ - std::ostringstream oss; - oss << "\\u" << std::hex << std::setfill('0') << std::setw(FOUR_HEX) << static_cast(*c); - product += oss.str(); - } else { - // Else, - // i. Let product be the concatenation of product and C. - product += *c; - } - } - } - // 3. Let product be the concatenation of product and code unit 0x0022 (QUOTATION MARK). - product += "\""; - // Return product. - return product; -} JSHandle JsonStringifier::Stringify(const JSHandle &value, const JSHandle &replacer, @@ -313,7 +208,7 @@ JSTaggedValue JsonStringifier::GetSerializeValue(const JSHandle & if (UNLIKELY(replacer->IsCallable())) { handleValue_.Update(tagValue); // a. Let value be Call(ReplacerFunction, holder, «key, value»). - const int32_t argsLength = 2; // 2: «key, value» + const uint32_t argsLength = 2; // 2: «key, value» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread_, replacer, object, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread_); @@ -369,12 +264,13 @@ JSTaggedValue JsonStringifier::SerializeJSONProperty(const JSHandle strHandle = JSHandle(valHandle); auto string = JSHandle(thread_, EcmaStringAccessor::Flatten(thread_->GetEcmaVM(), strHandle)); CString str = ConvertToString(*string, StringConvertedUsage::LOGICOPERATION); - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; return tagValue; } @@ -428,7 +324,7 @@ void JsonStringifier::SerializeObjectKey(const JSHandle &key, boo str = ConvertToString(*JSTaggedValue::ToString(thread_, key), StringConvertedUsage::LOGICOPERATION); } result_ += stepBegin; - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; result_ += ":"; result_ += stepEnd; @@ -477,6 +373,7 @@ bool JsonStringifier::SerializeJSONObject(const JSHandle &value, for (uint32_t i = 0; i < arrLength; i++) { handleKey_.Update(propertyArray->Get(i)); JSHandle valueHandle = JSTaggedValue::GetProperty(thread_, value, handleKey_).GetValue(); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); JSTaggedValue serializeValue = GetSerializeValue(value, handleKey_, valueHandle, replacer); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); if (UNLIKELY(serializeValue.IsUndefined() || serializeValue.IsSymbol() || @@ -608,6 +505,7 @@ bool JsonStringifier::SerializeJSArray(const JSHandle &value, con if (len > 0) { for (uint32_t i = 0; i < len; i++) { JSTaggedValue tagVal = ObjectFastOperator::FastGetPropertyByIndex(thread_, value.GetTaggedValue(), i); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); if (UNLIKELY(tagVal.IsAccessor())) { tagVal = JSObject::CallGetter(thread_, AccessorData::Cast(tagVal.GetTaggedObject()), value); } @@ -647,7 +545,7 @@ void JsonStringifier::SerializePrimitiveRef(const JSHandle &primi auto priStr = JSTaggedValue::ToString(thread_, primitiveRef); RETURN_IF_ABRUPT_COMPLETION(thread_); CString str = ConvertToString(*priStr, StringConvertedUsage::LOGICOPERATION); - str = ValueToQuotedString(str); + str = JsonHelper::ValueToQuotedString(str); result_ += str; } else if (primitive.IsNumber()) { auto priNum = JSTaggedValue::ToNumber(thread_, primitiveRef); @@ -692,11 +590,14 @@ bool JsonStringifier::SerializeElements(const JSHandle &obj, const JSH } } } - std::sort(sortArr.begin(), sortArr.end(), 
CompareNumber); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareNumber); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.GetTaggedValue(); handleKey_.Update(entryKey); int index = numberDic->FindEntry(entryKey); + if (index < 0) { + continue; + } JSTaggedValue value = numberDic->GetValue(index); if (UNLIKELY(value.IsAccessor())) { value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), @@ -715,9 +616,10 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl { JSHandle propertiesArr(thread_, obj->GetProperties()); if (!propertiesArr->IsDictionaryMode()) { + bool hasChangedToDictionaryMode = false; JSHandle jsHclass(thread_, obj->GetJSHClass()); JSTaggedValue enumCache = jsHclass->GetEnumCache(); - if (!enumCache.IsNull()) { + if (JSObject::GetEnumCacheKind(thread_, enumCache) == EnumCacheKind::ONLY_OWN_KEYS) { JSHandle cache(thread_, enumCache); uint32_t length = cache->GetLength(); for (uint32_t i = 0; i < length; i++) { @@ -732,7 +634,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl PropertyAttributes attr(layoutInfo->GetAttr(index)); ASSERT(static_cast(attr.GetOffset()) == index); value = attr.IsInlinedProps() - ? obj->GetPropertyInlinedProps(static_cast(index)) + ? obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); if (attr.IsInlinedProps() && value.IsHole()) { continue; @@ -754,25 +656,52 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl for (int i = 0; i < end; i++) { LayoutInfo *layoutInfo = LayoutInfo::Cast(jsHclass->GetLayout().GetTaggedObject()); JSTaggedValue key = layoutInfo->GetKey(i); - if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { - handleKey_.Update(key); - JSTaggedValue value; - int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); - PropertyAttributes attr(layoutInfo->GetAttr(index)); - ASSERT(static_cast(attr.GetOffset()) == index); - value = attr.IsInlinedProps() - ? obj->GetPropertyInlinedProps(static_cast(index)) - : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); - if (attr.IsInlinedProps() && value.IsHole()) { - continue; - } - if (UNLIKELY(value.IsAccessor())) { - value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), - JSHandle(obj)); + if (!hasChangedToDictionaryMode) { + if (key.IsString() && layoutInfo->GetAttr(i).IsEnumerable()) { + handleKey_.Update(key); + JSTaggedValue value; + int index = JSHClass::FindPropertyEntry(thread_, *jsHclass, key); + PropertyAttributes attr(layoutInfo->GetAttr(index)); + ASSERT(static_cast(attr.GetOffset()) == index); + value = attr.IsInlinedProps() + ? 
obj->GetPropertyInlinedPropsWithRep(static_cast(index), attr) + : propertiesArr->Get(static_cast(index) - jsHclass->GetInlinedProperties()); + if (attr.IsInlinedProps() && value.IsHole()) { + continue; + } + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + } + handleValue_.Update(value); + hasContent = JsonStringifier::AppendJsonString(obj, replacer, hasContent); + if (obj->GetProperties().IsDictionary()) { + hasChangedToDictionaryMode = true; + propertiesArr = JSHandle(thread_, obj->GetProperties()); + } + jsHclass = JSHandle(thread_, obj->GetJSHClass()); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); } - handleValue_.Update(value); - hasContent = JsonStringifier::AppendJsonString(obj, replacer, hasContent); - RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); + } else { + JSHandle nameDic(propertiesArr); + int index = nameDic->FindEntry(key); + if (!key.IsString()) { + continue; + } + PropertyAttributes attr = nameDic->GetAttributes(index); + if (!attr.IsEnumerable() || index < 0) { + continue; + } + JSTaggedValue value = nameDic->GetValue(index); + handleKey_.Update(key); + if (UNLIKELY(value.IsAccessor())) { + value = JSObject::CallGetter(thread_, AccessorData::Cast(value.GetTaggedObject()), + JSHandle(obj)); + jsHclass = JSHandle(thread_, obj->GetJSHClass()); + } + handleValue_.Update(value); + hasContent = JsonStringifier::AppendJsonString(obj, replacer, hasContent); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread_, false); } } return hasContent; @@ -793,7 +722,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); sortArr.emplace_back(pair); } - std::sort(sortArr.begin(), sortArr.end(), CompareKey); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.first.GetTaggedValue(); handleKey_.Update(entryKey); @@ -824,7 +753,7 @@ bool JsonStringifier::SerializeKeys(const JSHandle &obj, const JSHandl std::pair, PropertyAttributes> pair(JSHandle(thread_, key), attr); sortArr.emplace_back(pair); } - std::sort(sortArr.begin(), sortArr.end(), CompareKey); + std::sort(sortArr.begin(), sortArr.end(), JsonHelper::CompareKey); for (const auto &entry : sortArr) { JSTaggedValue entryKey = entry.first.GetTaggedValue(); handleKey_.Update(entryKey); diff --git a/ecmascript/base/json_stringifier.h b/ecmascript/base/json_stringifier.h index eba164e0b519820d23542532ebaff29728d3840e..d90189f4d7aa171549dc1b044cb003df7774cbd9 100644 --- a/ecmascript/base/json_stringifier.h +++ b/ecmascript/base/json_stringifier.h @@ -66,17 +66,6 @@ private: bool SerializeElements(const JSHandle &obj, const JSHandle &replacer, bool hasContent); bool SerializeKeys(const JSHandle &obj, const JSHandle &replacer, bool hasContent); - static inline bool CompareKey(const std::pair, PropertyAttributes> &a, - const std::pair, PropertyAttributes> &b) - { - return a.second.GetDictionaryOrder() < b.second.GetDictionaryOrder(); - } - - static inline bool CompareNumber(const JSHandle &a, const JSHandle &b) - { - return a->GetNumber() < b->GetNumber(); - } - CString gap_; CString result_; CString indent_; diff --git a/ecmascript/base/number_helper.cpp b/ecmascript/base/number_helper.cpp index e3c3a11034d866885cb0922401b6f7042070105e..cd83caa7193e28b92eb11ebbcfd76d91d3dd7b58 100644 --- a/ecmascript/base/number_helper.cpp +++ b/ecmascript/base/number_helper.cpp @@ -39,6 +39,33 @@ 
thread_local uint64_t RandomGenerator::randomState_ {0}; constexpr char CHARS[] = "0123456789abcdefghijklmnopqrstuvwxyz"; // NOLINT (modernize-avoid-c-arrays) constexpr uint64_t MAX_MANTISSA = 0x1ULL << 52U; +static const double POWERS_OF_TEN[] = { + 1.0, // 10^0 + 10.0, + 100.0, + 1000.0, + 10000.0, + 100000.0, + 1000000.0, + 10000000.0, + 100000000.0, + 1000000000.0, + 10000000000.0, // 10^10 + 100000000000.0, + 1000000000000.0, + 10000000000000.0, + 100000000000000.0, + 1000000000000000.0, + 10000000000000000.0, + 100000000000000000.0, + 1000000000000000000.0, + 10000000000000000000.0, + 100000000000000000000.0, // 10^20 + 1000000000000000000000.0, + 10000000000000000000000.0 // 10^22 +}; +static const int POWERS_OF_TEN_SIZE = 23; + static inline uint8_t ToDigit(uint8_t c) { if (c >= '0' && c <= '9') { @@ -90,32 +117,80 @@ bool NumberHelper::IsEmptyString(const uint8_t *start, const uint8_t *end) JSTaggedValue NumberHelper::DoubleToString(JSThread *thread, double number, int radix) { - bool negative = false; - if (number < 0.0) { - negative = true; + static constexpr int BUFFER_SIZE = 2240; // 2240: The size of the character array buffer + static constexpr int HALF_BUFFER_SIZE = BUFFER_SIZE >> 1; + char buffer[BUFFER_SIZE]; + size_t integerCursor = HALF_BUFFER_SIZE; + size_t fractionCursor = integerCursor; + + bool negative = number < 0.0; + if (negative) { number = -number; } - double numberInteger = std::floor(number); - double numberFraction = number - numberInteger; + double integer = std::floor(number); + double fraction = number - integer; auto value = bit_cast(number); value += 1; double delta = HALF * (bit_cast(value) - number); - - CString result; - if (numberFraction != 0 && numberFraction >= delta) { - result += "."; - result += DecimalsToString(&numberInteger, numberFraction, radix, delta); + delta = std::max(delta, bit_cast(static_cast(1))); // 1 : The binary of the smallest double is 1 + if (fraction != 0 && fraction >= delta) { + buffer[fractionCursor++] = '.'; + while (fraction >= delta) { + fraction *= radix; + delta *= radix; + int64_t digit = std::floor(fraction); + fraction -= digit; + buffer[fractionCursor++] = CHARS[digit]; + bool needCarry = (fraction > HALF) && (fraction + delta > 1); + if (needCarry) { + size_t fractionEnd = fractionCursor - 1; + buffer[fractionEnd] = Carry(buffer[fractionEnd], radix); + for (; fractionEnd > HALF_BUFFER_SIZE; fractionEnd--) { + if (buffer[fractionEnd] == '0') { + buffer[fractionEnd - 1] = Carry(buffer[fractionEnd - 1], radix); + } else { + break; + } + } + if (fractionEnd == HALF_BUFFER_SIZE) { + ++integer; + } + break; + } + } + // delete 0 in the end + size_t fractionEnd = fractionCursor - 1; + while (buffer[fractionEnd] == '0') { + --fractionEnd; + } + fractionCursor = fractionEnd + 1; } - result = IntegerToString(numberInteger, radix) + result; + ASSERT(radix >= MIN_RADIX && radix <= MAX_RADIX); + while (integer / radix > MAX_MANTISSA) { + integer /= radix; + buffer[--integerCursor] = '0'; + } + do { + double remainder = std::fmod(integer, radix); + buffer[--integerCursor] = CHARS[static_cast(remainder)]; + integer = (integer - remainder) / radix; + } while (integer > 0); if (negative) { - result = "-" + result; + buffer[--integerCursor] = '-'; } + buffer[fractionCursor++] = '\0'; - return BuiltinsBase::GetTaggedString(thread, result.c_str()); + size_t size = fractionCursor - integerCursor; + std::unique_ptr result = std::make_unique(size); + if (memcpy_s(result.get(), size, buffer + integerCursor, size) != EOK) { + 
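+        // Should be unreachable in practice: a double printed in radix 2 needs at most ~1100 digits
+        // on either side of the point, well within HALF_BUFFER_SIZE, so the copy always fits the buffer.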
+        LOG_FULL(FATAL) << "memcpy_s failed";
+        UNREACHABLE();
+    }
+    return BuiltinsBase::GetTaggedString(thread, result.get());
 }
 
 JSTaggedValue NumberHelper::DoubleToExponential(JSThread *thread, double number, int digit)
@@ -275,43 +350,15 @@ CString NumberHelper::IntegerToString(double number, int radix)
     return result;
 }
 
-CString NumberHelper::DecimalsToString(double *numberInteger, double fraction, int radix, double delta)
+CString NumberHelper::IntToString(int number)
 {
-    CString result;
-    while (fraction >= delta) {
-        fraction *= radix;
-        delta *= radix;
-        int64_t integer = std::floor(fraction);
-        fraction -= integer;
-        result += CHARS[integer];
-        if (fraction > HALF && fraction + delta > 1) {
-            size_t fractionEnd = result.size() - 1;
-            result[fractionEnd] = Carry(*result.rbegin(), radix);
-            for (; fractionEnd > 0; fractionEnd--) {
-                if (result[fractionEnd] == '0') {
-                    result[fractionEnd - 1] = Carry(result[fractionEnd - 1], radix);
-                } else {
-                    break;
-                }
-            }
-            if (fractionEnd == 0) {
-                (*numberInteger)++;
-            }
-            break;
-        }
-    }
-    // delete 0 in the end
-    size_t found = result.find_last_not_of('0');
-    if (found != CString::npos) {
-        result.erase(found + 1);
-    }
-
-    return result;
+    return ToCString(number);
 }
 
-CString NumberHelper::IntToString(int number)
+JSHandle<EcmaString> NumberHelper::IntToEcmaString(const JSThread *thread, int number)
 {
-    return ToCString(number);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    return factory->NewFromASCII(ToCString(number));
 }
 
 // 7.1.12.1 ToString Applied to the Number Type
@@ -419,7 +466,11 @@ double NumberHelper::TruncateDouble(double d)
     if (d == 0.0) {
         return 0;
     }
-    return (d >= 0) ? std::floor(d) : std::ceil(d);
+    double ret = (d >= 0) ? std::floor(d) : std::ceil(d);
+    if (ret == 0.0) {
+        ret = 0;
+    }
+    return ret;
 }
 
 int64_t NumberHelper::DoubleToInt64(double d)
@@ -433,6 +484,61 @@ int64_t NumberHelper::DoubleToInt64(double d)
     return static_cast<int64_t>(d);
 }
 
+bool NumberHelper::IsDigitalString(const uint8_t *start, const uint8_t *end)
+{
+    int len = end - start;
+    for (int i = 0; i < len; i++) {
+        if (*(start + i) < '0' || *(start + i) > '9') {
+            return false;
+        }
+    }
+    return true;
+}
+
+int NumberHelper::StringToInt(const uint8_t *start, const uint8_t *end)
+{
+    int num = *start - '0';
+    for (int i = 1; i < (end - start); i++) {
+        num = 10 * num + (*(start + i) - '0');
+    }
+    return num;
+}
+
+// only for ordinary strings using UTF8 encoding
+// Fast path for short integers and some special values
+std::pair<bool, JSTaggedNumber> NumberHelper::FastStringToNumber(const uint8_t *start,
+    const uint8_t *end, JSTaggedValue string)
+{
+    ASSERT(start < end);
+    EcmaStringAccessor strAccessor(string);
+    bool minus = (start[0] == '-');
+    int pos = (minus ? 1 : 0);
+
+    if (pos == (end - start)) {
+        return {true, JSTaggedNumber(NAN_VALUE)};
+    } else if (*(start + pos) > '9') {
+        // a valid number's first code unit cannot be greater than '9', except for 'I' (Infinity)
+        // and non-breaking space.
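+        // e.g. "abc" is rejected as NaN right here, while "Infinity" (leading 'I') and a string
+        // starting with non-breaking space fall through to the full parser at the end of this function.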
+ if (*(start + pos) != 'I' && *(start + pos) != 0xA0) { + return {true, JSTaggedNumber(NAN_VALUE)}; + } + } else if ((end - (start + pos)) <= MAX_ELEMENT_INDEX_LEN && IsDigitalString((start + pos), end)) { + int num = StringToInt((start + pos), end); + if (minus) { + if (num == 0) { + return {true, JSTaggedNumber(SignedZero(Sign::NEG))}; + } + num = -num; + } else { + if (num != 0 || (num == 0 && (end - start == 1))) { + strAccessor.TryToSetIntegerHash(num); + } + } + return {true, JSTaggedNumber(num)}; + } + + return {false, JSTaggedNumber(NAN_VALUE)}; +} + double NumberHelper::StringToDouble(const uint8_t *start, const uint8_t *end, uint8_t radix, uint32_t flags) { auto p = const_cast(start); @@ -558,6 +664,7 @@ double NumberHelper::StringToDouble(const uint8_t *start, const uint8_t *end, ui } // 8. parse '.' + exponent = 0; if (radix == DECIMAL && *p == '.') { RETURN_IF_CONVERSION_END(++p, end, (digits > 0 || (digits == 0 && leadingZero)) ? (number * std::pow(radix, exponent)) : NAN_VALUE); @@ -647,10 +754,15 @@ double NumberHelper::Strtod(const char *str, int exponent, uint8_t radix) } ++p; } + + // cal pow + int exponentAbs = exponent < 0 ? -exponent : exponent; + double powVal = ((radix == DECIMAL) && (exponentAbs < POWERS_OF_TEN_SIZE)) ? + POWERS_OF_TEN[exponentAbs] : std::pow(radix, exponentAbs); if (exponent < 0) { - result = number / std::pow(radix, -exponent); + result = number / powVal; } else { - result = number * std::pow(radix, exponent); + result = number * powVal; } return sign == Sign::NEG ? -result : result; } @@ -769,6 +881,9 @@ JSTaggedValue NumberHelper::StringToBigInt(JSThread *thread, JSHandle> base::RIGHT12) | EXPONENTBITS_RANGE_IN_ONE_AND_TWO; return base::bit_cast(random) - 1; } + +int32_t RandomGenerator::Next(int bits) +{ + uint64_t val = XorShift64(&randomState_); + return static_cast(val >> (INT64_BITS - bits)); +} + +int32_t RandomGenerator::GenerateIdentityHash() +{ + return RandomGenerator::Next(INT32_BITS) & INT32_MAX; +} } // namespace panda::ecmascript::base diff --git a/ecmascript/base/number_helper.h b/ecmascript/base/number_helper.h index 2552368a71024b2fa8279a73f916bc81911f917f..82a7c0a62b1fb839746fcc718599f6e36529f941 100644 --- a/ecmascript/base/number_helper.h +++ b/ecmascript/base/number_helper.h @@ -46,6 +46,8 @@ static constexpr double MAX_VALUE = std::numeric_limits::max(); static constexpr double MIN_VALUE = std::numeric_limits::min(); static constexpr double POSITIVE_INFINITY = std::numeric_limits::infinity(); static constexpr double NAN_VALUE = std::numeric_limits::quiet_NaN(); +static constexpr uint64_t MAX_UINT64_VALUE = std::numeric_limits::max(); +static constexpr int MAX_INT_VALUE = std::numeric_limits::max(); // Helper defines for double static constexpr int DOUBLE_MAX_PRECISION = 17; @@ -65,6 +67,12 @@ static constexpr size_t INT16_BITS = 16; static constexpr size_t INT8_BITS = 8; static constexpr size_t JS_DTOA_BUF_SIZE = 128; +// Max number of hexadecimal digits to display an integer +static constexpr size_t INT64_HEX_DIGITS = INT64_BITS / 4; +static constexpr size_t INT32_HEX_DIGITS = INT32_BITS / 4; +static constexpr size_t INT16_HEX_DIGITS = INT16_BITS / 4; +static constexpr size_t INT8_HEX_DIGITS = INT8_BITS / 4; + // help defines for random static constexpr int RIGHT12 = 12; static constexpr int SECONDS_TO_SUBTLE = 1000000; @@ -96,9 +104,14 @@ public: } static JSTaggedValue DoubleToString(JSThread *thread, double number, int radix); static bool IsEmptyString(const uint8_t *start, const uint8_t *end); + static JSHandle 
IntToEcmaString(const JSThread *thread, int number); static JSHandle NumberToString(const JSThread *thread, JSTaggedValue number); static double TruncateDouble(double d); static int64_t DoubleToInt64(double d); + static bool IsDigitalString(const uint8_t *start, const uint8_t *end); + static int StringToInt(const uint8_t *start, const uint8_t *end); + static std::pair FastStringToNumber(const uint8_t *start, + const uint8_t *end, JSTaggedValue string); static double StringToDouble(const uint8_t *start, const uint8_t *end, uint8_t radix, uint32_t flags = NO_FLAGS); static int32_t DoubleToInt(double d, size_t bits); static int32_t DoubleInRangeInt32(double d); @@ -113,7 +126,6 @@ public: private: static char Carry(char current, int radix); static double Strtod(const char *str, int exponent, uint8_t radix); - static CString DecimalsToString(double *numberInteger, double fraction, int radix, double delta); static bool GotoNonspace(uint8_t **ptr, const uint8_t *end); static void GetBase(double d, int digits, int *decpt, char *buf, char *bufTmp, int size); static int GetMinmumDigits(double d, int *decpt, char *buf); @@ -126,6 +138,8 @@ class RandomGenerator { public: static void InitRandom(); static double NextDouble(); + static int32_t GenerateIdentityHash(); + static int32_t Next(int bits); private: static uint64_t XorShift64(uint64_t *pVal); diff --git a/ecmascript/base/path_helper.cpp b/ecmascript/base/path_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2bc6d00639a870878078f2df7e590d229fbadfc7 --- /dev/null +++ b/ecmascript/base/path_helper.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ecmascript/base/path_helper.h" + +namespace panda::ecmascript::base { +/* + * Before: ./xxx/../xxx/xxx/ + * After: xxx/xxx + */ +CString PathHelper::NormalizePath(const CString &fileName) +{ + if (fileName.find(DOUBLE_SLASH_TAG) == CString::npos && + fileName.find(CURRENT_DIREATORY_TAG) == CString::npos && + fileName[fileName.size() - 1] != SLASH_TAG) { + return fileName; + } + CString res = ""; + size_t prev = 0; + size_t curr = fileName.find(SLASH_TAG); + CVector elems; + // eliminate parent directory path + while (curr != CString::npos) { + if (curr > prev) { + CString elem = fileName.substr(prev, curr - prev); + if (elem == DOUBLE_POINT_TAG && !elems.empty()) { // looking for xxx/../ + elems.pop_back(); + } else if (elem != POINT_STRING_TAG && elem != DOUBLE_POINT_TAG) { // remove ./ ../ + elems.push_back(elem); + } + } + prev = curr + 1; + curr = fileName.find(SLASH_TAG, prev); + } + if (prev != fileName.size()) { + elems.push_back(fileName.substr(prev)); + } + for (auto e : elems) { + if (res.size() == 0 && fileName.at(0) != SLASH_TAG) { + res.append(e); + continue; + } + res.append(1, SLASH_TAG).append(e); + } + return res; +} + +/* + * Before: xxx/xxx + * After: xxx/ + */ +JSHandle PathHelper::ResolveDirPath(JSThread *thread, CString fileName) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + // find last '/', '\\' + int foundPos = static_cast(fileName.find_last_of("/\\")); + if (foundPos == -1) { + return factory->NewFromUtf8(""); + } + CString dirPathStr = fileName.substr(0, foundPos + 1); + return factory->NewFromUtf8(dirPathStr); +} +} // namespace panda::ecmascript::base \ No newline at end of file diff --git a/ecmascript/base/path_helper.h b/ecmascript/base/path_helper.h index b0d3ecba732c3fd198dd2133ef73f0ae3f4d30ff..5ff0fb9bb965c8d78261b986a76a889c3f4eb049 100644 --- a/ecmascript/base/path_helper.h +++ b/ecmascript/base/path_helper.h @@ -27,157 +27,23 @@ namespace panda::ecmascript::base { class PathHelper { public: - static constexpr char EXT_NAME_ABC[] = ".abc"; - static constexpr char EXT_NAME_ETS[] = ".ets"; - static constexpr char EXT_NAME_TS[] = ".ts"; - static constexpr char EXT_NAME_JS[] = ".js"; - static constexpr char EXT_NAME_JSON[] = ".json"; - static constexpr char PREFIX_BUNDLE[] = "@bundle:"; - static constexpr char PREFIX_MODULE[] = "@module:"; - static constexpr char PREFIX_PACKAGE[] = "@package:"; - static constexpr char REQUIRE_NAITVE_MODULE_PREFIX[] = "@native:"; - static constexpr char REQUIRE_NAPI_OHOS_PREFIX[] = "@ohos:"; - static constexpr char REQUIRE_NAPI_APP_PREFIX[] = "@app:"; - static constexpr char NPM_PATH_SEGMENT[] = "node_modules"; - static constexpr char PACKAGE_PATH_SEGMENT[] = "pkg_modules"; - static constexpr char PACKAGE_ENTRY_FILE[] = "/index"; - static constexpr char BUNDLE_INSTALL_PATH[] = "/data/storage/el1/bundle/"; - static constexpr char MERGE_ABC_ETS_MODULES[] = "/ets/modules.abc"; - static constexpr char MODULE_DEFAULE_ETS[] = "/ets/"; - static constexpr char BUNDLE_SUB_INSTALL_PATH[] = "/data/storage/el1/"; - static constexpr char PREVIEW_OF_ACROSS_HAP_FLAG[] = "[preview]"; - static constexpr char NAME_SPACE_TAG[] = "@"; - static constexpr char PREVIER_TEST_DIR[] = ".test"; - - static constexpr size_t MAX_PACKAGE_LEVEL = 1; - static constexpr size_t SEGMENTS_LIMIT_TWO = 2; - static constexpr size_t EXT_NAME_ABC_LEN = 4; - static constexpr size_t EXT_NAME_ETS_LEN = 4; - static constexpr size_t EXT_NAME_TS_LEN = 3; - static constexpr size_t EXT_NAME_JS_LEN = 3; - static constexpr size_t 
EXT_NAME_JSON_LEN = 5; - static constexpr size_t PREFIX_BUNDLE_LEN = 8; - static constexpr size_t PREFIX_MODULE_LEN = 8; - static constexpr size_t PREFIX_PACKAGE_LEN = 9; - static constexpr size_t NATIVE_PREFIX_SIZE = 8; - static constexpr size_t OHOS_PREFIX_SIZE = 6; - static constexpr size_t APP_PREFIX_SIZE = 5; - - static void ResolveCurrentPath(JSThread *thread, - JSMutableHandle &dirPath, - JSMutableHandle &fileName, - const JSPandaFile *jsPandaFile) - { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString fullName = jsPandaFile->GetJSPandaFileDesc(); - // find last '/' - int foundPos = static_cast(fullName.find_last_of("/\\")); - if (foundPos == -1) { - RETURN_IF_ABRUPT_COMPLETION(thread); - } - CString dirPathStr = fullName.substr(0, foundPos + 1); - JSHandle dirPathName = factory->NewFromUtf8(dirPathStr); - dirPath.Update(dirPathName.GetTaggedValue()); - - // Get filename from JSPandaFile - JSHandle cbFileName = factory->NewFromUtf8(fullName); - fileName.Update(cbFileName.GetTaggedValue()); - } - - static JSHandle ResolveDirPath(JSThread *thread, - JSHandle fileName) - { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString fullName = ConvertToString(fileName.GetTaggedValue()); - // find last '/' - int foundPos = static_cast(fullName.find_last_of("/\\")); - if (foundPos == -1) { - RETURN_HANDLE_IF_ABRUPT_COMPLETION(EcmaString, thread); - } - CString dirPathStr = fullName.substr(0, foundPos + 1); - return factory->NewFromUtf8(dirPathStr); - } - - static CString NormalizePath(const CString &fileName) - { - if (fileName.find("//") == CString::npos && fileName.find("./") == CString::npos && - fileName[fileName.size() - 1] != '/') { - return fileName; - } - const char delim = '/'; - CString res = ""; - size_t prev = 0; - size_t curr = fileName.find(delim); - CVector elems; - while (curr != CString::npos) { - if (curr > prev) { - CString elem = fileName.substr(prev, curr - prev); - if (elem == ".." && !elems.empty()) { - elems.pop_back(); - } else if (elem != "." 
&& elem != "..") { - elems.push_back(elem); - } - } - prev = curr + 1; - curr = fileName.find(delim, prev); - } - if (prev != fileName.size()) { - elems.push_back(fileName.substr(prev)); - } - for (auto e : elems) { - if (res.size() == 0 && fileName.at(0) != delim) { - res.append(e); - continue; - } - res.append(1, delim).append(e); - } - return res; - } - - static CString ParseOhmUrl(EcmaVM *vm, const CString &inputFileName, CString &outFileName) - { - CString bundleInstallName(BUNDLE_INSTALL_PATH); - size_t startStrLen = bundleInstallName.length(); - size_t pos = CString::npos; - - if (inputFileName.length() > startStrLen && inputFileName.compare(0, startStrLen, bundleInstallName) == 0) { - pos = startStrLen; - } - CString entryPoint; - if (pos != CString::npos) { - pos = inputFileName.find('/', startStrLen); - if (pos == CString::npos) { - LOG_FULL(FATAL) << "Invalid Ohm url, please check."; - } - CString moduleName = inputFileName.substr(startStrLen, pos - startStrLen); - outFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - entryPoint = vm->GetBundleName() + "/" + inputFileName.substr(startStrLen); - } else { - // Temporarily handle the relative path sent by arkui - if (StringHelper::StringStartWith(inputFileName, PREFIX_BUNDLE)) { - entryPoint = inputFileName.substr(PREFIX_BUNDLE_LEN); - outFileName = ParseUrl(vm, entryPoint); - } else { -#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) - entryPoint = vm->GetBundleName() + "/" + inputFileName; -#else - // if the inputFileName starts with '.test', the preview test page is started. - // in this case, the path ets does not need to be combined. - if (StringHelper::StringStartWith(inputFileName, PREVIER_TEST_DIR)) { - entryPoint = vm->GetBundleName() + "/" + vm->GetModuleName() + "/" + inputFileName; - } else { - entryPoint = vm->GetBundleName() + "/" + vm->GetModuleName() + MODULE_DEFAULE_ETS + inputFileName; - } -#endif - } - } - if (StringHelper::StringEndWith(entryPoint, EXT_NAME_ABC)) { - entryPoint.erase(entryPoint.length() - EXT_NAME_ABC_LEN, EXT_NAME_ABC_LEN); - } - return entryPoint; - } - - static void CropNamespaceIfAbsent(CString &moduleName) + static constexpr char COLON_TAG = ':'; + static constexpr char CURRENT_DIREATORY_TAG[] = "./"; + static constexpr char DOUBLE_POINT_TAG[] = ".."; + static constexpr char DOUBLE_SLASH_TAG[] = "//"; + static constexpr char NAME_SPACE_TAG = '@'; + static constexpr char POINT_STRING_TAG[] = "."; + static constexpr char POINT_TAG = '.'; + static constexpr char SLASH_TAG = '/'; + + static CString NormalizePath(const CString &fileName); + static JSHandle ResolveDirPath(JSThread *thread, CString fileName); + + /* + * Before: moduleName@nameSpace + * After: moduleName + */ + inline static void DeleteNamespace(CString &moduleName) { size_t pos = moduleName.find(NAME_SPACE_TAG); if (pos == CString::npos) { @@ -186,403 +52,41 @@ public: moduleName.erase(pos, moduleName.size() - pos); } - // current ohmUrl format : @bundle:bundlename/modulename@namespace/entry/src/index - static CString ParseUrl(EcmaVM *vm, const CString &entryPoint) + /* + * Before: bundleName/moduleName@namespace/moduleName/xxx/xxx + * After: moduleName/xxx/xxx + */ + inline static void AdaptOldIsaRecord(CString &recordName) { - CVector vec; - StringHelper::SplitString(entryPoint, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(DEBUG) << "ParseUrl SplitString filed, please check Url" << entryPoint; - return CString(); - } - CString bundleName = vec[0]; - 
CString moduleName = vec[1]; - CropNamespaceIfAbsent(moduleName); - - CString baseFileName; - if (bundleName != vm->GetBundleName()) { - // Cross-application - baseFileName = - BUNDLE_INSTALL_PATH + bundleName + "/" + moduleName + "/" + moduleName + MERGE_ABC_ETS_MODULES; - } else { - // Intra-application cross hap - baseFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - } - return baseFileName; - } - - static std::string ParseHapPath(const CString &fileName) - { - CString bundleSubInstallName(BUNDLE_SUB_INSTALL_PATH); - size_t startStrLen = bundleSubInstallName.length(); - if (fileName.length() > startStrLen && fileName.compare(0, startStrLen, bundleSubInstallName) == 0) { - CString hapPath = fileName.substr(startStrLen); - size_t pos = hapPath.find(MERGE_ABC_ETS_MODULES); - if (pos != CString::npos) { - return hapPath.substr(0, pos).c_str(); - } - } - return std::string(); - } - - static void CroppingRecord(CString &recordName) - { - size_t pos = recordName.find('/'); + size_t pos = recordName.find(SLASH_TAG); if (pos != CString::npos) { - pos = recordName.find('/', pos + 1); + pos = recordName.find(SLASH_TAG, pos + 1); if (pos != CString::npos) { recordName = recordName.substr(pos + 1); } } } - static CString ParsePrefixBundle(JSThread *thread, const JSPandaFile *jsPandaFile, - [[maybe_unused]] CString &baseFileName, CString moduleRequestName, [[maybe_unused]] CString recordName) - { - EcmaVM *vm = thread->GetEcmaVM(); - moduleRequestName = moduleRequestName.substr(PREFIX_BUNDLE_LEN); - CString entryPoint = moduleRequestName; - if (jsPandaFile->IsRecordWithBundleName()) { - CVector vec; - StringHelper::SplitString(moduleRequestName, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(INFO) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString bundleName = vec[0]; - CString moduleName = vec[1]; - CropNamespaceIfAbsent(moduleName); - -#if !defined(PANDA_TARGET_WINDOWS) && !defined(PANDA_TARGET_MACOS) - if (bundleName != vm->GetBundleName()) { - baseFileName = - BUNDLE_INSTALL_PATH + bundleName + '/' + moduleName + '/' + moduleName + MERGE_ABC_ETS_MODULES; - } else if (moduleName != vm->GetModuleName()) { - baseFileName = BUNDLE_INSTALL_PATH + moduleName + MERGE_ABC_ETS_MODULES; - } else { - // Support multi-module card service - baseFileName = vm->GetAssetPath(); - } -#else - CVector currentVec; - StringHelper::SplitString(recordName, currentVec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(INFO) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString currentModuleName = currentVec[1]; - CropNamespaceIfAbsent(currentModuleName); - if (bundleName != vm->GetBundleName() || moduleName != currentModuleName) { - entryPoint = PREVIEW_OF_ACROSS_HAP_FLAG; - if (vm->EnableReportModuleResolvingFailure()) { - LOG_NO_TAG(ERROR) << "[ArkRuntime Log] Importing shared package is not supported in the Previewer."; - } - } -#endif - } else { - CroppingRecord(entryPoint); - } - return entryPoint; - } - - static CString MakeNewRecord(const JSPandaFile *jsPandaFile, CString &baseFileName, const CString &recordName, - const CString &requestName) - { - CString entryPoint; - CString moduleRequestName = RemoveSuffix(requestName); - size_t pos = moduleRequestName.find("./"); - if (pos == 0) { - moduleRequestName = moduleRequestName.substr(2); // 2 means jump "./" - } - pos = recordName.rfind('/'); - if (pos != CString::npos) { - entryPoint = recordName.substr(0, pos 
+ 1) + moduleRequestName; - } else { - entryPoint = moduleRequestName; - } - entryPoint = NormalizePath(entryPoint); - entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, entryPoint); - if (!entryPoint.empty()) { - return entryPoint; - } - // the package name may have a '.js' suffix, try to parseThirdPartyPackage - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, requestName); - if (!entryPoint.empty()) { - return entryPoint; - } - // Execute abc locally - pos = baseFileName.rfind('/'); - if (pos != CString::npos) { - baseFileName = baseFileName.substr(0, pos + 1) + moduleRequestName + EXT_NAME_ABC; - } else { - baseFileName = moduleRequestName + EXT_NAME_ABC; - } - pos = moduleRequestName.rfind('/'); - if (pos != CString::npos) { - entryPoint = moduleRequestName.substr(pos + 1); - } else { - entryPoint = moduleRequestName; - } - return entryPoint; - } - - static CString ConfirmLoadingIndexOrNot(const JSPandaFile *jsPandaFile, const CString &packageEntryPoint) - { - CString entryPoint = packageEntryPoint; - if (jsPandaFile->HasRecord(entryPoint)) { - return entryPoint; - } - // Possible import directory - entryPoint += PACKAGE_ENTRY_FILE; - if (jsPandaFile->HasRecord(entryPoint)) { - return entryPoint; - } - return CString(); - } - - static CString FindNpmEntryPoint(const JSPandaFile *jsPandaFile, const CString &packageEntryPoint) - { - // if we are currently importing a specific file or directory, we will get the entryPoint here - CString entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, packageEntryPoint); - if (!entryPoint.empty()) { - return entryPoint; - } - // When you come here, must import a packageName - return jsPandaFile->GetEntryPoint(packageEntryPoint); - } - - static CString FindPackageInTopLevel(const JSPandaFile *jsPandaFile, const CString& requestName, - const CString &packagePath) - { - // we find node_modules/0/xxx or node_modules/1/xxx - CString entryPoint; - for (size_t level = 0; level <= MAX_PACKAGE_LEVEL; ++level) { - CString levelStr = std::to_string(level).c_str(); - CString key = packagePath + "/" + levelStr + '/' + requestName; - entryPoint = FindNpmEntryPoint(jsPandaFile, key); - if (!entryPoint.empty()) { - return entryPoint; - } - } - return CString(); - } - - static CString FindOhpmEntryPoint(const JSPandaFile *jsPandaFile, const CString& ohpmPath, - const CString& requestName) - { - CVector vec; - StringHelper::SplitString(requestName, vec, 0); - size_t maxIndex = vec.size() - 1; - CString ohpmKey; - size_t index = 0; - // first we find the ohpmKey by splicing the requestName - while (index <= maxIndex) { - CString maybeKey = ohpmPath + "/" + StringHelper::JoinString(vec, 0, index); - ohpmKey = jsPandaFile->GetNpmEntries(maybeKey); - if (!ohpmKey.empty()) { - break; - } - ++index; - } - if (ohpmKey.empty()) { - return CString(); - } - // second If the ohpmKey is not empty, we will use it to obtain the real entrypoint - CString entryPoint; - if (index == maxIndex) { - // requestName is a packageName - entryPoint = jsPandaFile->GetEntryPoint(ohpmKey); - } else { - // import a specific file or directory - ohpmKey = ohpmKey + "/" + StringHelper::JoinString(vec, index + 1, maxIndex); - entryPoint = ConfirmLoadingIndexOrNot(jsPandaFile, ohpmKey); - } - return entryPoint; - } - - static CString FindPackageInTopLevelWithNamespace(const JSPandaFile *jsPandaFile, const CString& requestName, - const CString &recordName) - { - // find in current module @[moduleName|namespace]/ - CString entryPoint; - CString ohpmPath; - if 
(StringHelper::StringStartWith(recordName, PACKAGE_PATH_SEGMENT)) { - size_t pos = recordName.find('/'); - if (pos == CString::npos) { - LOG_ECMA(DEBUG) << "wrong recordname : " << recordName; - return CString(); - } - ohpmPath = recordName.substr(0, pos); - entryPoint = FindOhpmEntryPoint(jsPandaFile, recordName.substr(0, pos), requestName); - } else { - CVector vec; - StringHelper::SplitString(recordName, vec, 0, SEGMENTS_LIMIT_TWO); - if (vec.size() < SEGMENTS_LIMIT_TWO) { - LOG_ECMA(DEBUG) << "SplitString filed, please check moduleRequestName"; - return CString(); - } - CString moduleName = vec[1]; - // If namespace exists, use namespace as moduleName - size_t pos = moduleName.find(NAME_SPACE_TAG); - if (pos != CString::npos) { - moduleName = moduleName.substr(pos + 1); - } - ohpmPath = CString(PACKAGE_PATH_SEGMENT) + NAME_SPACE_TAG + moduleName; - entryPoint = FindOhpmEntryPoint(jsPandaFile, ohpmPath, requestName); - } - if (!entryPoint.empty()) { - return entryPoint; - } - // find in project directory / - return FindOhpmEntryPoint(jsPandaFile, PACKAGE_PATH_SEGMENT, requestName); - } - - static CString ParseOhpmPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName) - { - CString entryPoint; - if (StringHelper::StringStartWith(recordName, PACKAGE_PATH_SEGMENT)) { - //this way is thirdPartyPackage import ThirdPartyPackage - auto info = const_cast(jsPandaFile)->FindRecordInfo(recordName); - CString packageName = info.npmPackageName; - size_t pos = packageName.rfind(PACKAGE_PATH_SEGMENT); - if (pos != CString::npos) { - packageName.erase(pos, packageName.size() - pos); - CString ohpmPath = packageName + PACKAGE_PATH_SEGMENT; - entryPoint = FindOhpmEntryPoint(jsPandaFile, ohpmPath, requestName); - if (!entryPoint.empty()) { - return entryPoint; - } - } - } - // Import packages under the current module or project directory - return FindPackageInTopLevelWithNamespace(jsPandaFile, requestName, recordName); - } - - static CString ParseThirdPartyPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName, const CString &packagePath) - { - CString entryPoint; - if (StringHelper::StringStartWith(recordName, packagePath)) { - auto info = const_cast(jsPandaFile)->FindRecordInfo(recordName); - CString packageName = info.npmPackageName; - size_t pos = 0; - while (true) { - CString key = packageName + '/' + packagePath + "/" + requestName; - entryPoint = FindNpmEntryPoint(jsPandaFile, key); - if (!entryPoint.empty()) { - return entryPoint; - } - pos = packageName.rfind(packagePath) - 1; - if (pos == CString::npos || pos < 0) { - break; - } - packageName.erase(pos, packageName.size() - pos); - } - } - return FindPackageInTopLevel(jsPandaFile, requestName, packagePath); - } - - static CString ParseThirdPartyPackage(const JSPandaFile *jsPandaFile, const CString &recordName, - const CString &requestName) - { - static CVector packagePaths = {CString(PACKAGE_PATH_SEGMENT), CString(NPM_PATH_SEGMENT)}; - // We need to deal with scenarios like this 'json5/' -> 'json5' - CString normalizeRequestName = NormalizePath(requestName); - CString entryPoint = ParseOhpmPackage(jsPandaFile, recordName, normalizeRequestName); - if (!entryPoint.empty()) { - return entryPoint; - } - // Package compatible with old soft link format - for (size_t i = 0; i < packagePaths.size(); ++i) { - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, normalizeRequestName, packagePaths[i]); - if (!entryPoint.empty()) { - return entryPoint; - } - } - 
return CString(); - } - - static bool IsImportFile(const CString &moduleRequestName) - { - if (moduleRequestName[0] == '.') { - return true; - } - size_t pos = moduleRequestName.rfind('.'); - if (pos != CString::npos) { - CString suffix = moduleRequestName.substr(pos); - if (suffix == EXT_NAME_JS || suffix == EXT_NAME_TS || suffix == EXT_NAME_ETS || suffix == EXT_NAME_JSON) { - return true; - } - } - return false; - } - - static CString RemoveSuffix(const CString &requestName) - { - CString res = requestName; - size_t pos = res.rfind('.'); - if (pos != CString::npos) { - CString suffix = res.substr(pos); - if (suffix == EXT_NAME_JS || suffix == EXT_NAME_TS || suffix == EXT_NAME_ETS || suffix == EXT_NAME_JSON) { - res.erase(pos, suffix.length()); - } - } - return res; - } - - inline static bool IsNativeModuleRequest(const CString &requestName) - { - if (requestName[0] != '@') { - return false; - } - if (StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAPI_OHOS_PREFIX) || - StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAPI_APP_PREFIX) || - StringHelper::StringStartWith(requestName, PathHelper::REQUIRE_NAITVE_MODULE_PREFIX)) { - return true; - } - return false; - } - - static CString ConcatFileNameWithMerge(JSThread *thread, const JSPandaFile *jsPandaFile, CString &baseFileName, - CString recordName, CString requestName) - { - CString entryPoint; - if (StringHelper::StringStartWith(requestName, PREFIX_BUNDLE)) { - entryPoint = ParsePrefixBundle(thread, jsPandaFile, baseFileName, requestName, recordName); - } else if (StringHelper::StringStartWith(requestName, PREFIX_PACKAGE)) { - entryPoint = requestName.substr(PREFIX_PACKAGE_LEN); - } else if (IsImportFile(requestName)) { // load a relative pathName. - entryPoint = MakeNewRecord(jsPandaFile, baseFileName, recordName, requestName); - } else { - entryPoint = ParseThirdPartyPackage(jsPandaFile, recordName, requestName); - } - if (entryPoint.empty() && thread->GetEcmaVM()->EnableReportModuleResolvingFailure()) { - LOG_ECMA(ERROR) << "Failed to resolve the requested entryPoint. baseFileName : '" << baseFileName << - "'. RecordName : '" << recordName << "'. RequestName : '" << requestName << "'."; - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - CString msg = "failed to load module'" + requestName + "' which imported by '" + - recordName + "'. 
Please check the target path."; - JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); - THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, entryPoint); - } - return entryPoint; - } - - static CString GetStrippedModuleName(const CString &moduleRequestName) + /* + * Before: @***:xxxx + * After: xxxx + */ + inline static CString GetStrippedModuleName(const CString &moduleRequestName) { - // @xxx:**** -> **** - size_t pos = moduleRequestName.find(':'); + size_t pos = moduleRequestName.find(COLON_TAG); if (pos == CString::npos) { LOG_FULL(FATAL) << "Unknown format " << moduleRequestName; } return moduleRequestName.substr(pos + 1); } - static CString GetInternalModulePrefix(const CString &moduleRequestName) + /* + * Before: @xxx:**** + * After: xxx + */ + inline static CString GetInternalModulePrefix(const CString &moduleRequestName) { - // @xxx:* -> xxx - size_t pos = moduleRequestName.find(':'); + size_t pos = moduleRequestName.find(COLON_TAG); if (pos == CString::npos) { LOG_FULL(FATAL) << "Unknown format " << moduleRequestName; } diff --git a/ecmascript/base/string_helper.h b/ecmascript/base/string_helper.h index e15b3d2c2edb543ff7ff56ed08aef2b82217db26..43c049c54f1786015bdc58aa8193be229a0788f2 100644 --- a/ecmascript/base/string_helper.h +++ b/ecmascript/base/string_helper.h @@ -229,6 +229,11 @@ public: return c; } + static inline void InplaceAppend(std::u16string &str1, const std::u16string &str2) + { + str1.append(str2); + } + static inline std::u16string Append(const std::u16string &str1, const std::u16string &str2) { std::u16string tmpStr = str1; diff --git a/ecmascript/base/tests/array_helper_test.cpp b/ecmascript/base/tests/array_helper_test.cpp index 720d8064fcaf6c4a536c285c8cf8490c72435c08..fb0ae0cc237c01d3965b44627c0545c34f5feff3 100644 --- a/ecmascript/base/tests/array_helper_test.cpp +++ b/ecmascript/base/tests/array_helper_test.cpp @@ -104,7 +104,7 @@ HWTEST_F_L0(ArrayHelperTest, SortCompare) EXPECT_EQ(resultValue3, -1); // Y is Undefined EXPECT_EQ(resultValue4, 1); // X > Y EXPECT_EQ(resultValue5, 0); // X = Y - EXPECT_EQ(resultValue6, 0); // X < Y + EXPECT_EQ(resultValue6, -1); // X < Y } /** diff --git a/ecmascript/base/tests/error_helper_test.cpp b/ecmascript/base/tests/error_helper_test.cpp index 3cad7ef139e6cbee694e73a53c8624c3462112b0..3739202b71475ed7ef88ecebee651ebe96d782e4 100644 --- a/ecmascript/base/tests/error_helper_test.cpp +++ b/ecmascript/base/tests/error_helper_test.cpp @@ -105,6 +105,7 @@ HWTEST_F_L0(ErrorHelperTest, ErrorCommonToString_002) JSHandle syntaxErrorFunc = env->GetSyntaxErrorFunction(); JSHandle referenceErrorFunc = env->GetReferenceErrorFunction(); JSHandle aggregateErrorFunc = env->GetAggregateErrorFunction(); + JSHandle terminationErrorFunc = env->GetTerminationErrorFunction(); JSHandle uriErrorObj = factory->NewJSObjectByConstructor(JSHandle(uriErrorFunc), uriErrorFunc); JSHandle oomErrorObj = @@ -115,6 +116,8 @@ HWTEST_F_L0(ErrorHelperTest, ErrorCommonToString_002) factory->NewJSObjectByConstructor(JSHandle(referenceErrorFunc), referenceErrorFunc); JSHandle aggregateErrorObj = factory->NewJSObjectByConstructor(JSHandle(aggregateErrorFunc), aggregateErrorFunc); + JSHandle terminationErrorObj = + factory->NewJSObjectByConstructor(JSHandle(terminationErrorFunc), terminationErrorFunc); EcmaRuntimeCallInfo* argv = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 4); argv->SetFunction(JSTaggedValue::Undefined()); @@ -128,6 +131,12 @@ HWTEST_F_L0(ErrorHelperTest, 
ErrorCommonToString_002) JSHandle oomError(thread, ErrorHelper::ErrorCommonToString(argv, ErrorType::OOM_ERROR)); TestHelper::TearDownFrame(thread, prev); + argv->SetThis(JSTaggedValue(*terminationErrorObj)); + prev = TestHelper::SetupFrame(thread, argv); + JSHandle terminationError(thread, + ErrorHelper::ErrorCommonToString(argv, ErrorType::TERMINATION_ERROR)); + TestHelper::TearDownFrame(thread, prev); + argv->SetThis(JSTaggedValue(*syntaxErrorObj)); prev = TestHelper::SetupFrame(thread, argv); JSHandle syntaxError(thread, ErrorHelper::ErrorCommonToString(argv, ErrorType::SYNTAX_ERROR)); @@ -324,4 +333,56 @@ HWTEST_F_L0(ErrorHelperTest, ErrorCommonConstructor_003) EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(aggregateNameValue)).ToCString().c_str(), "AggregateError"); } + +HWTEST_F_L0(ErrorHelperTest, ErrorCommonConstructor_004) +{ + auto factory = instance->GetFactory(); + auto env = instance->GetGlobalEnv(); + JSHandle msgKey = thread->GlobalConstants()->GetHandledMessageString(); + JSHandle nameKey = thread->GlobalConstants()->GetHandledNameString(); + JSHandle causeKey = thread->GlobalConstants()->GetHandledCauseString(); + + JSHandle error(env->GetErrorFunction()); + JSHandle typeError(env->GetTypeErrorFunction()); + JSHandle objFun = env->GetObjectFunction(); + JSHandle optionsObj = factory->NewJSObjectByConstructor(JSHandle(objFun), objFun); + JSHandle causeValue(factory->NewFromASCII("error cause")); // test error cause + JSObject::SetProperty(thread, optionsObj, causeKey, causeValue); + + JSHandle errorMsg(factory->NewFromASCII("You have an Error!")); + EcmaRuntimeCallInfo *argv1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*error), 8); // 8 means 2 call args + argv1->SetFunction(error.GetTaggedValue()); + argv1->SetThis(JSTaggedValue(*error)); + argv1->SetCallArg(0, errorMsg.GetTaggedValue()); + argv1->SetCallArg(1, optionsObj.GetTaggedValue()); + auto prev1 = TestHelper::SetupFrame(thread, argv1); + JSHandle errorResult(thread, ErrorHelper::ErrorCommonConstructor(argv1, ErrorType::ERROR)); + TestHelper::TearDownFrame(thread, prev1); + JSHandle errorMsgValue(JSObject::GetProperty(thread, errorResult, msgKey).GetValue()); + JSHandle errorNameValue(JSObject::GetProperty(thread, errorResult, nameKey).GetValue()); + JSHandle errorCauseValue(JSObject::GetProperty(thread, errorResult, causeKey).GetValue()); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorMsgValue)).ToCString().c_str(), + "You have an Error!"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorNameValue)).ToCString().c_str(), "Error"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(errorCauseValue)).ToCString().c_str(), "error cause"); + + JSHandle typeErrorMsg(factory->NewFromASCII("You have a type error!")); + EcmaRuntimeCallInfo *argv2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*typeError), 8); // 8 means 2 call args + argv2->SetFunction(typeError.GetTaggedValue()); + argv2->SetThis(JSTaggedValue(*typeError)); + argv2->SetCallArg(0, typeErrorMsg.GetTaggedValue()); + argv2->SetCallArg(1, optionsObj.GetTaggedValue()); + auto prev2 = TestHelper::SetupFrame(thread, argv2); + JSHandle typeErrorResult(thread, ErrorHelper::ErrorCommonConstructor(argv2, ErrorType::TYPE_ERROR)); + TestHelper::TearDownFrame(thread, prev2); + JSHandle typeMsgValue(JSObject::GetProperty(thread, typeErrorResult, msgKey).GetValue()); + JSHandle typeNameValue(JSObject::GetProperty(thread, typeErrorResult, nameKey).GetValue()); + JSHandle typeCauseValue(JSObject::GetProperty(thread, 
typeErrorResult, causeKey).GetValue()); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeMsgValue)).ToCString().c_str(), + "You have a type error!"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeNameValue)).ToCString().c_str(), "TypeError"); + EXPECT_STREQ(EcmaStringAccessor(JSHandle::Cast(typeCauseValue)).ToCString().c_str(), "error cause"); +} } // namespace panda::test diff --git a/ecmascript/base/tests/json_parser_test.cpp b/ecmascript/base/tests/json_parser_test.cpp index 36f44df820cef4bf4d8ff65238fa3728c032f9f3..6a0a03f8373e3b82b249d51448978589c05668da 100644 --- a/ecmascript/base/tests/json_parser_test.cpp +++ b/ecmascript/base/tests/json_parser_test.cpp @@ -57,21 +57,21 @@ public: HWTEST_F_L0(JsonParserTest, Parser_001) { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JsonParser parser(thread); + Utf8JsonParser parser(thread); // JSON Number JSHandle handleMsg2(factory->NewFromASCII("1234")); JSHandle handleStr2(JSTaggedValue::ToString(thread, handleMsg2)); - JSHandle result2 = parser.ParseUtf8(*handleStr2); + JSHandle result2 = parser.Parse(*handleStr2); EXPECT_EQ(result2->GetNumber(), 1234); // JSON Literal JSHandle handleMsg3(factory->NewFromASCII("true")); JSHandle handleStr3(JSTaggedValue::ToString(thread, handleMsg3)); - JSHandle result3 = parser.ParseUtf8(*handleStr3); + JSHandle result3 = parser.Parse(*handleStr3); EXPECT_EQ(result3.GetTaggedValue(), JSTaggedValue::True()); // JSON Unexpected JSHandle handleMsg4(factory->NewFromASCII("trus")); JSHandle handleStr4(JSTaggedValue::ToString(thread, handleMsg4)); - JSHandle result4 = parser.ParseUtf8(*handleStr4); + JSHandle result4 = parser.Parse(*handleStr4); EXPECT_EQ(result4.GetTaggedValue(), JSTaggedValue::Exception()); } @@ -85,28 +85,28 @@ HWTEST_F_L0(JsonParserTest, Parser_001) HWTEST_F_L0(JsonParserTest, Parser_002) { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JsonParser parser(thread); + Utf16JsonParser parser(thread); // JSON Number uint16_t array1Utf16[] = {0x31, 0x32, 0x33, 0x34}; // "1234" uint32_t array1Utf16Len = sizeof(array1Utf16) / sizeof(array1Utf16[0]); JSHandle handleMsg2(factory->NewFromUtf16(&array1Utf16[0], array1Utf16Len)); JSHandle handleStr2(JSTaggedValue::ToString(thread, handleMsg2)); - JSHandle result2 = parser.ParseUtf16(*handleStr2); + JSHandle result2 = parser.Parse(*handleStr2); EXPECT_EQ(result2->GetNumber(), 1234); // JSON Literal uint16_t array2Utf16[] = {0x74, 0x72, 0x75, 0x65}; // "true" uint32_t array2Utf16Len = sizeof(array2Utf16) / sizeof(array2Utf16[0]); JSHandle handleMsg3(factory->NewFromUtf16(&array2Utf16[0], array2Utf16Len)); JSHandle handleStr3(JSTaggedValue::ToString(thread, handleMsg3)); - JSHandle result3 = parser.ParseUtf16(*handleStr3); + JSHandle result3 = parser.Parse(*handleStr3); EXPECT_EQ(result3.GetTaggedValue(), JSTaggedValue::True()); // JSON String uint16_t array3Utf16[] = {0x22, 0x73, 0x74, 0x72, 0x69, 0x6E, 0X67, 0x22}; // "string" uint32_t array3Utf16Len = sizeof(array3Utf16) / sizeof(array3Utf16[0]); JSHandle handleMsg4(factory->NewFromUtf16(&array3Utf16[0], array3Utf16Len)); JSHandle handleStr4(JSTaggedValue::ToString(thread, handleMsg4)); - JSHandle result4 = parser.ParseUtf16(*handleStr4); + JSHandle result4 = parser.Parse(*handleStr4); JSHandle handleEcmaStr(result4); EXPECT_STREQ("string", EcmaStringAccessor(handleEcmaStr).ToCString().c_str()); } @@ -121,13 +121,13 @@ HWTEST_F_L0(JsonParserTest, Parser_002) HWTEST_F_L0(JsonParserTest, Parser_003) { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - 
JsonParser parser(thread); + Utf8JsonParser parser(thread); JSHandle handleMsg(factory->NewFromASCII( "\t\r \n{\t\r \n \"json\"\t\r\n:\t\r \n{\t\r \n}\t\r \n,\t\r \n \"prop2\"\t\r \n:\t\r \n [\t\r \nfalse\t\r" "\n,\t\r \nnull\t\r \ntrue\t\r,123.456\t\r \n]\t\r \n}\t\r \n")); JSHandle handleStr(JSTaggedValue::ToString(thread, handleMsg)); // JSON Object - JSHandle result = parser.ParseUtf8(*handleStr); + JSHandle result = parser.Parse(*handleStr); EXPECT_TRUE(result->IsECMAObject()); } @@ -142,11 +142,11 @@ HWTEST_F_L0(JsonParserTest, Parser_004) { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); - JsonParser parser(thread); + Utf8JsonParser parser(thread); JSHandle handleMsg(factory->NewFromASCII("[100,2.5,\"abc\"]")); JSHandle handleStr(JSTaggedValue::ToString(thread, handleMsg)); // JSON Array - JSHandle result = parser.ParseUtf8(*handleStr); + JSHandle result = parser.Parse(*handleStr); JSTaggedValue resultValue(static_cast(result->GetRawData())); EXPECT_TRUE(resultValue.IsECMAObject()); @@ -168,12 +168,12 @@ HWTEST_F_L0(JsonParserTest, Parser_004) HWTEST_F_L0(JsonParserTest, Parser_005) { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JsonParser parser(thread); + Utf8JsonParser parser(thread); JSHandle handleMsg(factory->NewFromASCII("{\"epf\":100,\"key1\":400}")); JSHandle handleStr(JSTaggedValue::ToString(thread, handleMsg)); // JSON Object - JSHandle result = parser.ParseUtf8(*handleStr); + JSHandle result = parser.Parse(*handleStr); JSTaggedValue resultValue(static_cast(result->GetRawData())); EXPECT_TRUE(resultValue.IsECMAObject()); @@ -196,9 +196,9 @@ HWTEST_F_L0(JsonParserTest, Parser_005) */ HWTEST_F_L0(JsonParserTest, Parser_006) { - JsonParser parser(thread); + Utf8JsonParser parser(thread); JSHandle emptyString(thread->GlobalConstants()->GetHandledEmptyString()); - JSHandle result = parser.ParseUtf8(*emptyString); + JSHandle result = parser.Parse(*emptyString); EXPECT_TRUE(result->IsException()); } } // namespace panda::test diff --git a/ecmascript/base/tests/typed_array_helper_test.cpp b/ecmascript/base/tests/typed_array_helper_test.cpp index c502f608d8778df1fd52d8e948dad6e05818de24..dce9b45f2b30c58f3777b48bb7ce1e29b49144ea 100755 --- a/ecmascript/base/tests/typed_array_helper_test.cpp +++ b/ecmascript/base/tests/typed_array_helper_test.cpp @@ -146,7 +146,7 @@ HWTEST_F_L0(TypedArrayHelperTest, AllocateTypedArray_001) auto prev = TestHelper::SetupFrame(thread, argv); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); JSHandle arrayObj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, DataViewType::UINT8); + TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, DataViewType::UINT8); TestHelper::TearDownFrame(thread, prev); JSTypedArray *jsTypedArray = JSTypedArray::Cast(*arrayObj); EXPECT_EQ(jsTypedArray->GetContentType(), ContentType::Number); @@ -167,7 +167,7 @@ HWTEST_F_L0(TypedArrayHelperTest, AllocateTypedArray_002) auto prev = TestHelper::SetupFrame(thread, argv); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); JSHandle arrayObj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, length, DataViewType::UINT8); + TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, length, DataViewType::UINT8); TestHelper::TearDownFrame(thread, prev); JSTypedArray *jsTypedArray = JSTypedArray::Cast(*arrayObj); EXPECT_EQ(jsTypedArray->GetContentType(), 
ContentType::Number); diff --git a/ecmascript/base/typed_array_helper-inl.h b/ecmascript/base/typed_array_helper-inl.h index e348f809f73eed7eebe650dd8d75daaa40d37df7..eecb5cc730ee8aae430aa61157f2beda5725b6ff 100644 --- a/ecmascript/base/typed_array_helper-inl.h +++ b/ecmascript/base/typed_array_helper-inl.h @@ -126,50 +126,52 @@ JSHandle TypedArrayHelper::GetConstructorFromType(JSThread *thread, { JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); switch (arrayType) { - case DataViewType::INT8: - return JSHandle(env->GetInt8ArrayFunction()); - case DataViewType::UINT8: - return JSHandle(env->GetUint8ArrayFunction()); - case DataViewType::UINT8_CLAMPED: - return JSHandle(env->GetUint8ClampedArrayFunction()); - case DataViewType::INT16: - return JSHandle(env->GetInt16ArrayFunction()); - case DataViewType::UINT16: - return JSHandle(env->GetUint16ArrayFunction()); - case DataViewType::INT32: - return JSHandle(env->GetInt32ArrayFunction()); - case DataViewType::UINT32: - return JSHandle(env->GetUint32ArrayFunction()); - case DataViewType::FLOAT32: - return JSHandle(env->GetFloat32ArrayFunction()); - case DataViewType::FLOAT64: - return JSHandle(env->GetFloat64ArrayFunction()); - case DataViewType::BIGINT64: - return JSHandle(env->GetBigInt64ArrayFunction()); - default: - break; + case DataViewType::INT8: + return JSHandle(env->GetInt8ArrayFunction()); + case DataViewType::UINT8: + return JSHandle(env->GetUint8ArrayFunction()); + case DataViewType::UINT8_CLAMPED: + return JSHandle(env->GetUint8ClampedArrayFunction()); + case DataViewType::INT16: + return JSHandle(env->GetInt16ArrayFunction()); + case DataViewType::UINT16: + return JSHandle(env->GetUint16ArrayFunction()); + case DataViewType::INT32: + return JSHandle(env->GetInt32ArrayFunction()); + case DataViewType::UINT32: + return JSHandle(env->GetUint32ArrayFunction()); + case DataViewType::FLOAT32: + return JSHandle(env->GetFloat32ArrayFunction()); + case DataViewType::FLOAT64: + return JSHandle(env->GetFloat64ArrayFunction()); + case DataViewType::BIGINT64: + return JSHandle(env->GetBigInt64ArrayFunction()); + default: + break; } return JSHandle(env->GetBigUint64ArrayFunction()); } uint32_t TypedArrayHelper::GetSizeFromType(const DataViewType arrayType) { - uint32_t elementSize; if (arrayType == DataViewType::INT8 || arrayType == DataViewType::UINT8 || arrayType == DataViewType::UINT8_CLAMPED) { - elementSize = ElementSize::ONE; - } else if (arrayType == DataViewType::INT16 || - arrayType == DataViewType::UINT16) { - elementSize = ElementSize::TWO; - } else if (arrayType == DataViewType::FLOAT32 || - arrayType == DataViewType::UINT32 || - arrayType == DataViewType::INT32) { - elementSize = ElementSize::FOUR; - } else { - elementSize = ElementSize::EIGHT; + return ElementSize::ONE; + } + + if (arrayType == DataViewType::INT16 || + arrayType == DataViewType::UINT16) { + return ElementSize::TWO; } - return elementSize; + + if (arrayType == DataViewType::FLOAT32 || + arrayType == DataViewType::UINT32 || + arrayType == DataViewType::INT32) { + return ElementSize::FOUR; + } + + return ElementSize::EIGHT; } } // namespace panda::ecmascript::base #endif // ECMASCRIPT_BASE_TYPED_ARRAY_HELPER_INL_H diff --git a/ecmascript/base/typed_array_helper.cpp b/ecmascript/base/typed_array_helper.cpp index 2cfe6b939340a642171b1f541a3d80f9555e11d6..89ffed8b3ff112a271c8a074967b7b5e8ebe2f6f 100644 --- a/ecmascript/base/typed_array_helper.cpp +++ b/ecmascript/base/typed_array_helper.cpp @@ -45,7 +45,6 @@ JSTaggedValue 
TypedArrayHelper::TypedArrayConstructor(EcmaRuntimeCallInfo *argv, ASSERT(argv); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - EcmaVM *ecmaVm = thread->GetEcmaVM(); JSHandle newTarget = BuiltinsBase::GetNewTarget(argv); // 2. If NewTarget is undefined, throw a TypeError exception. if (newTarget->IsUndefined()) { @@ -54,24 +53,23 @@ JSTaggedValue TypedArrayHelper::TypedArrayConstructor(EcmaRuntimeCallInfo *argv, // 3. Let constructorName be the String value of the Constructor Name value specified in Table 61 for this // TypedArray constructor. // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget, "%TypedArray.prototype%"). - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle firstArg = BuiltinsBase::GetCallArg(argv, 0); if (!firstArg->IsECMAObject()) { // es11 22.2.4.1 TypedArray ( ) - int32_t elementLength = 0; + uint32_t elementLength = 0; // es11 22.2.4.2 TypedArray ( length ) if (!firstArg->IsUndefined()) { JSTaggedNumber index = JSTaggedValue::ToIndex(thread, firstArg); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - elementLength = static_cast(index.GetNumber()); + elementLength = static_cast(index.GetNumber()); } - JSHandle obj = TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, + JSHandle obj = TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, elementLength, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return obj.GetTaggedValue(); } - JSHandle obj = - TypedArrayHelper::AllocateTypedArray(factory, ecmaVm, constructorName, newTarget, arrayType); + + JSHandle obj = TypedArrayHelper::AllocateTypedArray(thread, constructorName, newTarget, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (firstArg->IsTypedArray()) { return TypedArrayHelper::CreateFromTypedArray(argv, obj, arrayType); @@ -98,11 +96,13 @@ JSTaggedValue TypedArrayHelper::FastCopyElementFromArray(EcmaRuntimeCallInfo *ar if (elements->GetLength() < len) { TypedArrayHelper::CreateFromOrdinaryObject(argv, obj, arrayType); } - EcmaVM *ecmaVm = thread->GetEcmaVM(); - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, len, arrayType); + + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, len, arrayType); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle targetObj = JSHandle::Cast(obj); - + JSStableArray::FastCopyFromArrayToTypedArray(thread, targetObj, arrayType, 0, len, elements); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSHandle::Cast(targetObj).GetTaggedValue(); } @@ -141,7 +141,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar } } uint32_t len = static_cast(vec.size()); - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, len, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, len, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // d. Let k be 0. // e. 
Repeat, while k < len @@ -154,6 +154,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = vec[k]; JSTaggedValue::SetProperty(thread, JSHandle::Cast(obj), kKey, kValue, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -173,7 +174,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint64_t rawLen = lenTemp.GetNumber(); // 10. Perform ? AllocateTypedArrayBuffer(O, len). - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, rawLen, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, rawLen, arrayType); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Let k be 0. // 12. Repeat, while k < len @@ -187,6 +188,7 @@ JSTaggedValue TypedArrayHelper::CreateFromOrdinaryObject(EcmaRuntimeCallInfo *ar while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = JSObject::GetProperty(thread, objectArg, kKey).GetValue(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedValue::SetProperty(thread, JSHandle::Cast(obj), kKey, kValue, true); @@ -230,7 +232,9 @@ JSTaggedValue TypedArrayHelper::CreateFromTypedArray(EcmaRuntimeCallInfo *argv, // 15. Let byteLength be elementSize × elementLength. uint32_t srcByteOffset = srcObj->GetByteOffset(); uint32_t elementSize = TypedArrayHelper::GetSizeFromType(arrayType); - uint32_t byteLength = elementSize * elementLength; + // If elementLength is a large number, the multiplication of elementSize and elementLength may exceed + // the maximum value of uint32, resulting in data overflow. Therefore, the type of byteLength is uint64_t. + uint64_t byteLength = elementSize * static_cast(elementLength); // 16. If IsSharedArrayBuffer(srcData) is false, then // a. Let bufferConstructor be ? SpeciesConstructor(srcData, %ArrayBuffer%). @@ -307,7 +311,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, [[maybe_unused]] EcmaHandleScope handleScope(thread); // 5. Let elementSize be the Element Size value specified in Table 61 for constructorName. // 6. Let offset be ? ToIndex(byteOffset). - uint32_t elementSize = static_cast(TypedArrayHelper::GetSizeFromType(arrayType)); + uint32_t elementSize = TypedArrayHelper::GetSizeFromType(arrayType); JSHandle byteOffset = BuiltinsBase::GetCallArg(argv, 1); JSTaggedNumber index = JSTaggedValue::ToIndex(thread, byteOffset); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -320,11 +324,11 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 8. If length is not undefined, then // a. Let newLength be ? ToIndex(length). JSHandle length = BuiltinsBase::GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); - int32_t newLength = 0; + uint64_t newLength = 0; if (!length->IsUndefined()) { index = JSTaggedValue::ToIndex(thread, length); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - newLength = static_cast(index.GetNumber()); + newLength = static_cast(index.GetNumber()); } // 9. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. JSHandle buffer = BuiltinsBase::GetCallArg(argv, 0); @@ -337,7 +341,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // a. 
If bufferByteLength modulo elementSize ≠ 0, throw a RangeError exception. // b. Let newByteLength be bufferByteLength - offset. // c. If newByteLength < 0, throw a RangeError exception. - uint32_t newByteLength = 0; + uint64_t newByteLength = 0; if (length->IsUndefined()) { if (bufferByteLength % elementSize != 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "The bufferByteLength is not an integral multiple of elementSize.", @@ -351,9 +355,7 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 12. Else, // a. Let newByteLength be newLength × elementSize. // b. If offset + newByteLength > bufferByteLength, throw a RangeError exception. - ASSERT((static_cast<int64_t>(newLength) * static_cast<int64_t>(elementSize)) <= - static_cast<int64_t>(INT32_MAX)); - newByteLength = static_cast<uint32_t>(newLength) * elementSize; + newByteLength = newLength * elementSize; if (offset + newByteLength > bufferByteLength) { THROW_RANGE_ERROR_AND_RETURN(thread, "The newByteLength is out of range.", JSTaggedValue::Exception()); } @@ -364,23 +366,22 @@ JSTaggedValue TypedArrayHelper::CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, // 16. Set O.[[ArrayLength]] to newByteLength / elementSize. JSTypedArray *jsTypedArray = JSTypedArray::Cast(*obj); jsTypedArray->SetViewedArrayBufferOrByteArray(thread, buffer); - jsTypedArray->SetByteLength(static_cast<uint32_t>(newByteLength)); + jsTypedArray->SetByteLength(newByteLength); jsTypedArray->SetByteOffset(offset); - jsTypedArray->SetArrayLength(static_cast<uint32_t>(newByteLength / elementSize)); + jsTypedArray->SetArrayLength(newByteLength / elementSize); // 17. Return O. return obj.GetTaggedValue(); } // es11 22.2.4.2.1 Runtime Semantics: AllocateTypedArray ( constructorName, newTarget, defaultProto ) -JSHandle<JSObject> TypedArrayHelper::AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, +JSHandle<JSObject> TypedArrayHelper::AllocateTypedArray(JSThread *thread, const JSHandle<JSTaggedValue> &constructorName, const JSHandle<JSTaggedValue> &newTarget, const DataViewType arrayType) { - JSThread *thread = ecmaVm->GetJSThread(); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); // 1. Let proto be ? GetPrototypeFromConstructor(newTarget, defaultProto). // 2. Let obj be ! IntegerIndexedObjectCreate(proto).
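
Editorial aside: the 64-bit widening above (byteLength in CreateFromTypedArray, newLength and newByteLength here) guards against a silent uint32_t wrap-around. For a FLOAT64 view GetSizeFromType returns ElementSize::EIGHT, so a length of 2^29, well within what ToIndex accepts, already overflows 32 bits. A minimal standalone sketch of the hazard, independent of any engine code:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t elementSize = 8;           // e.g. DataViewType::FLOAT64 => ElementSize::EIGHT
    uint32_t elementLength = 1U << 29;  // ~5.4e8 elements: a perfectly legal ToIndex value
    uint32_t wrapped = elementSize * elementLength;                         // 2^32 wraps to 0
    uint64_t widened = elementSize * static_cast<uint64_t>(elementLength);  // exact: 4 GiB
    assert(wrapped == 0);
    assert(widened == (1ULL << 32));
    return 0;
}
```

The deleted ASSERT only caught this in debug builds; doing the arithmetic in uint64_t keeps the subsequent offset + newByteLength > bufferByteLength check meaningful in release builds as well.
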
- RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); JSHandle typedArrayFunc = TypedArrayHelper::GetConstructorFromType(thread, arrayType); JSHandle obj = factory->NewJSObjectByConstructor(typedArrayFunc, newTarget); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); @@ -428,14 +428,14 @@ JSHandle TypedArrayHelper::AllocateTypedArray(ObjectFactory *factory, // 7. If length is not present, then // 8. Else, // a. Perform ? AllocateTypedArrayBuffer(obj, length). - TypedArrayHelper::AllocateTypedArrayBuffer(thread, ecmaVm, obj, length, arrayType); + TypedArrayHelper::AllocateTypedArrayBuffer(thread, obj, length, arrayType); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); // 9. Return obj. return obj; } // es11 22.2.4.2.2 Runtime Semantics: AllocateTypedArrayBuffer ( O, length ) -JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, EcmaVM *ecmaVm, +JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, const JSHandle &obj, uint64_t length, const DataViewType arrayType) { @@ -456,7 +456,7 @@ JSHandle TypedArrayHelper::AllocateTypedArrayBuffer(JSThread *thread, // 7. Let data be ? AllocateArrayBuffer(%ArrayBuffer%, byteLength). JSTaggedValue data; if (byteLength > JSTypedArray::MAX_ONHEAP_LENGTH) { - JSHandle constructor = ecmaVm->GetGlobalEnv()->GetArrayBufferFunction(); + JSHandle constructor = thread->GetEcmaVM()->GetGlobalEnv()->GetArrayBufferFunction(); data = BuiltinsArrayBuffer::AllocateArrayBuffer(thread, constructor, byteLength); JSTypedArray::Cast(*obj)->SetIsOnHeap(false); } else { @@ -544,6 +544,27 @@ JSHandle TypedArrayHelper::TypedArrayCreate(JSThread *thread, const JS return newTypedArray; } +// TypedArrayCreateSameType ( exemplar, argumentList ) +JSHandle TypedArrayHelper::TypedArrayCreateSameType(JSThread *thread, const JSHandle &obj, + uint32_t argc, JSTaggedType argv[]) +{ + // 1. Let constructor be the intrinsic object associated with the constructor name exemplar.[[TypedArrayName]] + // in Table 70. + JSHandle buffHandle(thread, JSTaggedValue(argv[0])); + JSHandle constructor = + TypedArrayHelper::GetConstructor(thread, JSHandle(obj)); + argv[0] = buffHandle.GetTaggedType(); + // 2. Let result be ? TypedArrayCreate(constructor, argumentList). + JSHandle result = TypedArrayHelper::TypedArrayCreate(thread, constructor, argc, argv); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSHandle(thread, JSTaggedValue::Exception())); + // 3. Assert: result has [[TypedArrayName]] and [[ContentType]] internal slots. + // 4. Assert: result.[[ContentType]] is exemplar.[[ContentType]]. 
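
Editorial aside: TypedArrayCreateSameType, added in this hunk, is the ECMA-262 helper behind the element-type-preserving copy methods (the spec invokes it from %TypedArray%.prototype.toReversed, toSorted and with). A hedged sketch of the call shape; exemplar, len and the surrounding builtin are illustrative assumptions, only the helper itself comes from this patch:

```cpp
// Hypothetical caller: `exemplar` is a JSHandle<JSTypedArray>, `len` its element
// count; argv[0] carries the new length as a raw JSTaggedType, which the helper
// re-wraps in a handle so it stays valid across allocation.
JSTaggedType args[1] = {JSTaggedValue(static_cast<int32_t>(len)).GetRawData()};
JSHandle<JSObject> copy = TypedArrayHelper::TypedArrayCreateSameType(thread, exemplar, 1, args);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
```

It also follows the thread-first convention of the reworked AllocateTypedArray overloads: helpers receive only JSThread * and recover ObjectFactory and the global env from it, rather than having every call site thread EcmaVM * and ObjectFactory * through.
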
+ [[maybe_unused]] ContentType objContentType = obj->GetContentType(); + [[maybe_unused]] ContentType resultContentType = JSHandle::Cast(result)->GetContentType(); + ASSERT(objContentType == resultContentType); + return result; +} + // es11 22.2.3.5.1 Runtime Semantics: ValidateTypedArray ( O ) JSTaggedValue TypedArrayHelper::ValidateTypedArray(JSThread *thread, const JSHandle &value) { @@ -577,7 +598,7 @@ int32_t TypedArrayHelper::SortCompare(JSThread *thread, const JSHandleIsUndefined()) { - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackfnHandle, undefined, undefined, argsLength); @@ -623,7 +644,9 @@ int32_t TypedArrayHelper::SortCompare(JSThread *thread, const JSHandle &constructorName, const DataViewType arrayType); - static JSHandle AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, + static JSHandle AllocateTypedArray(JSThread *thread, const JSHandle &constructorName, const JSHandle &newTarget, const DataViewType arrayType); - static JSHandle AllocateTypedArray(ObjectFactory *factory, EcmaVM *ecmaVm, + static JSHandle AllocateTypedArray(JSThread *thread, const JSHandle &constructorName, - const JSHandle &newTarget, int32_t length, + const JSHandle &newTarget, uint32_t length, const DataViewType arrayType); static JSHandle TypedArraySpeciesCreate(JSThread *thread, const JSHandle &obj, uint32_t argc, JSTaggedType argv[]); static JSHandle TypedArrayCreate(JSThread *thread, const JSHandle &constructor, uint32_t argc, const JSTaggedType argv[]); + static JSHandle TypedArrayCreateSameType(JSThread *thread, const JSHandle &obj, + uint32_t argc, JSTaggedType argv[]); static JSTaggedValue ValidateTypedArray(JSThread *thread, const JSHandle &value); inline static DataViewType GetType(const JSHandle &obj); inline static DataViewType GetType(JSType type); @@ -61,7 +63,7 @@ private: const DataViewType arrayType); static JSTaggedValue CreateFromArrayBuffer(EcmaRuntimeCallInfo *argv, const JSHandle &obj, const DataViewType arrayType); - static JSHandle AllocateTypedArrayBuffer(JSThread *thread, EcmaVM *ecmaVm, const JSHandle &obj, + static JSHandle AllocateTypedArrayBuffer(JSThread *thread, const JSHandle &obj, uint64_t length, const DataViewType arrayType); static JSTaggedValue FastCopyElementFromArray(EcmaRuntimeCallInfo *argv, const JSHandle &obj, const DataViewType arrayType); diff --git a/ecmascript/builtins/builtins.cpp b/ecmascript/builtins/builtins.cpp index 9d75686f2c6e7d45b6d903b2569c3c5d0a061d88..ec8d6f5b52ade39442970d2b7345d571216b9c36 100644 --- a/ecmascript/builtins/builtins.cpp +++ b/ecmascript/builtins/builtins.cpp @@ -92,6 +92,8 @@ #include "ecmascript/require/js_cjs_module_cache.h" #include "ecmascript/require/js_cjs_require.h" #include "ecmascript/require/js_cjs_exports.h" +#include "ecmascript/symbol_table.h" +#include "ecmascript/marker_cell.h" #include "ecmascript/napi/include/jsnapi.h" #include "ecmascript/object_factory.h" #ifdef ARK_SUPPORT_INTL @@ -143,6 +145,7 @@ using URIError = builtins::BuiltinsURIError; using SyntaxError = builtins::BuiltinsSyntaxError; using EvalError = builtins::BuiltinsEvalError; using OOMError = builtins::BuiltinsOOMError; +using TerminationError = builtins::BuiltinsTerminationError; using ErrorType = base::ErrorType; using RandomGenerator = base::RandomGenerator; using Global = builtins::BuiltinsGlobal; @@ -256,15 +259,15 @@ void Builtins::Initialize(const JSHandle &env, JSThread 
*thread, bool env->SetObjectFunction(thread_, objectFunction); env->SetObjectFunctionPrototype(thread_, objFuncPrototype); - JSHandle FunctionClass = factory_->CreateFunctionClass(FunctionKind::BASE_CONSTRUCTOR, JSFunction::SIZE, + JSHandle functionClass = factory_->CreateFunctionClass(FunctionKind::BASE_CONSTRUCTOR, JSFunction::SIZE, JSType::JS_FUNCTION, env->GetFunctionPrototype()); - env->SetFunctionClassWithProto(thread_, FunctionClass); - FunctionClass = factory_->CreateFunctionClass(FunctionKind::NORMAL_FUNCTION, JSFunction::SIZE, JSType::JS_FUNCTION, + env->SetFunctionClassWithProto(thread_, functionClass); + functionClass = factory_->CreateFunctionClass(FunctionKind::NORMAL_FUNCTION, JSFunction::SIZE, JSType::JS_FUNCTION, env->GetFunctionPrototype()); - env->SetFunctionClassWithoutProto(thread_, FunctionClass); - FunctionClass = factory_->CreateFunctionClass(FunctionKind::CLASS_CONSTRUCTOR, JSFunction::SIZE, + env->SetFunctionClassWithoutProto(thread_, functionClass); + functionClass = factory_->CreateFunctionClass(FunctionKind::CLASS_CONSTRUCTOR, JSFunction::SIZE, JSType::JS_FUNCTION, env->GetFunctionPrototype()); - env->SetFunctionClassWithoutName(thread_, FunctionClass); + env->SetFunctionClassWithoutName(thread_, functionClass); if (!isRealm) { InitializeAllTypeError(env, objFuncClass); @@ -291,30 +294,30 @@ void Builtins::Initialize(const JSHandle &env, JSThread *thread, bool LazyInitializeDataView(env); LazyInitializeSharedArrayBuffer(env); } else { - InitializeDate(env, objFuncClass); - InitializeSet(env, objFuncClass); - InitializeMap(env, objFuncClass); + InitializeDate(env, objFuncPrototypeVal); + InitializeSet(env, objFuncPrototypeVal); + InitializeMap(env, objFuncPrototypeVal); InitializeWeakMap(env, objFuncClass); InitializeWeakSet(env, objFuncClass); InitializeWeakRef(env, objFuncClass); InitializeFinalizationRegistry(env, objFuncClass); - InitializeTypedArray(env, objFuncClass); + InitializeTypedArray(env, objFuncPrototypeVal); InitializeArrayBuffer(env, objFuncClass); - InitializeDataView(env, objFuncClass); + InitializeDataView(env, objFuncPrototypeVal); InitializeSharedArrayBuffer(env, objFuncClass); } InitializeNumber(env, globalObject, primRefObjHClass); InitializeObject(env, objFuncPrototype, objectFunction); InitializeBoolean(env, primRefObjHClass); InitializeRegExp(env); - InitializeString(env, primRefObjHClass); + InitializeString(env, objFuncPrototypeVal); JSHandle argumentsClass = factory_->CreateJSArguments(env); env->SetArgumentsClass(thread_, argumentsClass); SetArgumentsSharedAccessor(env); - InitializeGlobalObject(env, globalObject); InitializeMath(env, objFuncPrototypeVal); + InitializeGlobalObject(env, globalObject); InitializeAtomics(env, objFuncPrototypeVal); InitializeJson(env, objFuncPrototypeVal); InitializeIterator(env, objFuncClass); @@ -357,6 +360,7 @@ void Builtins::Initialize(const JSHandle &env, JSThread *thread, bool InitializeCjsRequire(env); InitializeDefaultExportOfScript(env); InitializeFunctionHclassForOptimized(env); + InitializePropertyDetector(env, lazyInit); JSHandle generatorFuncClass = factory_->CreateFunctionClass(FunctionKind::GENERATOR_FUNCTION, JSFunction::SIZE, JSType::JS_GENERATOR_FUNCTION, env->GetGeneratorFunctionPrototype()); @@ -397,6 +401,18 @@ void Builtins::InitializeFunctionHclassForOptimized(const JSHandle &e #undef JSFUNCTION_JCLASS_LIST } +void Builtins::InitializePropertyDetector(const JSHandle &env, bool lazyInit) const +{ +#define INITIALIZE_PROPERTY_DETECTOR(type, name, index) \ + JSHandle name##detector = 
factory_->NewMarkerCell(); \ + if (lazyInit) { \ + name##detector->InvalidatePropertyDetector(); \ + } \ + env->Set##name(thread_, name##detector); + GLOBAL_ENV_DETECTOR_FIELDS(INITIALIZE_PROPERTY_DETECTOR) +#undef INITIALIZE_PROPERTY_DETECTOR +} + void Builtins::SetLazyAccessor(const JSHandle &object, const JSHandle &key, const JSHandle &accessor) const { @@ -425,6 +441,7 @@ void Builtins::InitializeGlobalObject(const JSHandle &env, const JSHa // Global object test SetFunction(env, globalObject, "print", Global::PrintEntrypoint, 0); + SetFunction(env, globalObject, "markModuleCollectable", Global::MarkModuleCollectable, 0); #if ECMASCRIPT_ENABLE_RUNTIME_STAT SetFunction(env, globalObject, "startRuntimeStat", Global::StartRuntimeStat, 0); SetFunction(env, globalObject, "stopRuntimeStat", Global::StopRuntimeStat, 0); @@ -455,6 +472,8 @@ void Builtins::InitializeGlobalObject(const JSHandle &env, const JSHa SetFunction(env, globalObject, "isNaN", Global::IsNaN, FunctionLength::ONE); SetFunction(env, globalObject, "decodeURI", Global::DecodeURI, FunctionLength::ONE); SetFunction(env, globalObject, "encodeURI", Global::EncodeURI, FunctionLength::ONE); + SetFunction(env, globalObject, "escape", Global::Escape, FunctionLength::ONE); + SetFunction(env, globalObject, "unescape", Global::Unescape, FunctionLength::ONE); SetFunction(env, globalObject, "decodeURIComponent", Global::DecodeURIComponent, FunctionLength::ONE); SetFunction(env, globalObject, "encodeURIComponent", Global::EncodeURIComponent, FunctionLength::ONE); @@ -515,7 +534,8 @@ void Builtins::InitializeFunction(const JSHandle &env, const JSHandle // Function.prototype method // 19.2.3.1 Function.prototype.apply ( thisArg, argArray ) - SetFunction(env, funcFuncPrototypeObj, "apply", Function::FunctionPrototypeApply, FunctionLength::TWO); + SetFunction(env, funcFuncPrototypeObj, "apply", Function::FunctionPrototypeApply, FunctionLength::TWO, + BUILTINS_STUB_ID(FunctionPrototypeApply)); // 19.2.3.2 Function.prototype.bind ( thisArg , ...args) SetFunction(env, funcFuncPrototypeObj, "bind", Function::FunctionPrototypeBind, FunctionLength::ONE); // 19.2.3.3 Function.prototype.call (thisArg , ...args) @@ -530,66 +550,15 @@ void Builtins::InitializeObject(const JSHandle &env, const JSHandleGlobalConstants()->GetHandledToStringString(), Object::ToString, - FunctionLength::ZERO); - // 19.1.3.7 Object.prototype.valueOf() - SetFunction(env, objFuncPrototype, thread_->GlobalConstants()->GetHandledValueOfString(), Object::ValueOf, - FunctionLength::ZERO); - - SetFunction(env, objFuncPrototype, "createRealm", Object::CreateRealm, FunctionLength::ZERO); + for (const base::BuiltinFunctionEntry &entry: Object::GetObjectFunctions()) { + SetFunction(env, objFunc, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + // Object.prototype method + for (const base::BuiltinFunctionEntry &entry: Object::GetObjectPrototypeFunctions()) { + SetFunction(env, objFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // B.2.2.1 Object.prototype.__proto__ JSHandle protoKey(factory_->NewFromASCII("__proto__")); @@ -620,8 +589,10 @@ void Builtins::InitializeSymbol(const JSHandle &env, const JSHandle::Cast(symbolFunction), true, false, true); JSObject::DefineOwnProperty(thread_, symbolFuncPrototype, constructorKey, descriptor); - SetFunction(env, symbolFunction, "for", Symbol::For, FunctionLength::ONE); - SetFunction(env, symbolFunction, "keyFor", Symbol::KeyFor, 
FunctionLength::ONE); + for (const base::BuiltinFunctionEntry &entry: Symbol::GetSymbolFunctions()) { + SetFunction(env, symbolFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // Symbol attribute JSHandle hasInstanceSymbol(factory_->NewWellKnownSymbolWithChar("Symbol.hasInstance")); @@ -630,22 +601,16 @@ void Builtins::InitializeSymbol(const JSHandle &env, const JSHandle toStringTagSymbol(factory_->NewWellKnownSymbolWithChar("Symbol.toStringTag")); SetNoneAttributeProperty(symbolFunction, "toStringTag", toStringTagSymbol); - JSHandle iteratorSymbol(factory_->NewPublicSymbolWithChar("Symbol.iterator")); - SetNoneAttributeProperty(symbolFunction, "iterator", iteratorSymbol); JSHandle asyncIteratorSymbol(factory_->NewPublicSymbolWithChar("Symbol.asyncIterator")); SetNoneAttributeProperty(symbolFunction, "asyncIterator", asyncIteratorSymbol); JSHandle matchSymbol(factory_->NewPublicSymbolWithChar("Symbol.match")); SetNoneAttributeProperty(symbolFunction, "match", matchSymbol); JSHandle matchAllSymbol(factory_->NewPublicSymbolWithChar("Symbol.matchAll")); SetNoneAttributeProperty(symbolFunction, "matchAll", matchAllSymbol); - JSHandle replaceSymbol(factory_->NewPublicSymbolWithChar("Symbol.replace")); - SetNoneAttributeProperty(symbolFunction, "replace", replaceSymbol); JSHandle searchSymbol(factory_->NewPublicSymbolWithChar("Symbol.search")); SetNoneAttributeProperty(symbolFunction, "search", searchSymbol); JSHandle speciesSymbol(factory_->NewPublicSymbolWithChar("Symbol.species")); SetNoneAttributeProperty(symbolFunction, "species", speciesSymbol); - JSHandle splitSymbol(factory_->NewPublicSymbolWithChar("Symbol.split")); - SetNoneAttributeProperty(symbolFunction, "split", splitSymbol); JSHandle toPrimitiveSymbol(factory_->NewPublicSymbolWithChar("Symbol.toPrimitive")); SetNoneAttributeProperty(symbolFunction, "toPrimitive", toPrimitiveSymbol); JSHandle unscopablesSymbol(factory_->NewPublicSymbolWithChar("Symbol.unscopables")); @@ -655,6 +620,32 @@ void Builtins::InitializeSymbol(const JSHandle &env, const JSHandle detachSymbol(factory_->NewPublicSymbolWithChar("Symbol.detach")); SetNoneAttributeProperty(symbolFunction, "detach", detachSymbol); + // Symbol attributes with detectors + // Create symbol string before create symbol to allocate symbol continuously + // Attention: Symbol serialization & deserialization are not supported now and + // the order of symbols and symbol-strings must be maintained too when + // Symbol serialization & deserialization are ready. 
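
Editorial aside: the three X-macro passes below expand once per DETECTOR_SYMBOL_LIST entry. The list itself is not shown in this patch, but the plain iterator, replace and split symbols deleted above are the obvious candidates; for a hypothetical entry V(ReplaceSymbol, "Symbol.replace", replace) the expansion is roughly:

```cpp
// Pass 1, INIT_SYMBOL_STRING: intern the description first, so the EcmaStrings
// are allocated before any detector symbol and the symbols stay contiguous.
{
    [[maybe_unused]] JSHandle<EcmaString> string = factory_->NewFromUtf8("Symbol.replace");
}
// Pass 2, INIT_PUBLIC_SYMBOL: build the symbol and record it in the global env.
JSHandle<JSSymbol> replaceSymbol = factory_->NewEmptySymbol();
JSHandle<EcmaString> replaceString = factory_->NewFromUtf8("Symbol.replace");
replaceSymbol->SetDescription(thread_, replaceString.GetTaggedValue());
replaceSymbol->SetHashField(SymbolTable::Hash(replaceString.GetTaggedValue()));
env->SetReplaceSymbol(thread_, replaceSymbol);
// Pass 3, REGISTER_SYMBOL: expose it as Symbol.replace on the Symbol constructor.
SetNoneAttributeProperty(symbolFunction, "replace", JSHandle<JSTaggedValue>(replaceSymbol));
```

Routing each symbol through a dedicated global-env slot pairs with the marker cells created in InitializePropertyDetector: presumably the runtime invalidates the matching detector when user code redefines one of these well-known symbol properties, switching off the associated fast paths.
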
+#define INIT_SYMBOL_STRING(name, description, key) \ + { \ + [[maybe_unused]] JSHandle string = factory_->NewFromUtf8(description); \ + } +DETECTOR_SYMBOL_LIST(INIT_SYMBOL_STRING) +#undef INIT_SYMBOL_STRING + +#define INIT_PUBLIC_SYMBOL(name, description, key) \ + JSHandle key##Symbol = factory_->NewEmptySymbol(); \ + JSHandle key##String = factory_->NewFromUtf8(description); \ + key##Symbol->SetDescription(thread_, key##String.GetTaggedValue()); \ + key##Symbol->SetHashField(SymbolTable::Hash(key##String.GetTaggedValue())); \ + env->Set##name(thread_, key##Symbol); +DETECTOR_SYMBOL_LIST(INIT_PUBLIC_SYMBOL) +#undef INIT_PUBLIC_SYMBOL + +#define REGISTER_SYMBOL(name, description, key) \ + SetNoneAttributeProperty(symbolFunction, #key, JSHandle(key##Symbol)); +DETECTOR_SYMBOL_LIST(REGISTER_SYMBOL) +#undef REGISTER_SYMBOL + // symbol.prototype.description PropertyDescriptor descriptionDesc(thread_); JSHandle getterKey(factory_->NewFromASCII("description")); @@ -674,14 +665,11 @@ void Builtins::InitializeSymbol(const JSHandle &env, const JSHandleSetHasInstanceSymbol(thread_, hasInstanceSymbol); env->SetIsConcatSpreadableSymbol(thread_, isConcatSpreadableSymbol); env->SetToStringTagSymbol(thread_, toStringTagSymbol); - env->SetIteratorSymbol(thread_, iteratorSymbol); env->SetAsyncIteratorSymbol(thread_, asyncIteratorSymbol); env->SetMatchSymbol(thread_, matchSymbol); env->SetMatchAllSymbol(thread_, matchAllSymbol); - env->SetReplaceSymbol(thread_, replaceSymbol); env->SetSearchSymbol(thread_, searchSymbol); env->SetSpeciesSymbol(thread_, speciesSymbol); - env->SetSplitSymbol(thread_, splitSymbol); env->SetToPrimitiveSymbol(thread_, toPrimitiveSymbol); env->SetUnscopablesSymbol(thread_, unscopablesSymbol); env->SetAttachSymbol(thread_, attachSymbol); @@ -725,25 +713,18 @@ void Builtins::InitializeSymbolWithRealm(const JSHandle &realm, PropertyDescriptor descriptor(thread_, JSHandle::Cast(symbolFunction), true, false, true); JSObject::DefineOwnProperty(thread_, symbolFuncPrototype, constructorKey, descriptor); - SetFunction(realm, symbolFunction, "for", Symbol::For, FunctionLength::ONE); - SetFunction(realm, symbolFunction, "keyFor", Symbol::KeyFor, FunctionLength::ONE); + for (const base::BuiltinFunctionEntry &entry: Symbol::GetSymbolFunctions()) { + SetFunction(realm, symbolFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + +#define BUILTIN_SYMBOL_CREATE_WITH_REALM(name, Name) \ + SetNoneAttributeProperty(symbolFunction, #name, env->Get##Name##Symbol()); \ + realm->Set##Name##Symbol(thread_, env->Get##Name##Symbol()); + realm->SetSymbolFunction(thread_, symbolFunction); // Symbol attribute - SetNoneAttributeProperty(symbolFunction, "hasInstance", env->GetHasInstanceSymbol()); - SetNoneAttributeProperty(symbolFunction, "isConcatSpreadable", env->GetIsConcatSpreadableSymbol()); - SetNoneAttributeProperty(symbolFunction, "toStringTag", env->GetToStringTagSymbol()); - SetNoneAttributeProperty(symbolFunction, "iterator", env->GetIteratorSymbol()); - SetNoneAttributeProperty(symbolFunction, "asyncIterator", env->GetAsyncIteratorSymbol()); - SetNoneAttributeProperty(symbolFunction, "match", env->GetMatchSymbol()); - SetNoneAttributeProperty(symbolFunction, "matchAll", env->GetMatchAllSymbol()); - SetNoneAttributeProperty(symbolFunction, "replace", env->GetReplaceSymbol()); - SetNoneAttributeProperty(symbolFunction, "search", env->GetSearchSymbol()); - SetNoneAttributeProperty(symbolFunction, "species", env->GetSpeciesSymbol()); - 
SetNoneAttributeProperty(symbolFunction, "split", env->GetSplitSymbol()); - SetNoneAttributeProperty(symbolFunction, "toPrimitive", env->GetToPrimitiveSymbol()); - SetNoneAttributeProperty(symbolFunction, "unscopables", env->GetUnscopablesSymbol()); - SetNoneAttributeProperty(symbolFunction, "attach", env->GetAttachSymbol()); - SetNoneAttributeProperty(symbolFunction, "detach", env->GetDetachSymbol()); + BUILTIN_ALL_SYMBOLS(BUILTIN_SYMBOL_CREATE_WITH_REALM) // symbol.prototype.description PropertyDescriptor descriptionDesc(thread_); @@ -756,28 +737,10 @@ void Builtins::InitializeSymbolWithRealm(const JSHandle &realm, "[Symbol.toPrimitive]", Symbol::ToPrimitive, FunctionLength::ONE); // install the Symbol.prototype methods - SetFunction(realm, symbolFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), Symbol::ToString, - FunctionLength::ZERO); - SetFunction(realm, symbolFuncPrototype, thread_->GlobalConstants()->GetHandledValueOfString(), Symbol::ValueOf, - FunctionLength::ZERO); - - realm->SetSymbolFunction(thread_, symbolFunction); - realm->SetHasInstanceSymbol(thread_, env->GetHasInstanceSymbol()); - realm->SetIsConcatSpreadableSymbol(thread_, env->GetIsConcatSpreadableSymbol()); - realm->SetToStringTagSymbol(thread_, env->GetToStringTagSymbol()); - realm->SetIteratorSymbol(thread_, env->GetIteratorSymbol()); - realm->SetAsyncIteratorSymbol(thread_, env->GetAsyncIteratorSymbol()); - realm->SetMatchSymbol(thread_, env->GetMatchSymbol()); - realm->SetMatchAllSymbol(thread_, env->GetMatchAllSymbol()); - realm->SetReplaceSymbol(thread_, env->GetReplaceSymbol()); - realm->SetSearchSymbol(thread_, env->GetSearchSymbol()); - realm->SetSpeciesSymbol(thread_, env->GetSpeciesSymbol()); - realm->SetSplitSymbol(thread_, env->GetSplitSymbol()); - realm->SetToPrimitiveSymbol(thread_, env->GetToPrimitiveSymbol()); - realm->SetUnscopablesSymbol(thread_, env->GetUnscopablesSymbol()); - realm->SetAttachSymbol(thread_, env->GetAttachSymbol()); - realm->SetDetachSymbol(thread_, env->GetDetachSymbol()); - + for (const base::BuiltinFunctionEntry &entry: Symbol::GetSymbolPrototypeFunctions()) { + SetFunction(realm, symbolFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // Setup %SymbolPrototype% SetStringTagSymbol(realm, symbolFuncPrototype, "Symbol"); @@ -792,6 +755,7 @@ void Builtins::InitializeSymbolWithRealm(const JSHandle &realm, realm, funcFuncPrototypeObj, realm->GetHasInstanceSymbol(), "[Symbol.hasInstance]", Function::FunctionPrototypeHasInstance, FunctionLength::ONE); } +#undef BUILTIN_SYMBOL_CREATE_WITH_REALM void Builtins::InitializeNumber(const JSHandle &env, const JSHandle &globalObject, const JSHandle &primRefObjHClass) @@ -809,41 +773,28 @@ void Builtins::InitializeNumber(const JSHandle &env, const JSHandle numFunction( - NewBuiltinConstructor(env, numFuncPrototype, Number::NumberConstructor, "Number", FunctionLength::ONE)); + NewBuiltinConstructor(env, numFuncPrototype, Number::NumberConstructor, "Number", FunctionLength::ONE, + BUILTINS_STUB_ID(NumberConstructor))); numFunction.GetObject()->SetFunctionPrototype(thread_, numFuncInstanceHClass.GetTaggedValue()); // Number.prototype method - SetFunction(env, numFuncPrototype, "toExponential", Number::ToExponential, FunctionLength::ONE); - SetFunction(env, numFuncPrototype, "toFixed", Number::ToFixed, FunctionLength::ONE); - SetFunction(env, numFuncPrototype, "toLocaleString", Number::ToLocaleString, FunctionLength::ZERO); - SetFunction(env, numFuncPrototype, 
"toPrecision", Number::ToPrecision, FunctionLength::ONE); - SetFunction(env, numFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), Number::ToString, - FunctionLength::ONE); - SetFunction(env, numFuncPrototype, thread_->GlobalConstants()->GetHandledValueOfString(), Number::ValueOf, - FunctionLength::ZERO); - + for (const base::BuiltinFunctionEntry &entry: Number::GetNumberPrototypeFunctions()) { + SetFunction(env, numFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // Number method - SetFunction(env, numFunction, "isFinite", Number::IsFinite, FunctionLength::ONE); - SetFunction(env, numFunction, "isInteger", Number::IsInteger, FunctionLength::ONE); - SetFunction(env, numFunction, "isNaN", Number::IsNaN, FunctionLength::ONE); - SetFunction(env, numFunction, "isSafeInteger", Number::IsSafeInteger, FunctionLength::ONE); - SetFuncToObjAndGlobal(env, globalObject, numFunction, "parseFloat", Number::ParseFloat, FunctionLength::ONE); - SetFuncToObjAndGlobal(env, globalObject, numFunction, "parseInt", Number::ParseInt, FunctionLength::TWO); - + for (const base::BuiltinFunctionEntry &entry: Number::GetNumberNonGlobalFunctions()) { + SetFunction(env, numFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + for (const base::BuiltinFunctionEntry &entry: Number::GetNumberGlobalFunctions()) { + SetFuncToObjAndGlobal(env, globalObject, numFunction, + entry.GetName(), entry.GetEntrypoint(), entry.GetLength(), entry.GetBuiltinStubId()); + } // Number constant - const double epsilon = 2.220446049250313e-16; - const double maxSafeInteger = 9007199254740991; - const double maxValue = 1.7976931348623157e+308; - const double minValue = 5e-324; - const double positiveInfinity = std::numeric_limits::infinity(); - SetConstant(numFunction, "MAX_VALUE", JSTaggedValue(maxValue)); - SetConstant(numFunction, "MIN_VALUE", JSTaggedValue(minValue)); - SetConstant(numFunction, "NaN", JSTaggedValue(NAN)); - SetConstant(numFunction, "NEGATIVE_INFINITY", JSTaggedValue(-positiveInfinity)); - SetConstant(numFunction, "POSITIVE_INFINITY", JSTaggedValue(positiveInfinity)); - SetConstant(numFunction, "MAX_SAFE_INTEGER", JSTaggedValue(maxSafeInteger)); - SetConstant(numFunction, "MIN_SAFE_INTEGER", JSTaggedValue(-maxSafeInteger)); - SetConstant(numFunction, "EPSILON", JSTaggedValue(epsilon)); + for (const base::BuiltinConstantEntry &entry: Number::GetNumberConstants()) { + SetConstant(numFunction, entry.GetName(), entry.GetTaggedValue()); + } env->SetNumberFunction(thread_, numFunction); } @@ -889,12 +840,13 @@ void Builtins::InitializeBigInt(const JSHandle &env, const JSHandleSetBigIntFunction(thread_, bigIntFunction); } -void Builtins::InitializeDate(const JSHandle &env, const JSHandle &objFuncClass) const +void Builtins::InitializeDate(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); - constexpr int utcLength = 7; // Date.prototype - JSHandle dateFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); + JSHandle dateFuncPrototypeHClass = factory_->NewEcmaHClass( + JSObject::SIZE, Date::GetNumPrototypeInlinedProperties(), JSType::JS_OBJECT, objFuncPrototypeVal); + JSHandle dateFuncPrototype = factory_->NewJSObjectWithInit(dateFuncPrototypeHClass); JSHandle dateFuncPrototypeValue(dateFuncPrototype); // Date.prototype_or_hclass @@ -908,66 +860,26 @@ void Builtins::InitializeDate(const JSHandle &env, const 
JSHandle(dateFunction)->SetFunctionPrototype(thread_, dateFuncInstanceHClass.GetTaggedValue()); // Date.prototype method - SetFunction(env, dateFuncPrototype, "getDate", Date::GetDate, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getDay", Date::GetDay, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getFullYear", Date::GetFullYear, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getHours", Date::GetHours, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getMilliseconds", Date::GetMilliseconds, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getMinutes", Date::GetMinutes, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getMonth", Date::GetMonth, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getSeconds", Date::GetSeconds, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getTime", Date::GetTime, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getTimezoneOffset", Date::GetTimezoneOffset, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCDate", Date::GetUTCDate, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCDay", Date::GetUTCDay, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCFullYear", Date::GetUTCFullYear, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCHours", Date::GetUTCHours, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCMilliseconds", Date::GetUTCMilliseconds, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCMinutes", Date::GetUTCMinutes, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCMonth", Date::GetUTCMonth, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "getUTCSeconds", Date::GetUTCSeconds, FunctionLength::ZERO); - - SetFunction(env, dateFuncPrototype, "setDate", Date::SetDate, FunctionLength::ONE); - SetFunction(env, dateFuncPrototype, "setFullYear", Date::SetFullYear, FunctionLength::THREE); - SetFunction(env, dateFuncPrototype, "setHours", Date::SetHours, FunctionLength::FOUR); - SetFunction(env, dateFuncPrototype, "setMilliseconds", Date::SetMilliseconds, FunctionLength::ONE); - SetFunction(env, dateFuncPrototype, "setMinutes", Date::SetMinutes, FunctionLength::THREE); - SetFunction(env, dateFuncPrototype, "setMonth", Date::SetMonth, FunctionLength::TWO); - SetFunction(env, dateFuncPrototype, "setSeconds", Date::SetSeconds, FunctionLength::TWO); - SetFunction(env, dateFuncPrototype, "setTime", Date::SetTime, FunctionLength::ONE); - SetFunction(env, dateFuncPrototype, "setUTCDate", Date::SetUTCDate, FunctionLength::ONE); - SetFunction(env, dateFuncPrototype, "setUTCFullYear", Date::SetUTCFullYear, FunctionLength::THREE); - SetFunction(env, dateFuncPrototype, "setUTCHours", Date::SetUTCHours, FunctionLength::FOUR); - SetFunction(env, dateFuncPrototype, "setUTCMilliseconds", Date::SetUTCMilliseconds, FunctionLength::ONE); - SetFunction(env, dateFuncPrototype, "setUTCMinutes", Date::SetUTCMinutes, FunctionLength::THREE); - SetFunction(env, dateFuncPrototype, "setUTCMonth", Date::SetUTCMonth, FunctionLength::TWO); - SetFunction(env, dateFuncPrototype, "setUTCSeconds", Date::SetUTCSeconds, FunctionLength::TWO); - - SetFunction(env, dateFuncPrototype, "toDateString", Date::ToDateString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toISOString", Date::ToISOString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toJSON", Date::ToJSON, FunctionLength::ONE); - SetFunction(env, 
dateFuncPrototype, "toLocaleDateString", Date::ToLocaleDateString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toLocaleString", Date::ToLocaleString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toLocaleTimeString", Date::ToLocaleTimeString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), Date::ToString, - FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toTimeString", Date::ToTimeString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, "toUTCString", Date::ToUTCString, FunctionLength::ZERO); - SetFunction(env, dateFuncPrototype, thread_->GlobalConstants()->GetHandledValueOfString(), Date::ValueOf, - FunctionLength::ZERO); - + for (const base::BuiltinFunctionEntry &entry: Date::GetDatePrototypeFunctions()) { + SetFunction(env, dateFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } SetFunctionAtSymbol(env, dateFuncPrototype, env->GetToPrimitiveSymbol(), "[Symbol.toPrimitive]", Date::ToPrimitive, FunctionLength::ONE); // Date method - SetFunction(env, dateFunction, "now", Date::Now, FunctionLength::ZERO); - SetFunction(env, dateFunction, "parse", Date::Parse, FunctionLength::ONE); - SetFunction(env, dateFunction, "UTC", Date::UTC, utcLength); - + for (const base::BuiltinFunctionEntry &entry: Date::GetDateFunctions()) { + SetFunction(env, dateFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // Date.length - SetConstant(dateFunction, "length", JSTaggedValue(utcLength)); + SetConstant(dateFunction, "length", JSTaggedValue(Date::UTC_LENGTH)); env->SetDateFunction(thread_, dateFunction); + env->SetDatePrototype(thread_, dateFuncPrototype); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::DATE, + dateFunction->GetJSHClass(), + dateFuncPrototype->GetJSHClass()); } void Builtins::LazyInitializeDate(const JSHandle &env) const @@ -1017,7 +929,7 @@ void Builtins::InitializeProxy(const JSHandle &env) } JSHandle Builtins::InitializeExoticConstructor(const JSHandle &env, EcmaEntrypoint ctorFunc, - const char *name, int length) + std::string_view name, int length) { JSHandle ctor = factory_->NewJSFunction(env, reinterpret_cast(ctorFunc), FunctionKind::BUILTIN_PROXY_CONSTRUCTOR); @@ -1098,6 +1010,7 @@ void Builtins::InitializeAllTypeError(const JSHandle &env, const JSHa InitializeError(env, errorNativeFuncInstanceHClass, JSType::JS_SYNTAX_ERROR); InitializeError(env, errorNativeFuncInstanceHClass, JSType::JS_EVAL_ERROR); InitializeError(env, errorNativeFuncInstanceHClass, JSType::JS_OOM_ERROR); + InitializeError(env, errorNativeFuncInstanceHClass, JSType::JS_TERMINATION_ERROR); } void Builtins::InitializeAllTypeErrorWithRealm(const JSHandle &realm) const @@ -1115,6 +1028,7 @@ void Builtins::InitializeAllTypeErrorWithRealm(const JSHandle &realm) SetErrorWithRealm(realm, JSType::JS_SYNTAX_ERROR); SetErrorWithRealm(realm, JSType::JS_EVAL_ERROR); SetErrorWithRealm(realm, JSType::JS_OOM_ERROR); + SetErrorWithRealm(realm, JSType::JS_TERMINATION_ERROR); } void Builtins::SetErrorWithRealm(const JSHandle &realm, const JSType &errorTag) const @@ -1165,6 +1079,11 @@ void Builtins::SetErrorWithRealm(const JSHandle &realm, const JSType nameString = JSHandle(thread_->GlobalConstants()->GetHandledOOMErrorString()); realm->SetOOMErrorFunction(thread_, nativeErrorFunction); break; + case JSType::JS_TERMINATION_ERROR: + nativeErrorFunction = env->GetTerminationErrorFunction(); + nameString 
= JSHandle(thread_->GlobalConstants()->GetHandledTerminationErrorString()); + realm->SetTerminationErrorFunction(thread_, nativeErrorFunction); + break; default: break; } @@ -1173,7 +1092,7 @@ void Builtins::SetErrorWithRealm(const JSHandle &realm, const JSType } void Builtins::GeneralUpdateError(ErrorParameter *error, EcmaEntrypoint constructor, EcmaEntrypoint method, - const char *name, JSType type) const + std::string_view name, JSType type) const { error->nativeConstructor = constructor; error->nativeMethod = method; @@ -1223,6 +1142,10 @@ void Builtins::InitializeError(const JSHandle &env, const JSHandle &env, const JSHandleSetSyntaxErrorFunction(thread_, nativeErrorFunction); } else if (errorTag == JSType::JS_EVAL_ERROR) { env->SetEvalErrorFunction(thread_, nativeErrorFunction); - } else { + } else if (errorTag == JSType::JS_OOM_ERROR) { env->SetOOMErrorFunction(thread_, nativeErrorFunction); + } else { + env->SetTerminationErrorFunction(thread_, nativeErrorFunction); } } // namespace panda::ecmascript void Builtins::InitializeCtor(const JSHandle &env, const JSHandle &prototype, - const JSHandle &ctor, const char *name, int length) const + const JSHandle &ctor, std::string_view name, int length) const { const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); JSFunction::SetFunctionLength(thread_, ctor, JSTaggedValue(length)); @@ -1297,12 +1222,14 @@ void Builtins::InitializeCtor(const JSHandle &env, const JSHandle &env, const JSHandle &objFuncClass) const +void Builtins::InitializeSet(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); // Set.prototype - JSHandle setFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); + JSHandle setFuncPrototypeHClass = factory_->NewEcmaHClass( + JSObject::SIZE, BuiltinsSet::GetNumPrototypeInlinedProperties(), JSType::JS_OBJECT, objFuncPrototypeVal); + JSHandle setFuncPrototype = factory_->NewJSObjectWithInit(setFuncPrototypeHClass); JSHandle setFuncPrototypeValue(setFuncPrototype); // Set.prototype_or_hclass JSHandle setFuncInstanceHClass = @@ -1315,25 +1242,18 @@ void Builtins::InitializeSet(const JSHandle &env, const JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(setFuncPrototype), constructorKey, setFunction); - // set.prototype.add() - SetFunction(env, setFuncPrototype, "add", BuiltinsSet::Add, FunctionLength::ONE); - // set.prototype.clear() - SetFunction(env, setFuncPrototype, "clear", BuiltinsSet::Clear, FunctionLength::ZERO); - // set.prototype.delete() - SetFunction(env, setFuncPrototype, "delete", BuiltinsSet::Delete, FunctionLength::ONE); - // set.prototype.has() - SetFunction(env, setFuncPrototype, "has", BuiltinsSet::Has, FunctionLength::ONE); - // set.prototype.forEach() - SetFunction(env, setFuncPrototype, "forEach", BuiltinsSet::ForEach, FunctionLength::ONE); - // set.prototype.entries() - SetFunction(env, setFuncPrototype, "entries", BuiltinsSet::Entries, FunctionLength::ZERO); - // set.prototype.keys() - SetFunction(env, setFuncPrototype, "values", BuiltinsSet::Values, FunctionLength::ZERO); - // set.prototype.values() + RETURN_IF_ABRUPT_COMPLETION(thread_); + // Set.prototype functions, excluding keys() + for (const base::BuiltinFunctionEntry &entry: BuiltinsSet::GetSetPrototypeFunctions()) { + SetFunction(env, setFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + // 
Set.prototype.keys, which is strictly equal to Set.prototype.values JSHandle keys(factory_->NewFromASCII("keys")); JSHandle values(factory_->NewFromASCII("values")); JSHandle valuesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(setFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor descriptor(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, setFuncPrototype, keys, descriptor); @@ -1356,6 +1276,11 @@ void Builtins::InitializeSet(const JSHandle &env, const JSHandleSetBuiltinsSetFunction(thread_, setFunction); + env->SetSetPrototype(thread_, setFuncPrototype); + env->SetSetProtoValuesFunction(thread_, valuesFunc); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::SET, + setFunction->GetTaggedObject()->GetClass(), + setFuncPrototype->GetJSHClass()); } void Builtins::LazyInitializeSet(const JSHandle &env) @@ -1368,12 +1293,14 @@ void Builtins::LazyInitializeSet(const JSHandle &env) env->SetBuiltinsSetFunction(thread_, accessor); } -void Builtins::InitializeMap(const JSHandle &env, const JSHandle &objFuncClass) const +void Builtins::InitializeMap(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); // Map.prototype - JSHandle mapFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); + JSHandle mapFuncPrototypeHClass = factory_->NewEcmaHClass( + JSObject::SIZE, BuiltinsMap::GetNumPrototypeInlinedProperties(), JSType::JS_OBJECT, objFuncPrototypeVal); + JSHandle mapFuncPrototype = factory_->NewJSObjectWithInit(mapFuncPrototypeHClass); JSHandle mapFuncPrototypeValue(mapFuncPrototype); // Map.prototype_or_hclass JSHandle mapFuncInstanceHClass = @@ -1388,25 +1315,12 @@ void Builtins::InitializeMap(const JSHandle &env, const JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(mapFuncPrototype), constructorKey, mapFunction); - // map.prototype.set() - SetFunction(env, mapFuncPrototype, globalConst->GetHandledSetString(), BuiltinsMap::Set, FunctionLength::TWO); - // map.prototype.clear() - SetFunction(env, mapFuncPrototype, "clear", BuiltinsMap::Clear, FunctionLength::ZERO); - // map.prototype.delete() - SetFunction(env, mapFuncPrototype, "delete", BuiltinsMap::Delete, FunctionLength::ONE); - // map.prototype.has() - SetFunction(env, mapFuncPrototype, "has", BuiltinsMap::Has, FunctionLength::ONE); - // map.prototype.get() - SetFunction(env, mapFuncPrototype, thread_->GlobalConstants()->GetHandledGetString(), BuiltinsMap::Get, - FunctionLength::ONE); - // map.prototype.forEach() - SetFunction(env, mapFuncPrototype, "forEach", BuiltinsMap::ForEach, FunctionLength::ONE); - // map.prototype.keys() - SetFunction(env, mapFuncPrototype, "keys", BuiltinsMap::Keys, FunctionLength::ZERO); - // map.prototype.values() - SetFunction(env, mapFuncPrototype, "values", BuiltinsMap::Values, FunctionLength::ZERO); - // map.prototype.entries() - SetFunction(env, mapFuncPrototype, "entries", BuiltinsMap::Entries, FunctionLength::ZERO); + RETURN_IF_ABRUPT_COMPLETION(thread_); + // Map.prototype functions + for (const base::BuiltinFunctionEntry &entry: BuiltinsMap::GetMapPrototypeFunctions()) { + SetFunction(env, mapFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // @@ToStringTag SetStringTagSymbol(env, mapFuncPrototype, "Map"); @@ -1426,11 +1340,16 @@ void Builtins::InitializeMap(const JSHandle &env, const JSHandle 
entries(factory_->NewFromASCII("entries")); JSHandle entriesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(mapFuncPrototype), entries); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor descriptor(thread_, entriesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, mapFuncPrototype, iteratorSymbol, descriptor); env->SetBuiltinsMapFunction(thread_, mapFunction); env->SetMapPrototype(thread_, mapFuncPrototype); + env->SetMapProtoEntriesFunction(thread_, entriesFunc); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::MAP, + mapFunction->GetTaggedObject()->GetClass(), + mapFuncPrototype->GetJSHClass()); } void Builtins::LazyInitializeMap(const JSHandle &env) const @@ -1464,6 +1383,7 @@ void Builtins::InitializeWeakMap(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(weakMapFuncPrototype), constructorKey, weakMapFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // weakmap.prototype.set() SetFunction(env, weakMapFuncPrototype, globalConst->GetHandledSetString(), BuiltinsWeakMap::Set, FunctionLength::TWO); @@ -1508,6 +1428,7 @@ void Builtins::InitializeWeakSet(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(weakSetFuncPrototype), constructorKey, weakSetFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // set.prototype.add() SetFunction(env, weakSetFuncPrototype, "add", BuiltinsWeakSet::Add, FunctionLength::ONE); // set.prototype.delete() @@ -1538,18 +1459,11 @@ void Builtins::InitializeAtomics(const JSHandle &env, JSHandle atomicsHClass = factory_->NewEcmaHClass(JSObject::SIZE, JSType::JS_OBJECT, objFuncPrototypeVal); JSHandle atomicsObject = factory_->NewJSObject(atomicsHClass); - SetFunction(env, atomicsObject, "add", Atomics::Add, FunctionLength::THREE); - SetFunction(env, atomicsObject, "and", Atomics::And, FunctionLength::THREE); - SetFunction(env, atomicsObject, "sub", Atomics::Sub, FunctionLength::THREE); - SetFunction(env, atomicsObject, "or", Atomics::Or, FunctionLength::THREE); - SetFunction(env, atomicsObject, "xor", Atomics::Xor, FunctionLength::THREE); - SetFunction(env, atomicsObject, "compareExchange", Atomics::CompareExchange, FunctionLength::FOUR); - SetFunction(env, atomicsObject, "exchange", Atomics::Exchange, FunctionLength::THREE); - SetFunction(env, atomicsObject, "isLockFree", Atomics::IsLockFree, FunctionLength::ONE); - SetFunction(env, atomicsObject, "load", Atomics::Load, FunctionLength::TWO); - SetFunction(env, atomicsObject, "store", Atomics::Store, FunctionLength::THREE); - SetFunction(env, atomicsObject, "wait", Atomics::Wait, FunctionLength::FOUR); - SetFunction(env, atomicsObject, "notify", Atomics::Notify, FunctionLength::THREE); + // Atomics functions + for (const base::BuiltinFunctionEntry &entry: Atomics::GetAtomicsFunctions()) { + SetFunction(env, atomicsObject, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } JSHandle atomicsString(factory_->NewFromASCII("Atomics")); JSHandle globalObject(thread_, env->GetGlobalObject()); PropertyDescriptor atomicsDesc(thread_, JSHandle::Cast(atomicsObject), true, false, true); @@ -1577,6 +1491,7 @@ void Builtins::InitializeWeakRef(const JSHandle &env, const JSHandle< // "constructor" property on the prototype JSHandle constructorKey = globalConst->GetHandledConstructorString(); 
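
Editorial aside: the Set, Map and Atomics hunks above converge on one pattern, a static table of base::BuiltinFunctionEntry consumed by a single registration loop in place of dozens of hand-written SetFunction calls. A self-contained sketch of the idea; the struct layout is an assumption for illustration (the real base::BuiltinFunctionEntry exposes GetName/GetEntrypoint/GetLength/GetBuiltinStubId accessors instead), with arities taken from the removed Atomics lines:

```cpp
#include <array>
#include <cstdio>
#include <string_view>

struct EcmaRuntimeCallInfo;   // stand-in forward declaration
struct JSTaggedValue {};      // stand-in for the engine's tagged value
using EcmaEntrypoint = JSTaggedValue (*)(EcmaRuntimeCallInfo *);

JSTaggedValue AtomicsAdd(EcmaRuntimeCallInfo *) { return {}; }
JSTaggedValue AtomicsLoad(EcmaRuntimeCallInfo *) { return {}; }

struct BuiltinFunctionEntry {
    std::string_view name;     // property key on the builtin object
    EcmaEntrypoint entrypoint; // slow-path native implementation
    int length;                // Function.prototype.length
    int builtinStubId;         // fast-path stub id; -1 meaning "none" here
};

constexpr std::array<BuiltinFunctionEntry, 2> kAtomicsFunctions{{
    {"add", AtomicsAdd, 3, -1},
    {"load", AtomicsLoad, 2, -1},
}};

int main()
{
    for (const BuiltinFunctionEntry &entry : kAtomicsFunctions) {
        std::printf("register %.*s (length %d)\n",
                    static_cast<int>(entry.name.size()), entry.name.data(), entry.length);
    }
    return 0;
}
```

Keeping name, arity and stub id in one row means the interpreter fast path (entry.GetBuiltinStubId()) cannot drift out of sync with the slow-path entrypoint registered beside it.
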
JSObject::SetProperty(thread_, JSHandle(weakRefFuncPrototype), constructorKey, weakRefFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // WeakRef.prototype.deref() SetFunction(env, weakRefFuncPrototype, "deref", BuiltinsWeakRef::Deref, FunctionLength::ZERO); @@ -1619,6 +1534,7 @@ void Builtins::InitializeFinalizationRegistry(const JSHandle &env, JSHandle constructorKey = globalConst->GetHandledConstructorString(); JSObject::SetProperty(thread_, JSHandle(finalizationRegistryFuncPrototype), constructorKey, finalizationRegistryFunction); + RETURN_IF_ABRUPT_COMPLETION(thread_); // FinalizationRegistry.prototype.deref() SetFunction(env, finalizationRegistryFuncPrototype, "register", BuiltinsFinalizationRegistry::Register, FunctionLength::TWO); @@ -1647,50 +1563,14 @@ void Builtins::InitializeMath(const JSHandle &env, const JSHandle mathClass = factory_->NewEcmaHClass(JSObject::SIZE, JSType::JS_OBJECT, objFuncPrototypeVal); JSHandle mathObject = factory_->NewJSObjectWithInit(mathClass); RandomGenerator::InitRandom(); - SetFunction(env, mathObject, "abs", Math::Abs, FunctionLength::ONE, BUILTINS_STUB_ID(ABS)); - SetFunction(env, mathObject, "acos", Math::Acos, FunctionLength::ONE, BUILTINS_STUB_ID(ACOS)); - SetFunction(env, mathObject, "acosh", Math::Acosh, FunctionLength::ONE); - SetFunction(env, mathObject, "asin", Math::Asin, FunctionLength::ONE); - SetFunction(env, mathObject, "asinh", Math::Asinh, FunctionLength::ONE); - SetFunction(env, mathObject, "atan", Math::Atan, FunctionLength::ONE, BUILTINS_STUB_ID(ATAN)); - SetFunction(env, mathObject, "atanh", Math::Atanh, FunctionLength::ONE); - SetFunction(env, mathObject, "atan2", Math::Atan2, FunctionLength::TWO); - SetFunction(env, mathObject, "cbrt", Math::Cbrt, FunctionLength::ONE); - SetFunction(env, mathObject, "ceil", Math::Ceil, FunctionLength::ONE); - SetFunction(env, mathObject, "clz32", Math::Clz32, FunctionLength::ONE); - SetFunction(env, mathObject, "cos", Math::Cos, FunctionLength::ONE, BUILTINS_STUB_ID(COS)); - SetFunction(env, mathObject, "cosh", Math::Cosh, FunctionLength::ONE); - SetFunction(env, mathObject, "exp", Math::Exp, FunctionLength::ONE); - SetFunction(env, mathObject, "expm1", Math::Expm1, FunctionLength::ONE); - SetFunction(env, mathObject, "floor", Math::Floor, FunctionLength::ONE, BUILTINS_STUB_ID(FLOOR)); - SetFunction(env, mathObject, "fround", Math::Fround, FunctionLength::ONE); - SetFunction(env, mathObject, "hypot", Math::Hypot, FunctionLength::TWO); - SetFunction(env, mathObject, "imul", Math::Imul, FunctionLength::TWO); - SetFunction(env, mathObject, "log", Math::Log, FunctionLength::ONE); - SetFunction(env, mathObject, "log1p", Math::Log1p, FunctionLength::ONE); - SetFunction(env, mathObject, "log10", Math::Log10, FunctionLength::ONE); - SetFunction(env, mathObject, "log2", Math::Log2, FunctionLength::ONE); - SetFunction(env, mathObject, "max", Math::Max, FunctionLength::TWO); - SetFunction(env, mathObject, "min", Math::Min, FunctionLength::TWO); - SetFunction(env, mathObject, "pow", Math::Pow, FunctionLength::TWO); - SetFunction(env, mathObject, "random", Math::Random, FunctionLength::ZERO); - SetFunction(env, mathObject, "round", Math::Round, FunctionLength::ONE); - SetFunction(env, mathObject, "sign", Math::Sign, FunctionLength::ONE); - SetFunction(env, mathObject, "sin", Math::Sin, FunctionLength::ONE, BUILTINS_STUB_ID(SIN)); - SetFunction(env, mathObject, "sinh", Math::Sinh, FunctionLength::ONE); - SetFunction(env, mathObject, "sqrt", Math::Sqrt, FunctionLength::ONE, BUILTINS_STUB_ID(SQRT)); - 
SetFunction(env, mathObject, "tan", Math::Tan, FunctionLength::ONE); - SetFunction(env, mathObject, "tanh", Math::Tanh, FunctionLength::ONE); - SetFunction(env, mathObject, "trunc", Math::Trunc, FunctionLength::ONE); - - SetConstant(mathObject, "E", JSTaggedValue(Math::E)); - SetConstant(mathObject, "LN10", JSTaggedValue(Math::LN10)); - SetConstant(mathObject, "LN2", JSTaggedValue(Math::LN2)); - SetConstant(mathObject, "LOG10E", JSTaggedValue(Math::LOG10E)); - SetConstant(mathObject, "LOG2E", JSTaggedValue(Math::LOG2E)); - SetConstant(mathObject, "PI", JSTaggedValue(Math::PI)); - SetConstant(mathObject, "SQRT1_2", JSTaggedValue(Math::SQRT1_2)); - SetConstant(mathObject, "SQRT2", JSTaggedValue(Math::SQRT2)); + + for (const base::BuiltinFunctionEntry &entry: Math::GetMathFunctions()) { + SetFunction(env, mathObject, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + for (const base::BuiltinConstantEntry &entry: Math::GetMathConstants()) { + SetConstant(mathObject, entry.GetName(), entry.GetTaggedValue()); + } JSHandle mathString(factory_->NewFromASCII("Math")); JSHandle globalObject(thread_, env->GetGlobalObject()); @@ -1708,7 +1588,7 @@ void Builtins::InitializeJson(const JSHandle &env, const JSHandle jsonObject = factory_->NewJSObjectWithInit(jsonHClass); SetFunction(env, jsonObject, "parse", Json::Parse, FunctionLength::TWO); - SetFunction(env, jsonObject, "stringify", Json::Stringify, FunctionLength::THREE); + SetFunction(env, jsonObject, "stringify", Json::Stringify, FunctionLength::THREE, BUILTINS_STUB_ID(STRINGIFY)); PropertyDescriptor jsonDesc(thread_, JSHandle::Cast(jsonObject), true, false, true); JSHandle jsonString(factory_->NewFromASCII("JSON")); @@ -1719,11 +1599,14 @@ void Builtins::InitializeJson(const JSHandle &env, const JSHandleSetJsonFunction(thread_, jsonObject); } -void Builtins::InitializeString(const JSHandle &env, const JSHandle &primRefObjHClass) const +void Builtins::InitializeString(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); // String.prototype JSHandle toObject(factory_->GetEmptyString()); + JSHandle primRefObjHClass = + factory_->NewEcmaHClass(JSPrimitiveRef::SIZE, BuiltinsSet::GetNumPrototypeInlinedProperties(), + JSType::JS_PRIMITIVE_REF, objFuncPrototypeVal); JSHandle stringFuncPrototype = JSHandle::Cast(factory_->NewJSPrimitiveRef(primRefObjHClass, toObject)); JSHandle stringFuncPrototypeValue(stringFuncPrototype); @@ -1738,54 +1621,18 @@ void Builtins::InitializeString(const JSHandle &env, const JSHandle()->SetFunctionPrototype(thread_, stringFuncInstanceHClass.GetTaggedValue()); // String.prototype method - SetFunction(env, stringFuncPrototype, "charAt", BuiltinsString::CharAt, FunctionLength::ONE, - BUILTINS_STUB_ID(CharAt)); - SetFunction(env, stringFuncPrototype, "charCodeAt", BuiltinsString::CharCodeAt, FunctionLength::ONE, - BUILTINS_STUB_ID(CharCodeAt)); - SetFunction(env, stringFuncPrototype, "codePointAt", BuiltinsString::CodePointAt, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "concat", BuiltinsString::Concat, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "endsWith", BuiltinsString::EndsWith, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "includes", BuiltinsString::Includes, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "indexOf", BuiltinsString::IndexOf, FunctionLength::ONE, - BUILTINS_STUB_ID(IndexOf)); - SetFunction(env, stringFuncPrototype, "lastIndexOf", 
BuiltinsString::LastIndexOf, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "localeCompare", BuiltinsString::LocaleCompare, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "match", BuiltinsString::Match, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "matchAll", BuiltinsString::MatchAll, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "repeat", BuiltinsString::Repeat, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "normalize", BuiltinsString::Normalize, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "padStart", BuiltinsString::PadStart, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "padEnd", BuiltinsString::PadEnd, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "replace", BuiltinsString::Replace, FunctionLength::TWO); - SetFunction(env, stringFuncPrototype, "replaceAll", BuiltinsString::ReplaceAll, FunctionLength::TWO); - SetFunction(env, stringFuncPrototype, "search", BuiltinsString::Search, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "slice", BuiltinsString::Slice, FunctionLength::TWO); - SetFunction(env, stringFuncPrototype, "split", BuiltinsString::Split, FunctionLength::TWO); - SetFunction(env, stringFuncPrototype, "startsWith", BuiltinsString::StartsWith, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "substring", BuiltinsString::Substring, FunctionLength::TWO, - BUILTINS_STUB_ID(Substring)); - SetFunction(env, stringFuncPrototype, "substr", BuiltinsString::SubStr, FunctionLength::TWO); - SetFunction(env, stringFuncPrototype, "at", BuiltinsString::At, FunctionLength::ONE); - SetFunction(env, stringFuncPrototype, "toLocaleLowerCase", BuiltinsString::ToLocaleLowerCase, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "toLocaleUpperCase", BuiltinsString::ToLocaleUpperCase, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "toLowerCase", BuiltinsString::ToLowerCase, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), - BuiltinsString::ToString, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "toUpperCase", BuiltinsString::ToUpperCase, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "trim", BuiltinsString::Trim, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "trimStart", BuiltinsString::TrimStart, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "trimEnd", BuiltinsString::TrimEnd, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "trimLeft", BuiltinsString::TrimLeft, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, "trimRight", BuiltinsString::TrimRight, FunctionLength::ZERO); - SetFunction(env, stringFuncPrototype, thread_->GlobalConstants()->GetHandledValueOfString(), - BuiltinsString::ValueOf, FunctionLength::ZERO); - SetFunctionAtSymbol(env, stringFuncPrototype, env->GetIteratorSymbol(), "[Symbol.iterator]", - BuiltinsString::GetStringIterator, FunctionLength::ZERO); + for (const base::BuiltinFunctionEntry &entry: BuiltinsString::GetStringPrototypeFunctions()) { + SetFunction(env, stringFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + JSHandle stringIter = SetAndReturnFunctionAtSymbol(env, stringFuncPrototype, + env->GetIteratorSymbol(), "[Symbol.iterator]", BuiltinsString::GetStringIterator, FunctionLength::ZERO); // String method - SetFunction(env, stringFunction, "fromCharCode", 
BuiltinsString::FromCharCode, FunctionLength::ONE); - SetFunction(env, stringFunction, "fromCodePoint", BuiltinsString::FromCodePoint, FunctionLength::ONE); - SetFunction(env, stringFunction, "raw", BuiltinsString::Raw, FunctionLength::ONE); + for (const base::BuiltinFunctionEntry &entry: BuiltinsString::GetStringFunctions()) { + SetFunction(env, stringFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // String.prototype.length JSHandle lengthGetter = CreateGetter(env, BuiltinsString::GetLength, "length", FunctionLength::ZERO); @@ -1793,6 +1640,11 @@ void Builtins::InitializeString(const JSHandle &env, const JSHandleSetStringFunction(thread_, stringFunction); + env->SetStringPrototype(thread_, stringFuncPrototype); + env->SetStringProtoIterFunction(thread_, stringIter); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::STRING, + stringFunction->GetJSHClass(), + stringFuncPrototype->GetJSHClass()); } void Builtins::InitializeStringIterator(const JSHandle &env, @@ -1809,7 +1661,8 @@ void Builtins::InitializeStringIterator(const JSHandle &env, factory_->NewJSFunction(env, static_cast(nullptr), FunctionKind::BASE_CONSTRUCTOR)); strIterFunction->SetFunctionPrototype(thread_, strIterFuncInstanceHClass.GetTaggedValue()); - SetFunction(env, strIterPrototype, "next", StringIterator::Next, FunctionLength::ZERO); + SetFunction(env, strIterPrototype, "next", StringIterator::Next, FunctionLength::ZERO, + BUILTINS_STUB_ID(STRING_ITERATOR_PROTO_NEXT)); SetStringTagSymbol(env, strIterPrototype, "String Iterator"); env->SetStringIterator(thread_, strIterFunction); @@ -1929,9 +1782,15 @@ void Builtins::InitializeSetIterator(const JSHandle &env, // SetIterator.prototype JSHandle setIteratorPrototype(factory_->NewJSObjectWithInit(iteratorFuncClass)); // Iterator.prototype.next() - SetFunction(env, setIteratorPrototype, "next", JSSetIterator::Next, FunctionLength::ZERO); + SetFunction(env, setIteratorPrototype, "next", JSSetIterator::Next, FunctionLength::ZERO, + BUILTINS_STUB_ID(SET_ITERATOR_PROTO_NEXT)); SetStringTagSymbol(env, setIteratorPrototype, "Set Iterator"); env->SetSetIteratorPrototype(thread_, setIteratorPrototype); + JSHandle protoValue = env->GetSetIteratorPrototype(); + const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); + JSHandle hclassHandle(globalConst->GetHandledJSSetIteratorClass()); + hclassHandle->SetPrototype(thread_, protoValue); + hclassHandle->SetExtensible(true); } void Builtins::InitializeMapIterator(const JSHandle &env, @@ -1940,17 +1799,25 @@ void Builtins::InitializeMapIterator(const JSHandle &env, // MapIterator.prototype JSHandle mapIteratorPrototype(factory_->NewJSObjectWithInit(iteratorFuncClass)); // Iterator.prototype.next() - SetFunction(env, mapIteratorPrototype, "next", JSMapIterator::Next, FunctionLength::ZERO); + SetFunction(env, mapIteratorPrototype, "next", JSMapIterator::Next, FunctionLength::ZERO, + BUILTINS_STUB_ID(MAP_ITERATOR_PROTO_NEXT)); SetStringTagSymbol(env, mapIteratorPrototype, "Map Iterator"); env->SetMapIteratorPrototype(thread_, mapIteratorPrototype); + JSHandle protoValue = env->GetMapIteratorPrototype(); + const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); + JSHandle hclassHandle(globalConst->GetHandledJSMapIteratorClass()); + hclassHandle->SetPrototype(thread_, protoValue); + hclassHandle->SetExtensible(true); } + void Builtins::InitializeArrayIterator(const JSHandle &env, const JSHandle &iteratorFuncClass) const { // ArrayIterator.prototype JSHandle 
arrayIteratorPrototype(factory_->NewJSObjectWithInit(iteratorFuncClass)); // Iterator.prototype.next() - SetFunction(env, arrayIteratorPrototype, "next", JSArrayIterator::Next, FunctionLength::ZERO); + SetFunction(env, arrayIteratorPrototype, "next", JSArrayIterator::Next, FunctionLength::ZERO, + BUILTINS_STUB_ID(ARRAY_ITERATOR_PROTO_NEXT)); SetStringTagSymbol(env, arrayIteratorPrototype, "Array Iterator"); env->SetArrayIteratorPrototype(thread_, arrayIteratorPrototype); } @@ -1988,7 +1855,7 @@ void Builtins::InitializeRegExp(const JSHandle &env) const GlobalEnvConstants *globalConstants = thread_->GlobalConstants(); // RegExp.prototype method - SetFunction(env, regPrototype, "exec", RegExp::Exec, FunctionLength::ONE); + JSHandle execFunc = SetAndReturnFunction(env, regPrototype, "exec", RegExp::Exec, FunctionLength::ONE); SetFunction(env, regPrototype, "test", RegExp::Test, FunctionLength::ONE); SetFunction(env, regPrototype, globalConstants->GetHandledToStringString(), RegExp::ToString, FunctionLength::ZERO); @@ -2005,6 +1872,11 @@ void Builtins::InitializeRegExp(const JSHandle &env) JSHandle globalKey(globalConstants->GetHandledGlobalString()); SetGetter(regPrototype, globalKey, globalGetter); + JSHandle hasIndicesGetter = + CreateGetter(env, RegExp::GetHasIndices, "hasIndices", FunctionLength::ZERO); + JSHandle hasIndicesKey(factory_->NewFromASCII("hasIndices")); + SetGetter(regPrototype, hasIndicesKey, hasIndicesGetter); + JSHandle ignoreCaseGetter = CreateGetter(env, RegExp::GetIgnoreCase, "ignoreCase", FunctionLength::ZERO); JSHandle ignoreCaseKey(factory_->NewFromASCII("ignoreCase")); @@ -2048,6 +1920,12 @@ void Builtins::InitializeRegExp(const JSHandle &env) FunctionLength::TWO); env->SetRegExpFunction(thread_, regexpFunction); + env->SetRegExpPrototype(thread_, regPrototype); + env->SetRegExpExecFunction(thread_, execFunc); + // Set RegExp.prototype hclass + JSHandle regPrototypeClass(thread_, regPrototype->GetJSHClass()); + env->SetRegExpPrototypeClass(thread_, regPrototypeClass.GetTaggedValue()); + auto globalConst = const_cast(thread_->GlobalConstants()); globalConst->SetConstant(ConstantIndex::JS_REGEXP_CLASS_INDEX, regexpFuncInstanceHClass.GetTaggedValue()); } @@ -2056,17 +1934,20 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandle arrBaseFuncInstanceHClass = factory_->CreateJSArrayInstanceClass(objFuncPrototypeVal); + JSHandle arrBaseFuncInstanceHClass = factory_->CreateJSArrayInstanceClass( + objFuncPrototypeVal, BuiltinsArray::GetNumPrototypeInlinedProperties()); // Array.prototype JSHandle arrFuncPrototype = factory_->NewJSObjectWithInit(arrBaseFuncInstanceHClass); - JSHandle::Cast(arrFuncPrototype)->SetLength(thread_, JSTaggedValue(FunctionLength::ZERO)); + JSHandle::Cast(arrFuncPrototype)->SetLength(FunctionLength::ZERO); auto accessor = thread_->GlobalConstants()->GetArrayLengthAccessor(); JSArray::Cast(*arrFuncPrototype)->SetPropertyInlinedProps(thread_, JSArray::LENGTH_INLINE_PROPERTY_INDEX, accessor); JSHandle arrFuncPrototypeValue(arrFuncPrototype); // Array.prototype_or_hclass JSHandle arrFuncInstanceHClass = factory_->CreateJSArrayInstanceClass(arrFuncPrototypeValue); + auto globalConstant = const_cast(thread_->GlobalConstants()); + globalConstant->InitElementKindHClass(thread_, arrFuncInstanceHClass); // Array = new Function() JSHandle arrayFunction( @@ -2081,54 +1962,26 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandleSetFunctionPrototype(thread_, arrFuncInstanceHClass.GetTaggedValue()); - // Array.prototype method - 
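Besides routing the iterator next() methods through dedicated builtin stubs, the Set/Map iterator hunks patch the iterator hclasses that GlobalEnvConstants created before any prototype object existed. A toy model of that two-phase wiring follows; all types are stand-ins, not the engine's.

#include <cassert>

struct JSObject;  // the prototype object, created later

struct HClass {
    const JSObject *prototype = nullptr;
    bool extensible = false;
    void SetPrototype(const JSObject *proto) { prototype = proto; }
    void SetExtensible(bool on) { extensible = on; }
};

struct JSObject {};

int main()
{
    HClass setIteratorClass;          // phase 1: built with no prototype yet
    JSObject setIteratorPrototype;    // phase 2: InitializeSetIterator builds it

    // Late fix-up mirroring the patch: point the cached hclass at the real
    // %SetIteratorPrototype% and mark it extensible.
    setIteratorClass.SetPrototype(&setIteratorPrototype);
    setIteratorClass.SetExtensible(true);

    assert(setIteratorClass.prototype == &setIteratorPrototype);
}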
SetFunction(env, arrFuncPrototype, "concat", BuiltinsArray::Concat, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "copyWithin", BuiltinsArray::CopyWithin, FunctionLength::TWO); - SetFunction(env, arrFuncPrototype, "entries", BuiltinsArray::Entries, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "every", BuiltinsArray::Every, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "fill", BuiltinsArray::Fill, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "filter", BuiltinsArray::Filter, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "find", BuiltinsArray::Find, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "findIndex", BuiltinsArray::FindIndex, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "forEach", BuiltinsArray::ForEach, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "indexOf", BuiltinsArray::IndexOf, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "join", BuiltinsArray::Join, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "keys", BuiltinsArray::Keys, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "lastIndexOf", BuiltinsArray::LastIndexOf, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "map", BuiltinsArray::Map, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "pop", BuiltinsArray::Pop, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "push", BuiltinsArray::Push, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "reduce", BuiltinsArray::Reduce, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "reduceRight", BuiltinsArray::ReduceRight, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "reverse", BuiltinsArray::Reverse, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "shift", BuiltinsArray::Shift, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "slice", BuiltinsArray::Slice, FunctionLength::TWO); - SetFunction(env, arrFuncPrototype, "some", BuiltinsArray::Some, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "sort", BuiltinsArray::Sort, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "splice", BuiltinsArray::Splice, FunctionLength::TWO); - SetFunction(env, arrFuncPrototype, thread_->GlobalConstants()->GetHandledToLocaleStringString(), - BuiltinsArray::ToLocaleString, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), BuiltinsArray::ToString, - FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "unshift", BuiltinsArray::Unshift, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "values", BuiltinsArray::Values, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "includes", BuiltinsArray::Includes, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "flat", BuiltinsArray::Flat, FunctionLength::ZERO); - SetFunction(env, arrFuncPrototype, "flatMap", BuiltinsArray::FlatMap, FunctionLength::ONE); - SetFunction(env, arrFuncPrototype, "at", BuiltinsArray::At, FunctionLength::ONE); + // Array.prototype methods (excluding constructor and '@@' internal properties) + for (const base::BuiltinFunctionEntry &entry: BuiltinsArray::GetArrayPrototypeFunctions()) { + SetFunction(env, arrFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // %ArrayPrototype% [ @@iterator ] JSHandle values(factory_->NewFromASCII("values")); JSHandle iteratorSymbol = env->GetIteratorSymbol(); JSHandle valuesFunc = JSObject::GetMethod(thread_, 
JSHandle::Cast(arrFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor iteartorDesc(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, arrFuncPrototype, iteratorSymbol, iteartorDesc); - // Array method - SetFunction(env, arrayFunction, "from", BuiltinsArray::From, FunctionLength::ONE); - SetFunction(env, arrayFunction, "isArray", BuiltinsArray::IsArray, FunctionLength::ONE); - SetFunction(env, arrayFunction, "of", BuiltinsArray::Of, FunctionLength::ZERO); + // Array methods (excluding '@@' internal properties) + for (const base::BuiltinFunctionEntry &entry: BuiltinsArray::GetArrayFunctions()) { + SetFunction(env, arrayFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // 22.1.2.5 get %Array% [ @@species ] JSHandle speciesSymbol = env->GetSpeciesSymbol(); @@ -2155,18 +2008,25 @@ void Builtins::InitializeArray(const JSHandle &env, const JSHandleSetArrayProtoValuesFunction(thread_, desc.GetValue()); env->SetArrayFunction(thread_, arrayFunction); env->SetArrayPrototype(thread_, arrFuncPrototype); + + thread_->SetInitialBuiltinHClass(BuiltinTypeId::ARRAY, + arrayFunction->GetJSHClass(), + arrFuncPrototype->GetJSHClass()); } -void Builtins::InitializeTypedArray(const JSHandle &env, const JSHandle &objFuncClass) const +void Builtins::InitializeTypedArray(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); // TypedArray.prototype - JSHandle typedArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); + JSHandle typedArrFuncPrototypeHClass = factory_->NewEcmaHClass( + JSObject::SIZE, BuiltinsTypedArray::GetNumPrototypeInlinedProperties(), + JSType::JS_OBJECT, objFuncPrototypeVal); + JSHandle typedArrFuncPrototype = factory_->NewJSObjectWithInit(typedArrFuncPrototypeHClass); JSHandle typedArrFuncPrototypeValue(typedArrFuncPrototype); // TypedArray.prototype_or_hclass JSHandle typedArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_TYPED_ARRAY, typedArrFuncPrototypeValue); + JSTypedArray::SIZE, JSType::JS_TYPED_ARRAY, typedArrFuncPrototypeValue); // TypedArray = new Function() JSHandle typedArrayFunction(NewBuiltinConstructor( @@ -2176,57 +2036,23 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand ->SetProtoOrHClass(thread_, typedArrFuncInstanceHClass.GetTaggedValue()); // TypedArray.prototype method - SetFunction(env, typedArrFuncPrototype, "copyWithin", BuiltinsTypedArray::CopyWithin, FunctionLength::TWO); - SetFunction(env, typedArrFuncPrototype, "entries", BuiltinsTypedArray::Entries, FunctionLength::ZERO); - SetFunction(env, typedArrFuncPrototype, "every", BuiltinsTypedArray::Every, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "at", BuiltinsTypedArray::At, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "fill", BuiltinsTypedArray::Fill, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "filter", BuiltinsTypedArray::Filter, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "find", BuiltinsTypedArray::Find, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "findIndex", BuiltinsTypedArray::FindIndex, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "forEach", BuiltinsTypedArray::ForEach, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "indexOf", BuiltinsTypedArray::IndexOf, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "join", 
BuiltinsTypedArray::Join, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "keys", BuiltinsTypedArray::Keys, FunctionLength::ZERO); - SetFunction(env, typedArrFuncPrototype, "lastIndexOf", BuiltinsTypedArray::LastIndexOf, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "map", BuiltinsTypedArray::Map, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "reduce", BuiltinsTypedArray::Reduce, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "reduceRight", BuiltinsTypedArray::ReduceRight, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "reverse", BuiltinsTypedArray::Reverse, FunctionLength::ZERO); - SetFunction(env, typedArrFuncPrototype, "set", BuiltinsTypedArray::Set, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "slice", BuiltinsTypedArray::Slice, FunctionLength::TWO); - SetFunction(env, typedArrFuncPrototype, "some", BuiltinsTypedArray::Some, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "sort", BuiltinsTypedArray::Sort, FunctionLength::ONE); - SetFunction(env, typedArrFuncPrototype, "subarray", BuiltinsTypedArray::Subarray, FunctionLength::TWO); - SetFunction(env, typedArrFuncPrototype, thread_->GlobalConstants()->GetHandledToLocaleStringString(), - BuiltinsTypedArray::ToLocaleString, FunctionLength::ZERO); - SetFunction(env, typedArrFuncPrototype, "values", BuiltinsTypedArray::Values, FunctionLength::ZERO); - SetFunction(env, typedArrFuncPrototype, "includes", BuiltinsTypedArray::Includes, FunctionLength::ONE); - - JSHandle bufferGetter = - CreateGetter(env, BuiltinsTypedArray::GetBuffer, "buffer", FunctionLength::ZERO); - JSHandle bufferKey(factory_->NewFromASCII("buffer")); - SetGetter(typedArrFuncPrototype, bufferKey, bufferGetter); - - JSHandle byteLengthGetter = - CreateGetter(env, BuiltinsTypedArray::GetByteLength, "byteLength", FunctionLength::ZERO); - JSHandle byteLengthKey(factory_->NewFromASCII("byteLength")); - SetGetter(typedArrFuncPrototype, byteLengthKey, byteLengthGetter); - - JSHandle byteOffsetGetter = - CreateGetter(env, BuiltinsTypedArray::GetByteOffset, "byteOffset", FunctionLength::ZERO); - JSHandle byteOffsetKey(factory_->NewFromASCII("byteOffset")); - SetGetter(typedArrFuncPrototype, byteOffsetKey, byteOffsetGetter); - - JSHandle lengthGetter = - CreateGetter(env, BuiltinsTypedArray::GetLength, "length", FunctionLength::ZERO); - JSHandle lengthKey(factory_->NewFromASCII("length")); - SetGetter(typedArrFuncPrototype, lengthKey, lengthGetter); + for (const base::BuiltinFunctionEntry &entry: BuiltinsTypedArray::GetTypedArrayPrototypeFunctions()) { + SetFunction(env, typedArrFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } + // TypedArray.prototype get accessor + for (const base::BuiltinFunctionEntry &entry: BuiltinsTypedArray::GetTypedArrayPrototypeAccessors()) { + JSHandle getter = + CreateGetter(env, entry.GetEntrypoint(), entry.GetName(), entry.GetLength()); + JSHandle key(factory_->NewFromASCII(entry.GetName())); + SetGetter(typedArrFuncPrototype, key, getter); + } - // %TypedArray%.prototype.toString() + // %TypedArray%.prototype.toString(), which is strictly equal to Array.prototype.toString JSHandle arrFuncPrototype = env->GetArrayPrototype(); JSHandle toStringFunc = JSObject::GetMethod(thread_, arrFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString()); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor toStringDesc(thread_, toStringFunc, true, false, true); 
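The four hand-written buffer/byteLength/byteOffset/length getter blocks become one loop over an accessor table. The sketch below models that loop with an assumed entry layout and an ordinary map standing in for the prototype object; the patch's real table reuses the same entry type as the function tables, paired with CreateGetter/SetGetter.

#include <functional>
#include <map>
#include <string>

using Getter = std::function<int()>;

struct AccessorEntry {
    std::string name;
    Getter getter;
};

int main()
{
    // Assumed entry layout for illustration.
    const AccessorEntry accessors[] = {
        {"buffer",     [] { return 0; }},
        {"byteLength", [] { return 16; }},
        {"byteOffset", [] { return 0; }},
        {"length",     [] { return 4; }},
    };

    // One getter registration per entry replaces four copied blocks.
    std::map<std::string, Getter> prototype;
    for (const AccessorEntry &entry : accessors) {
        prototype[entry.name] = entry.getter;
    }
    return prototype["length"]() == 4 ? 0 : 1;
}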
JSObject::DefineOwnProperty(thread_, typedArrFuncPrototype, thread_->GlobalConstants()->GetHandledToStringString(), toStringDesc); @@ -2236,6 +2062,7 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand JSHandle iteratorSymbol = env->GetIteratorSymbol(); JSHandle valuesFunc = JSObject::GetMethod(thread_, JSHandle::Cast(typedArrFuncPrototype), values); + RETURN_IF_ABRUPT_COMPLETION(thread_); PropertyDescriptor iteartorDesc(thread_, valuesFunc, true, false, true); JSObject::DefineOwnProperty(thread_, typedArrFuncPrototype, iteratorSymbol, iteartorDesc); @@ -2246,8 +2073,10 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand SetGetter(typedArrFuncPrototype, toStringTagSymbol, toStringTagGetter); // TypedArray method - SetFunction(env, typedArrayFunction, "from", BuiltinsTypedArray::From, FunctionLength::ONE); - SetFunction(env, typedArrayFunction, "of", BuiltinsTypedArray::Of, FunctionLength::ZERO); + for (const base::BuiltinFunctionEntry &entry: BuiltinsTypedArray::GetTypedArrayFunctions()) { + SetFunction(env, typedArrayFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // 22.2.2.4 get %TypedArray% [ @@species ] JSHandle speciesSymbol = env->GetSpeciesSymbol(); @@ -2257,23 +2086,20 @@ void Builtins::InitializeTypedArray(const JSHandle &env, const JSHand env->SetTypedArrayFunction(thread_, typedArrayFunction.GetTaggedValue()); env->SetTypedArrayPrototype(thread_, typedArrFuncPrototype); + env->SetTypedArrayProtoValuesFunction(thread_, valuesFunc); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::TYPED_ARRAY, + typedArrayFunction->GetJSHClass(), + typedArrFuncPrototype->GetJSHClass()); JSHandle specificTypedArrayFuncClass = factory_->NewEcmaHClass(JSFunction::SIZE, JSType::JS_FUNCTION, env->GetTypedArrayFunction()); specificTypedArrayFuncClass->SetConstructor(true); env->SetSpecificTypedArrayFunctionClass(thread_, specificTypedArrayFuncClass); - InitializeInt8Array(env, typedArrFuncInstanceHClass); - InitializeUint8Array(env, typedArrFuncInstanceHClass); - InitializeUint8ClampedArray(env, typedArrFuncInstanceHClass); - InitializeInt16Array(env, typedArrFuncInstanceHClass); - InitializeUint16Array(env, typedArrFuncInstanceHClass); - InitializeInt32Array(env, typedArrFuncInstanceHClass); - InitializeUint32Array(env, typedArrFuncInstanceHClass); - InitializeFloat32Array(env, typedArrFuncInstanceHClass); - InitializeFloat64Array(env, typedArrFuncInstanceHClass); - InitializeBigInt64Array(env, typedArrFuncInstanceHClass); - InitializeBigUint64Array(env, typedArrFuncInstanceHClass); +#define BUILTIN_TYPED_ARRAY_CALL_INITIALIZE(Type, TYPE, bytesPerElement) \ + Initialize##Type(env, typedArrFuncInstanceHClass); + BUILTIN_TYPED_ARRAY_TYPES(BUILTIN_TYPED_ARRAY_CALL_INITIALIZE) +#undef BUILTIN_TYPED_ARRAY_CALL_INITIALIZE } void Builtins::LazyInitializeTypedArray(const JSHandle &env) const @@ -2287,299 +2113,55 @@ void Builtins::LazyInitializeTypedArray(const JSHandle &env) const env->SetTypedArrayFunction(thread_, accessor); env->SetTypedArrayPrototype(thread_, accessor); env->SetSpecificTypedArrayFunctionClass(thread_, accessor); - LazyInitializeInt8Array(env); - LazyInitializeUint8Array(env); - LazyInitializeUint8ClampedArray(env); - LazyInitializeInt16Array(env); - LazyInitializeUint16Array(env); - LazyInitializeInt32Array(env); - LazyInitializeUint32Array(env); - LazyInitializeFloat32Array(env); - LazyInitializeFloat64Array(env); - LazyInitializeBigInt64Array(env); - LazyInitializeBigUint64Array(env); -} - 
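BUILTIN_TYPED_ARRAY_TYPES is an X-macro: one list of (Type, TYPE, bytesPerElement) triples expanded at several call sites, which is what allows the eleven near-identical Initialize*/LazyInitialize* bodies above to be deleted. Here is a compilable standalone demonstration of the technique; the triples shown are inferred from the call-site shape, not quoted from the real header, and the list is abbreviated.

#include <cstdio>

#define BUILTIN_TYPED_ARRAY_TYPES(V)  \
    V(Int8Array,    INT8_ARRAY,    1) \
    V(Uint8Array,   UINT8_ARRAY,   1) \
    V(Int16Array,   INT16_ARRAY,   2) \
    V(Uint32Array,  UINT32_ARRAY,  4) \
    V(Float64Array, FLOAT64_ARRAY, 8)

// Expansion site 1: generate one tiny initializer per typed-array kind.
#define DEFINE_INITIALIZE(Type, TYPE, bytesPerElement)                      \
    void Initialize##Type()                                                 \
    {                                                                       \
        std::printf(#Type ": BYTES_PER_ELEMENT=%d\n", (bytesPerElement));   \
    }
BUILTIN_TYPED_ARRAY_TYPES(DEFINE_INITIALIZE)
#undef DEFINE_INITIALIZE

// Expansion site 2: call every generated initializer, as the patch does in
// InitializeTypedArray and LazyInitializeTypedArray.
int main()
{
#define CALL_INITIALIZE(Type, TYPE, bytesPerElement) Initialize##Type();
    BUILTIN_TYPED_ARRAY_TYPES(CALL_INITIALIZE)
#undef CALL_INITIALIZE
}

Adding a twelfth typed-array kind then touches only the list, not each expansion site.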
-void Builtins::InitializeInt8Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Int8Array.prototype - JSHandle int8ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle int8ArrFuncPrototypeValue(int8ArrFuncPrototype); - - // Int8Array.prototype_or_hclass - JSHandle int8ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_INT8_ARRAY, int8ArrFuncPrototypeValue); - - // Int8Array = new Function() - JSHandle int8ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Int8ArrayConstructor)); - InitializeCtor(env, int8ArrFuncPrototype, int8ArrayFunction, "Int8Array", FunctionLength::THREE); - - int8ArrayFunction->SetProtoOrHClass(thread_, int8ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 1; - SetConstant(int8ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(int8ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetInt8ArrayFunction(thread_, int8ArrayFunction); -} - -void Builtins::InitializeUint8Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Uint8Array.prototype - JSHandle uint8ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle uint8ArrFuncPrototypeValue(uint8ArrFuncPrototype); - - // Uint8Array.prototype_or_hclass - JSHandle uint8ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_UINT8_ARRAY, uint8ArrFuncPrototypeValue); - - // Uint8Array = new Function() - JSHandle uint8ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Uint8ArrayConstructor)); - InitializeCtor(env, uint8ArrFuncPrototype, uint8ArrayFunction, "Uint8Array", FunctionLength::THREE); - - uint8ArrayFunction->SetProtoOrHClass(thread_, uint8ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 1; - SetConstant(uint8ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(uint8ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetUint8ArrayFunction(thread_, uint8ArrayFunction); -} - -#define TYPED_ARRAY_LAZY_INITIALIZE(type) \ - void Builtins::LazyInitialize##type(const JSHandle &env) const \ - { \ - [[maybe_unused]] EcmaHandleScope scope(thread_); \ - JSHandle globalObject(thread_, env->GetGlobalObject()); \ - JSHandle key(factory_->NewFromUtf8(#type)); \ - auto accessor = factory_->NewInternalAccessor(nullptr, reinterpret_cast(BuiltinsLazyCallback::type)); \ - SetLazyAccessor(globalObject, key, accessor); \ - env->Set##type##Function(thread_, accessor); \ - } - -ITERATE_TYPED_ARRAY(TYPED_ARRAY_LAZY_INITIALIZE) -#undef TYPED_ARRAY_LAZY_INITIALIZE - -void Builtins::InitializeUint8ClampedArray(const JSHandle &env, - const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Uint8ClampedArray.prototype - JSHandle uint8ClampedArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle uint8ClampedArrFuncPrototypeValue(uint8ClampedArrFuncPrototype); - - // Uint8ClampedArray.prototype_or_hclass - JSHandle uint8ClampedArrFuncInstanceHClass = - factory_->NewEcmaHClass(panda::ecmascript::JSTypedArray::SIZE, JSType::JS_UINT8_CLAMPED_ARRAY, - uint8ClampedArrFuncPrototypeValue); - - // Uint8ClampedArray = new Function() - JSHandle 
uint8ClampedArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Uint8ClampedArrayConstructor)); - InitializeCtor(env, uint8ClampedArrFuncPrototype, uint8ClampedArrayFunction, "Uint8ClampedArray", - FunctionLength::THREE); - - uint8ClampedArrayFunction->SetProtoOrHClass(thread_, uint8ClampedArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 1; - SetConstant(uint8ClampedArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(uint8ClampedArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetUint8ClampedArrayFunction(thread_, uint8ClampedArrayFunction); -} - -void Builtins::InitializeInt16Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Int16Array.prototype - JSHandle int16ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle int16ArrFuncPrototypeValue(int16ArrFuncPrototype); - - // Int16Array.prototype_or_hclass - JSHandle int16ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_INT16_ARRAY, int16ArrFuncPrototypeValue); - - // Int16Array = new Function() - JSHandle int16ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Int16ArrayConstructor)); - InitializeCtor(env, int16ArrFuncPrototype, int16ArrayFunction, "Int16Array", FunctionLength::THREE); - - int16ArrayFunction->SetProtoOrHClass(thread_, int16ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 2; - SetConstant(int16ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(int16ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetInt16ArrayFunction(thread_, int16ArrayFunction); -} - -void Builtins::InitializeUint16Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Uint16Array.prototype - JSHandle uint16ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle uint16ArrFuncPrototypeValue(uint16ArrFuncPrototype); - - // Uint16Array.prototype_or_hclass - JSHandle uint16ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_UINT16_ARRAY, uint16ArrFuncPrototypeValue); - - // Uint16Array = new Function() - JSHandle uint16ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Uint16ArrayConstructor)); - InitializeCtor(env, uint16ArrFuncPrototype, uint16ArrayFunction, "Uint16Array", FunctionLength::THREE); - - uint16ArrayFunction->SetProtoOrHClass(thread_, uint16ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 2; - SetConstant(uint16ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(uint16ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetUint16ArrayFunction(thread_, uint16ArrayFunction); -} - -void Builtins::InitializeInt32Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Int32Array.prototype - JSHandle int32ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle int32ArrFuncPrototypeValue(int32ArrFuncPrototype); - - // Int32Array.prototype_or_hclass - JSHandle int32ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, 
JSType::JS_INT32_ARRAY, int32ArrFuncPrototypeValue); - - // Int32Array = new Function() - JSHandle int32ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Int32ArrayConstructor)); - InitializeCtor(env, int32ArrFuncPrototype, int32ArrayFunction, "Int32Array", FunctionLength::THREE); - - int32ArrayFunction->SetProtoOrHClass(thread_, int32ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 4; - SetConstant(int32ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(int32ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetInt32ArrayFunction(thread_, int32ArrayFunction); -} - -void Builtins::InitializeUint32Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Uint32Array.prototype - JSHandle uint32ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle uint32ArrFuncPrototypeValue(uint32ArrFuncPrototype); - - // Uint32Array.prototype_or_hclass - JSHandle uint32ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_UINT32_ARRAY, uint32ArrFuncPrototypeValue); - - // Uint32Array = new Function() - JSHandle uint32ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Uint32ArrayConstructor)); - InitializeCtor(env, uint32ArrFuncPrototype, uint32ArrayFunction, "Uint32Array", FunctionLength::THREE); - - uint32ArrayFunction->SetProtoOrHClass(thread_, uint32ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 4; - SetConstant(uint32ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(uint32ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetUint32ArrayFunction(thread_, uint32ArrayFunction); -} - -void Builtins::InitializeFloat32Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Float32Array.prototype - JSHandle float32ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle float32ArrFuncPrototypeValue(float32ArrFuncPrototype); - - // Float32Array.prototype_or_hclass - JSHandle float32ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_FLOAT32_ARRAY, float32ArrFuncPrototypeValue); - - // Float32Array = new Function() - JSHandle float32ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Float32ArrayConstructor)); - InitializeCtor(env, float32ArrFuncPrototype, float32ArrayFunction, "Float32Array", FunctionLength::THREE); - - float32ArrayFunction->SetProtoOrHClass(thread_, float32ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 4; - SetConstant(float32ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(float32ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetFloat32ArrayFunction(thread_, float32ArrayFunction); -} - -void Builtins::InitializeFloat64Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // Float64Array.prototype - JSHandle float64ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle float64ArrFuncPrototypeValue(float64ArrFuncPrototype); - - // Float64Array.prototype_or_hclass - JSHandle float64ArrFuncInstanceHClass = 
factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_FLOAT64_ARRAY, float64ArrFuncPrototypeValue); - - // Float64Array = new Function() - JSHandle float64ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::Float64ArrayConstructor)); - InitializeCtor(env, float64ArrFuncPrototype, float64ArrayFunction, "Float64Array", FunctionLength::THREE); - float64ArrayFunction->SetProtoOrHClass(thread_, float64ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 8; - SetConstant(float64ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(float64ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetFloat64ArrayFunction(thread_, float64ArrayFunction); -} - -void Builtins::InitializeBigInt64Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // BigInt64Array.prototype - JSHandle bigInt64ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle bigInt64ArrFuncPrototypeValue(bigInt64ArrFuncPrototype); - - // BigInt64Array.prototype_or_hclass - JSHandle bigInt64ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_BIGINT64_ARRAY, bigInt64ArrFuncPrototypeValue); - - // BigInt64Array = new Function() - JSHandle bigInt64ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::BigInt64ArrayConstructor)); - InitializeCtor(env, bigInt64ArrFuncPrototype, bigInt64ArrayFunction, "BigInt64Array", FunctionLength::THREE); - - bigInt64ArrayFunction->SetProtoOrHClass(thread_, bigInt64ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 8; - SetConstant(bigInt64ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(bigInt64ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetBigInt64ArrayFunction(thread_, bigInt64ArrayFunction); -} - -void Builtins::InitializeBigUint64Array(const JSHandle &env, const JSHandle &objFuncClass) const -{ - [[maybe_unused]] EcmaHandleScope scope(thread_); - // BigUint64Array.prototype - JSHandle bigUint64ArrFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); - JSHandle bigUint64ArrFuncPrototypeValue(bigUint64ArrFuncPrototype); - - // BigUint64Array.prototype_or_hclass - JSHandle bigUint64ArrFuncInstanceHClass = factory_->NewEcmaHClass( - panda::ecmascript::JSTypedArray::SIZE, JSType::JS_BIGUINT64_ARRAY, bigUint64ArrFuncPrototypeValue); - - // BigUint64Array = new Function() - JSHandle bigUint64ArrayFunction = factory_->NewSpecificTypedArrayFunction( - env, reinterpret_cast(BuiltinsTypedArray::BigUint64ArrayConstructor)); - InitializeCtor(env, bigUint64ArrFuncPrototype, bigUint64ArrayFunction, "BigUint64Array", FunctionLength::THREE); - - bigUint64ArrayFunction->SetProtoOrHClass(thread_, bigUint64ArrFuncInstanceHClass.GetTaggedValue()); - - constexpr int bytesPerElement = 8; - SetConstant(bigUint64ArrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - SetConstant(JSHandle(bigUint64ArrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); - env->SetBigUint64ArrayFunction(thread_, bigUint64ArrayFunction); -} +#define BUILTIN_TYPED_ARRAY_CALL_LAZY_INITIALIZE(Type, TYPE, bytesPerElement) \ + LazyInitialize##Type(env); + BUILTIN_TYPED_ARRAY_TYPES(BUILTIN_TYPED_ARRAY_CALL_LAZY_INITIALIZE) +#undef BUILTIN_TYPED_ARRAY_CALL_LAZY_INITIALIZE +} + 
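The generated initializers also call SetInitialBuiltinHClass, as do the Map, String, Array, TypedArray and DataView paths in this patch: the freshly built constructor and prototype hclasses are recorded per BuiltinTypeId so compiled code can later check whether a builtin still has its pristine shape. A rough model of such a registry follows; the layout, enum values and method names here are assumptions for illustration only.

#include <array>
#include <cstddef>

enum class BuiltinTypeId : std::size_t { MAP, STRING, ARRAY, TYPED_ARRAY, DATA_VIEW, COUNT };

struct HClass {};

struct BuiltinHClassEntries {
    struct Entry {
        const HClass *ctor = nullptr;
        const HClass *proto = nullptr;
    };
    std::array<Entry, static_cast<std::size_t>(BuiltinTypeId::COUNT)> entries {};

    // Record the just-built constructor/prototype hclasses for one builtin.
    void Set(BuiltinTypeId id, const HClass *ctor, const HClass *proto)
    {
        Entry &e = entries[static_cast<std::size_t>(id)];
        e.ctor = ctor;
        e.proto = proto;
    }
};

int main()
{
    HClass arrayCtorClass;
    HClass arrayProtoClass;
    BuiltinHClassEntries registry;
    registry.Set(BuiltinTypeId::ARRAY, &arrayCtorClass, &arrayProtoClass);
}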
+#define BUILTIN_TYPED_ARRAY_DEFINE_INITIALIZE(Type, TYPE, bytesPerElement) \ +void Builtins::Initialize##Type(const JSHandle &env, const JSHandle &arrFuncClass) const \ +{ \ + [[maybe_unused]] EcmaHandleScope scope(thread_); \ + /* %TypedArray%.prototype (where %TypedArray% is one of Int8Array, Uint8Array, etc.) */ \ + JSHandle arrFuncPrototype = factory_->NewJSObjectWithInit(arrFuncClass); \ + JSHandle arrFuncPrototypeValue(arrFuncPrototype); \ + /* %TypedArray%.prototype_or_hclass */ \ + JSHandle arrFuncInstanceHClass = factory_->NewEcmaHClass( \ + panda::ecmascript::JSTypedArray::SIZE, JSType::JS_##TYPE, arrFuncPrototypeValue); \ + /* %TypedArray% = new Function() */ \ + JSHandle arrayFunction = factory_->NewSpecificTypedArrayFunction( \ + env, reinterpret_cast(BuiltinsTypedArray::Type##Constructor)); \ + InitializeCtor(env, arrFuncPrototype, arrayFunction, #Type, FunctionLength::THREE); \ + \ + arrayFunction->SetProtoOrHClass(thread_, arrFuncInstanceHClass.GetTaggedValue()); \ + SetConstant(arrFuncPrototype, "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); \ + SetConstant(JSHandle(arrayFunction), "BYTES_PER_ELEMENT", JSTaggedValue(bytesPerElement)); \ + env->Set##Type##Function(thread_, arrayFunction); \ + env->Set##Type##FunctionPrototype(thread_, arrFuncPrototypeValue); \ + /* Initializes HClass record of %TypedArray% */ \ + thread_->SetInitialBuiltinHClass(BuiltinTypeId::TYPE, \ + arrayFunction->GetJSHClass(), \ + arrFuncPrototype->GetJSHClass()); \ +} + +BUILTIN_TYPED_ARRAY_TYPES(BUILTIN_TYPED_ARRAY_DEFINE_INITIALIZE) +#undef BUILTIN_TYPED_ARRAY_DEFINE_INITIALIZE + +#define BUILTIN_TYPED_ARRAY_DEFINE_LAZY_INITIALIZE(Type, TYPE, bytesPerElement) \ +void Builtins::LazyInitialize##Type(const JSHandle &env) const \ +{ \ + [[maybe_unused]] EcmaHandleScope scope(thread_); \ + JSHandle globalObject(thread_, env->GetGlobalObject()); \ + JSHandle key(factory_->NewFromUtf8(#Type)); \ + auto accessor = factory_->NewInternalAccessor(nullptr, reinterpret_cast(BuiltinsLazyCallback::Type)); \ + SetLazyAccessor(globalObject, key, accessor); \ + env->Set##Type##Function(thread_, accessor); \ +} + +BUILTIN_TYPED_ARRAY_TYPES(BUILTIN_TYPED_ARRAY_DEFINE_LAZY_INITIALIZE) +#undef BUILTIN_TYPED_ARRAY_DEFINE_LAZY_INITIALIZE void Builtins::InitializeArrayBuffer(const JSHandle &env, const JSHandle &objFuncClass) const { @@ -2642,20 +2224,11 @@ void Builtins::InitializeReflect(const JSHandle &env, factory_->NewEcmaHClass(JSObject::SIZE, JSType::JS_OBJECT, objFuncPrototypeVal); JSHandle reflectObject = factory_->NewJSObjectWithInit(reflectHClass); - SetFunction(env, reflectObject, "apply", Reflect::ReflectApply, FunctionLength::THREE); - SetFunction(env, reflectObject, "construct", Reflect::ReflectConstruct, FunctionLength::TWO); - SetFunction(env, reflectObject, "defineProperty", Reflect::ReflectDefineProperty, FunctionLength::THREE); - SetFunction(env, reflectObject, "deleteProperty", Reflect::ReflectDeleteProperty, FunctionLength::TWO); - SetFunction(env, reflectObject, "get", Reflect::ReflectGet, FunctionLength::TWO); - SetFunction(env, reflectObject, "getOwnPropertyDescriptor", Reflect::ReflectGetOwnPropertyDescriptor, - FunctionLength::TWO); - SetFunction(env, reflectObject, "getPrototypeOf", Reflect::ReflectGetPrototypeOf, FunctionLength::ONE); - SetFunction(env, reflectObject, "has", Reflect::ReflectHas, FunctionLength::TWO); - SetFunction(env, reflectObject, "isExtensible", Reflect::ReflectIsExtensible, FunctionLength::ONE); - SetFunction(env, reflectObject, "ownKeys", Reflect::ReflectOwnKeys, 
FunctionLength::ONE); - SetFunction(env, reflectObject, "preventExtensions", Reflect::ReflectPreventExtensions, FunctionLength::ONE); - SetFunction(env, reflectObject, "set", Reflect::ReflectSet, FunctionLength::THREE); - SetFunction(env, reflectObject, "setPrototypeOf", Reflect::ReflectSetPrototypeOf, FunctionLength::TWO); + // Reflect functions + for (const base::BuiltinFunctionEntry &entry: Reflect::GetReflectFunctions()) { + SetFunction(env, reflectObject, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } JSHandle reflectString(factory_->NewFromASCII("Reflect")); JSHandle globalObject(thread_, env->GetGlobalObject()); @@ -2718,7 +2291,8 @@ void Builtins::LazyInitializeSharedArrayBuffer(const JSHandle &env) c [[maybe_unused]] EcmaHandleScope scope(thread_); JSHandle globalObject(thread_, env->GetGlobalObject()); JSHandle key(factory_->NewFromUtf8("SharedArrayBuffer")); - auto accessor = factory_->NewInternalAccessor(nullptr, reinterpret_cast(BuiltinsLazyCallback::SharedArrayBuffer)); + auto accessor = + factory_->NewInternalAccessor(nullptr, reinterpret_cast(BuiltinsLazyCallback::SharedArrayBuffer)); SetLazyAccessor(globalObject, key, accessor); env->SetSharedArrayBufferFunction(thread_, accessor); } @@ -2738,18 +2312,15 @@ void Builtins::InitializePromise(const JSHandle &env, const JSHandle< JSHandle(promiseFunction)->SetFunctionPrototype(thread_, promiseFuncInstanceHClass.GetTaggedValue()); // Promise method - SetFunction(env, promiseFunction, "all", Promise::All, FunctionLength::ONE); - SetFunction(env, promiseFunction, "race", Promise::Race, FunctionLength::ONE); - SetFunction(env, promiseFunction, "resolve", Promise::Resolve, FunctionLength::ONE); - SetFunction(env, promiseFunction, "reject", Promise::Reject, FunctionLength::ONE); - SetFunction(env, promiseFunction, "any", Promise::Any, FunctionLength::ONE); - SetFunction(env, promiseFunction, "allSettled", Promise::AllSettled, FunctionLength::ONE); - + for (const base::BuiltinFunctionEntry &entry: Promise::GetPromiseFunctions()) { + SetFunction(env, promiseFunction, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // promise.prototype method - SetFunction(env, promiseFuncPrototype, "catch", Promise::Catch, FunctionLength::ONE); - SetFunction(env, promiseFuncPrototype, "then", Promise::Then, FunctionLength::TWO); - SetFunction(env, promiseFuncPrototype, "finally", Promise::Finally, FunctionLength::ONE); - + for (const base::BuiltinFunctionEntry &entry: Promise::GetPromisePrototypeFunctions()) { + SetFunction(env, promiseFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // Promise.prototype [ @@toStringTag ] SetStringTagSymbol(env, promiseFuncPrototype, "Promise"); @@ -2828,11 +2399,13 @@ void Builtins::InitializePromiseJob(const JSHandle &env) env->SetDynamicImportJob(thread_, func); } -void Builtins::InitializeDataView(const JSHandle &env, const JSHandle &objFuncClass) const +void Builtins::InitializeDataView(const JSHandle &env, JSHandle objFuncPrototypeVal) const { [[maybe_unused]] EcmaHandleScope scope(thread_); // ArrayBuffer.prototype - JSHandle dataViewFuncPrototype = factory_->NewJSObjectWithInit(objFuncClass); + JSHandle dataViewFuncPrototypeHClass = factory_->NewEcmaHClass( + JSObject::SIZE, DataView::GetNumPrototypeInlinedProperties(), JSType::JS_OBJECT, objFuncPrototypeVal); + JSHandle dataViewFuncPrototype = factory_->NewJSObjectWithInit(dataViewFuncPrototypeHClass); JSHandle 
dataViewFuncPrototypeValue(dataViewFuncPrototype); // ArrayBuffer.prototype_or_hclass @@ -2845,26 +2418,10 @@ void Builtins::InitializeDataView(const JSHandle &env, const JSHandle JSHandle(dataViewFunction)->SetProtoOrHClass(thread_, dataViewFuncInstanceHClass.GetTaggedValue()); // DataView.prototype method - SetFunction(env, dataViewFuncPrototype, "getFloat32", DataView::GetFloat32, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getFloat64", DataView::GetFloat64, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getInt8", DataView::GetInt8, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getInt16", DataView::GetInt16, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getInt32", DataView::GetInt32, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getUint8", DataView::GetUint8, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getUint16", DataView::GetUint16, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getUint32", DataView::GetUint32, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getBigInt64", DataView::GetBigInt64, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "getBigUint64", DataView::GetBigUint64, FunctionLength::ONE); - SetFunction(env, dataViewFuncPrototype, "setFloat32", DataView::SetFloat32, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setFloat64", DataView::SetFloat64, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setInt8", DataView::SetInt8, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setInt16", DataView::SetInt16, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setInt32", DataView::SetInt32, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setUint8", DataView::SetUint8, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setUint16", DataView::SetUint16, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setUint32", DataView::SetUint32, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setBigInt64", DataView::SetBigInt64, FunctionLength::TWO); - SetFunction(env, dataViewFuncPrototype, "setBigUint64", DataView::SetBigUint64, FunctionLength::TWO); + for (const base::BuiltinFunctionEntry &entry: DataView::GetDataViewPrototypeFunctions()) { + SetFunction(env, dataViewFuncPrototype, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } // 24.2.4.1 get DataView.prototype.buffer JSHandle bufferGetter = CreateGetter(env, DataView::GetBuffer, "buffer", FunctionLength::ZERO); @@ -2884,7 +2441,12 @@ void Builtins::InitializeDataView(const JSHandle &env, const JSHandle // 24.2.4.21 DataView.prototype[ @@toStringTag ] SetStringTagSymbol(env, dataViewFuncPrototype, "DataView"); + env->SetDataViewFunction(thread_, dataViewFunction.GetTaggedValue()); + env->SetDataViewPrototype(thread_, dataViewFuncPrototype.GetTaggedValue()); + thread_->SetInitialBuiltinHClass(BuiltinTypeId::DATA_VIEW, + dataViewFunction->GetJSHClass(), + dataViewFuncPrototype->GetJSHClass()); } void Builtins::LazyInitializeDataView(const JSHandle &env) const @@ -2899,7 +2461,7 @@ void Builtins::LazyInitializeDataView(const JSHandle &env) const JSHandle Builtins::NewBuiltinConstructor(const JSHandle &env, const JSHandle &prototype, EcmaEntrypoint ctorFunc, - const char *name, int length, + std::string_view name, int length, kungfu::BuiltinsStubCSigns::ID builtinId) const { JSHandle ctor = @@ -2910,7 +2472,7 
@@ JSHandle Builtins::NewBuiltinConstructor(const JSHandle & JSHandle Builtins::NewBuiltinCjsCtor(const JSHandle &env, const JSHandle &prototype, EcmaEntrypoint ctorFunc, - const char *name, int length) const + std::string_view name, int length) const { JSHandle ctor = factory_->NewJSFunction(env, reinterpret_cast(ctorFunc), FunctionKind::BUILTIN_CONSTRUCTOR); @@ -2944,7 +2506,7 @@ JSHandle Builtins::NewFunction(const JSHandle &env, const return function; } -void Builtins::SetFunction(const JSHandle &env, const JSHandle &obj, const char *key, +void Builtins::SetFunction(const JSHandle &env, const JSHandle &obj, std::string_view key, EcmaEntrypoint func, int length, kungfu::BuiltinsStubCSigns::ID builtinId) const { JSHandle keyString(factory_->NewFromUtf8(key)); @@ -2960,7 +2522,25 @@ void Builtins::SetFunction(const JSHandle &env, const JSHandle &env, const JSHandle &obj, const char *key, +JSHandle Builtins::SetAndReturnFunction(const JSHandle &env, const JSHandle &obj, + const char *key, EcmaEntrypoint func, int length, + kungfu::BuiltinsStubCSigns::ID builtinId) const +{ + JSHandle keyString(factory_->NewFromUtf8(key)); + return SetAndReturnFunction(env, obj, keyString, func, length, builtinId); +} + +JSHandle Builtins::SetAndReturnFunction(const JSHandle &env, const JSHandle &obj, + const JSHandle &key, EcmaEntrypoint func, int length, + kungfu::BuiltinsStubCSigns::ID builtinId) const +{ + JSHandle function(NewFunction(env, key, func, length, builtinId)); + PropertyDescriptor descriptor(thread_, JSHandle(function), true, false, true); + JSObject::DefineOwnProperty(thread_, obj, key, descriptor); + return function; +} + +void Builtins::SetFrozenFunction(const JSHandle &env, const JSHandle &obj, std::string_view key, EcmaEntrypoint func, int length) const { JSHandle keyString(factory_->NewFromUtf8(key)); @@ -2971,8 +2551,8 @@ void Builtins::SetFrozenFunction(const JSHandle &env, const JSHandle< template void Builtins::SetFunctionAtSymbol(const JSHandle &env, const JSHandle &obj, - const JSHandle &symbol, const char *name, EcmaEntrypoint func, - int length) const + const JSHandle &symbol, std::string_view name, + EcmaEntrypoint func, int length) const { JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func)); JSFunction::SetFunctionLength(thread_, function, JSTaggedValue(length)); @@ -2997,7 +2577,40 @@ void Builtins::SetFunctionAtSymbol(const JSHandle &env, const JSHandl JSObject::DefineOwnProperty(thread_, obj, symbol, descriptor); } -void Builtins::SetStringTagSymbol(const JSHandle &env, const JSHandle &obj, const char *key) const +template +JSHandle Builtins::SetAndReturnFunctionAtSymbol(const JSHandle &env, + const JSHandle &obj, + const JSHandle &symbol, + std::string_view name, + EcmaEntrypoint func, + int length) const +{ + JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func)); + JSFunction::SetFunctionLength(thread_, function, JSTaggedValue(length)); + JSHandle nameString(factory_->NewFromUtf8(name)); + JSHandle baseFunction(function); + JSHandle handleUndefine(thread_, JSTaggedValue::Undefined()); + JSFunction::SetFunctionName(thread_, baseFunction, nameString, handleUndefine); + // NOLINTNEXTLINE(readability-braces-around-statements, bugprone-suspicious-semicolon) + if constexpr (flag == JSSymbol::SYMBOL_TO_PRIMITIVE_TYPE) { + PropertyDescriptor descriptor(thread_, JSHandle::Cast(function), false, false, true); + JSObject::DefineOwnProperty(thread_, obj, symbol, descriptor); + return JSHandle(function); + } else if constexpr (flag == 
JSSymbol::SYMBOL_HAS_INSTANCE_TYPE) { // NOLINTE(readability-braces-around-statements) + // ecma 19.2.3.6 Function.prototype[@@hasInstance] has the attributes + // { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }. + PropertyDescriptor descriptor(thread_, JSHandle::Cast(function), false, false, false); + JSObject::DefineOwnProperty(thread_, obj, symbol, descriptor); + env->SetHasInstanceFunction(thread_, function); + return JSHandle(function); + } + PropertyDescriptor descriptor(thread_, JSHandle::Cast(function), true, false, true); + JSObject::DefineOwnProperty(thread_, obj, symbol, descriptor); + return JSHandle(function); +} + +void Builtins::SetStringTagSymbol(const JSHandle &env, const JSHandle &obj, + std::string_view key) const { JSHandle tag(factory_->NewFromUtf8(key)); JSHandle symbol = env->GetToStringTagSymbol(); @@ -3005,43 +2618,56 @@ void Builtins::SetStringTagSymbol(const JSHandle &env, const JSHandle JSObject::DefineOwnProperty(thread_, obj, symbol, desc); } -JSHandle Builtins::CreateGetter(const JSHandle &env, EcmaEntrypoint func, const char *name, - int length) const +JSHandle Builtins::CreateGetter(const JSHandle &env, EcmaEntrypoint func, + std::string_view name, int length) const +{ + JSHandle funcName(factory_->NewFromUtf8(name)); + return CreateGetter(env, func, funcName, length); +} + +JSHandle Builtins::CreateGetter(const JSHandle &env, EcmaEntrypoint func, + JSHandle key, int length) const { JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func)); JSFunction::SetFunctionLength(thread_, function, JSTaggedValue(length)); - JSHandle funcName(factory_->NewFromUtf8(name)); JSHandle prefix = thread_->GlobalConstants()->GetHandledGetString(); - JSFunction::SetFunctionName(thread_, JSHandle(function), funcName, prefix); + JSFunction::SetFunctionName(thread_, JSHandle(function), key, prefix); return JSHandle(function); } -JSHandle Builtins::CreateSetter(const JSHandle &env, EcmaEntrypoint func, const char *name, - int length) +JSHandle Builtins::CreateSetter(const JSHandle &env, EcmaEntrypoint func, + std::string_view name, int length) const +{ + JSHandle funcName(factory_->NewFromUtf8(name)); + return CreateSetter(env, func, funcName, length); +} + +JSHandle Builtins::CreateSetter(const JSHandle &env, EcmaEntrypoint func, + JSHandle key, int length) const { JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func)); JSFunction::SetFunctionLength(thread_, function, JSTaggedValue(length)); - JSHandle funcName(factory_->NewFromUtf8(name)); JSHandle prefix = thread_->GlobalConstants()->GetHandledSetString(); - JSFunction::SetFunctionName(thread_, JSHandle(function), funcName, prefix); + JSFunction::SetFunctionName(thread_, JSHandle(function), key, prefix); return JSHandle(function); } -void Builtins::SetConstant(const JSHandle &obj, const char *key, JSTaggedValue value) const +void Builtins::SetConstant(const JSHandle &obj, std::string_view key, JSTaggedValue value) const { JSHandle keyString(factory_->NewFromUtf8(key)); PropertyDescriptor descriptor(thread_, JSHandle(thread_, value), false, false, false); JSObject::DefineOwnProperty(thread_, obj, keyString, descriptor); } -void Builtins::SetConstantObject(const JSHandle &obj, const char *key, JSHandle &value) const +void Builtins::SetConstantObject(const JSHandle &obj, std::string_view key, + JSHandle &value) const { JSHandle keyString(factory_->NewFromUtf8(key)); PropertyDescriptor descriptor(thread_, value, false, false, false); JSObject::DefineOwnProperty(thread_, obj, 
keyString, descriptor); } -void Builtins::SetNonConstantObject(const JSHandle &obj, const char *key, +void Builtins::SetNonConstantObject(const JSHandle &obj, std::string_view key, JSHandle &value) const { JSHandle keyString(factory_->NewFromUtf8(key)); @@ -3049,21 +2675,22 @@ void Builtins::SetNonConstantObject(const JSHandle &obj, const char *k JSObject::DefineOwnProperty(thread_, obj, keyString, descriptor); } -void Builtins::SetGlobalThis(const JSHandle &obj, const char *key, const JSHandle &globalValue) +void Builtins::SetGlobalThis(const JSHandle &obj, std::string_view key, + const JSHandle &globalValue) { JSHandle keyString(factory_->NewFromUtf8(key)); PropertyDescriptor descriptor(thread_, globalValue, true, false, true); JSObject::DefineOwnProperty(thread_, obj, keyString, descriptor); } -void Builtins::SetAttribute(const JSHandle &obj, const char *key, const char *value) const +void Builtins::SetAttribute(const JSHandle &obj, std::string_view key, std::string_view value) const { JSHandle keyString(factory_->NewFromUtf8(key)); PropertyDescriptor descriptor(thread_, JSHandle(factory_->NewFromUtf8(value)), true, false, true); JSObject::DefineOwnProperty(thread_, obj, keyString, descriptor); } -void Builtins::SetNoneAttributeProperty(const JSHandle &obj, const char *key, +void Builtins::SetNoneAttributeProperty(const JSHandle &obj, std::string_view key, const JSHandle &value) const { JSHandle keyString(factory_->NewFromUtf8(key)); @@ -3072,14 +2699,20 @@ void Builtins::SetNoneAttributeProperty(const JSHandle &obj, const cha } void Builtins::SetFuncToObjAndGlobal(const JSHandle &env, const JSHandle &globalObject, - const JSHandle &obj, const char *key, EcmaEntrypoint func, int length) + const JSHandle &obj, std::string_view key, + EcmaEntrypoint func, int length, kungfu::BuiltinsStubCSigns::ID builtinId) { - JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func)); + JSHandle function = factory_->NewJSFunction(env, reinterpret_cast(func), + FunctionKind::NORMAL_FUNCTION, builtinId); JSFunction::SetFunctionLength(thread_, function, JSTaggedValue(length)); JSHandle keyString(factory_->NewFromUtf8(key)); JSHandle baseFunction(function); JSHandle handleUndefine(thread_, JSTaggedValue::Undefined()); JSFunction::SetFunctionName(thread_, baseFunction, keyString, handleUndefine); + if (IS_TYPED_BUILTINS_ID(builtinId)) { + auto globalConst = const_cast(thread_->GlobalConstants()); + globalConst->SetConstant(GET_TYPED_CONSTANT_INDEX(builtinId), function); + } PropertyDescriptor descriptor(thread_, JSHandle::Cast(function), true, false, true); JSObject::DefineOwnProperty(thread_, obj, keyString, descriptor); JSObject::DefineOwnProperty(thread_, globalObject, keyString, descriptor); @@ -3286,9 +2919,10 @@ void Builtins::SetGetter(const JSHandle &obj, const JSHandle::Cast(obj), key, accessor, attr); } + #ifdef ARK_SUPPORT_INTL JSHandle Builtins::NewIntlConstructor(const JSHandle &env, const JSHandle &prototype, - EcmaEntrypoint ctorFunc, const char *name, int length) + EcmaEntrypoint ctorFunc, std::string_view name, int length) { JSHandle ctor = factory_->NewJSFunction(env, reinterpret_cast(ctorFunc), FunctionKind::BUILTIN_CONSTRUCTOR); @@ -3312,7 +2946,7 @@ ITERATE_INTL(INTL_LAZY_INITIALIZE) #undef INTL_LAZY_INITIALIZE void Builtins::InitializeIntlCtor(const JSHandle &env, const JSHandle &prototype, - const JSHandle &ctor, const char *name, int length) + const JSHandle &ctor, std::string_view name, int length) { const GlobalEnvConstants *globalConst = thread_->GlobalConstants(); 
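// A minimal sketch (not part of the patch) of the overload pattern the
// CreateGetter/CreateSetter changes above follow: the std::string_view
// overload materializes the name once and forwards to the handle-based
// overload, so call sites that already hold a key handle skip the UTF-8
// conversion. Factory and Handle are illustrative stand-ins, not engine types.
#include <string>
#include <string_view>

struct Handle { std::string value; };
struct Factory {
    // string_view carries its length, so no strlen() walk as with const char*.
    Handle NewFromUtf8(std::string_view s) const { return Handle{std::string{s}}; }
};

Handle CreateAccessorName(Handle key)  // handle-based overload
{
    key.value.insert(0, "get ");       // mirrors applying the handled "get" prefix
    return key;
}

Handle CreateAccessorName(const Factory &f, std::string_view name)  // string_view overload
{
    return CreateAccessorName(f.NewFromUtf8(name));  // convert once, then forward
}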
JSFunction::SetFunctionLength(thread_, ctor, JSTaggedValue(length)); @@ -3669,52 +3303,74 @@ void Builtins::InitializeListFormat(const JSHandle &env) // 13.4.5 Intl.ListFormat.prototype.resolvedOptions() SetFunction(env, lfPrototype, "resolvedOptions", ListFormat::ResolvedOptions, FunctionLength::ZERO); } -#endif +#endif // #ifdef ARK_SUPPORT_INTL + JSHandle Builtins::InitializeArkTools(const JSHandle &env) const { JSHandle tools = factory_->NewEmptyJSObject(); - SetFunction(env, tools, "print", builtins::BuiltinsArkTools::ObjectDump, FunctionLength::ZERO); - SetFunction(env, tools, "excutePendingJob", builtins::BuiltinsArkTools::ExcutePendingJob, FunctionLength::ZERO); - SetFunction(env, tools, "getLexicalEnv", builtins::BuiltinsArkTools::GetLexicalEnv, FunctionLength::ONE); - SetFunction(env, tools, "compareHClass", builtins::BuiltinsArkTools::CompareHClass, FunctionLength::TWO); - SetFunction(env, tools, "dumpHClass", builtins::BuiltinsArkTools::DumpHClass, FunctionLength::ONE); - SetFunction(env, tools, "isTSHClass", builtins::BuiltinsArkTools::IsTSHClass, FunctionLength::ONE); - SetFunction(env, tools, "getHClass", builtins::BuiltinsArkTools::GetHClass, FunctionLength::ONE); - SetFunction(env, tools, "hasTSSubtyping", builtins::BuiltinsArkTools::HasTSSubtyping, FunctionLength::ONE); - SetFunction(env, tools, "isNotHoleProperty", builtins::BuiltinsArkTools::IsNotHoleProperty, - FunctionLength::TWO); - SetFunction(env, tools, "forceFullGC", builtins::BuiltinsArkTools::ForceFullGC, FunctionLength::ZERO); - SetFunction(env, tools, "removeAOTFlag", builtins::BuiltinsArkTools::RemoveAOTFlag, FunctionLength::ONE); -#if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) - SetFunction(env, tools, "startCpuProf", builtins::BuiltinsArkTools::StartCpuProfiler, FunctionLength::ZERO); - SetFunction(env, tools, "stopCpuProf", builtins::BuiltinsArkTools::StopCpuProfiler, FunctionLength::ZERO); -#endif - SetFunction(env, tools, "isPrototype", builtins::BuiltinsArkTools::IsPrototype, FunctionLength::ONE); - SetFunction(env, tools, "timeInUs", builtins::BuiltinsArkTools::TimeInUs, FunctionLength::ZERO); + for (const base::BuiltinFunctionEntry &entry: builtins::BuiltinsArkTools::GetArkToolsFunctions()) { + SetFunction(env, tools, entry.GetName(), entry.GetEntrypoint(), + entry.GetLength(), entry.GetBuiltinStubId()); + } return tools; } void Builtins::InitializeGlobalRegExp(JSHandle &obj) const { - JSHandle emptyString = thread_->GlobalConstants()->GetHandledEmptyString(); - JSHandle newBox1 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$1", newBox1); - JSHandle newBox2 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$2", newBox2); - JSHandle newBox3 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$3", newBox3); - JSHandle newBox4 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$4", newBox4); - JSHandle newBox5 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$5", newBox5); - JSHandle newBox6 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$6", newBox6); - JSHandle newBox7 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$7", newBox7); - JSHandle newBox8 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$8", newBox8); - JSHandle newBox9 = JSHandle(factory_->NewPropertyBox(emptyString)); - SetConstantObject(obj, "$9", newBox9); + // $1 + auto accessor1 = 
factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture1), + reinterpret_cast(RegExp::GetCapture1)); + PropertyDescriptor descriptor1(thread_, JSHandle::Cast(accessor1), true, false, true); + JSHandle dollar1Key = thread_->GlobalConstants()->GetHandledDollarStringOne(); + JSObject::DefineOwnProperty(thread_, obj, dollar1Key, descriptor1); + // $2 + auto accessor2 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture2), + reinterpret_cast(RegExp::GetCapture2)); + PropertyDescriptor descriptor2(thread_, JSHandle::Cast(accessor2), true, false, true); + JSHandle dollar2Key = thread_->GlobalConstants()->GetHandledDollarStringTwo(); + JSObject::DefineOwnProperty(thread_, obj, dollar2Key, descriptor2); + // $3 + auto accessor3 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture3), + reinterpret_cast(RegExp::GetCapture3)); + PropertyDescriptor descriptor3(thread_, JSHandle::Cast(accessor3), true, false, true); + JSHandle dollar3Key = thread_->GlobalConstants()->GetHandledDollarStringThree(); + JSObject::DefineOwnProperty(thread_, obj, dollar3Key, descriptor3); + // $4 + auto accessor4 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture4), + reinterpret_cast(RegExp::GetCapture4)); + PropertyDescriptor descriptor4(thread_, JSHandle::Cast(accessor4), true, false, true); + JSHandle dollar4Key = thread_->GlobalConstants()->GetHandledDollarStringFour(); + JSObject::DefineOwnProperty(thread_, obj, dollar4Key, descriptor4); + // $5 + auto accessor5 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture5), + reinterpret_cast(RegExp::GetCapture5)); + PropertyDescriptor descriptor5(thread_, JSHandle::Cast(accessor5), true, false, true); + JSHandle dollar5Key = thread_->GlobalConstants()->GetHandledDollarStringFive(); + JSObject::DefineOwnProperty(thread_, obj, dollar5Key, descriptor5); + // $6 + auto accessor6 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture6), + reinterpret_cast(RegExp::GetCapture6)); + PropertyDescriptor descriptor6(thread_, JSHandle::Cast(accessor6), true, false, true); + JSHandle dollar6Key = thread_->GlobalConstants()->GetHandledDollarStringSix(); + JSObject::DefineOwnProperty(thread_, obj, dollar6Key, descriptor6); + // $7 + auto accessor7 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture7), + reinterpret_cast(RegExp::GetCapture7)); + PropertyDescriptor descriptor7(thread_, JSHandle::Cast(accessor7), true, false, true); + JSHandle dollar7Key = thread_->GlobalConstants()->GetHandledDollarStringSeven(); + JSObject::DefineOwnProperty(thread_, obj, dollar7Key, descriptor7); + // $8 + auto accessor8 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture8), + reinterpret_cast(RegExp::GetCapture8)); + PropertyDescriptor descriptor8(thread_, JSHandle::Cast(accessor8), true, false, true); + JSHandle dollar8Key = thread_->GlobalConstants()->GetHandledDollarStringEight(); + JSObject::DefineOwnProperty(thread_, obj, dollar8Key, descriptor8); + // $9 + auto accessor9 = factory_->NewInternalAccessor(reinterpret_cast(RegExp::SetCapture9), + reinterpret_cast(RegExp::GetCapture9)); + PropertyDescriptor descriptor9(thread_, JSHandle::Cast(accessor9), true, false, true); + JSHandle dollar9Key = thread_->GlobalConstants()->GetHandledDollarStringNine(); + JSObject::DefineOwnProperty(thread_, obj, dollar9Key, descriptor9); } JSHandle Builtins::InitializeArkPrivate(const JSHandle &env) const diff --git a/ecmascript/builtins/builtins.h b/ecmascript/builtins/builtins.h index 
6c03118514e1cabbbb8be44829062db91dc88512..0b021baa33c4c4843d7475e3a4830267bf67e835 100644 --- a/ecmascript/builtins/builtins.h +++ b/ecmascript/builtins/builtins.h @@ -28,7 +28,7 @@ namespace panda::ecmascript { struct ErrorParameter { EcmaEntrypoint nativeConstructor{nullptr}; EcmaEntrypoint nativeMethod{nullptr}; - const char *nativePropertyName{nullptr}; + std::string_view nativePropertyName{}; JSType nativeJstype{JSType::INVALID}; }; @@ -52,24 +52,26 @@ private: EcmaVM *vm_{nullptr}; JSHandle NewBuiltinConstructor(const JSHandle &env, const JSHandle &prototype, - EcmaEntrypoint ctorFunc, const char *name, int length, + EcmaEntrypoint ctorFunc, std::string_view name, int length, kungfu::BuiltinsStubCSigns::ID builtinId = kungfu::BuiltinsStubCSigns::INVALID) const; JSHandle NewBuiltinCjsCtor(const JSHandle &env, const JSHandle &prototype, EcmaEntrypoint ctorFunc, - const char *name, int length) const; + std::string_view name, int length) const; JSHandle NewFunction(const JSHandle &env, const JSHandle &key, EcmaEntrypoint func, int length, kungfu::BuiltinsStubCSigns::ID builtinId = kungfu::BuiltinsStubCSigns::INVALID) const; + void InitializePropertyDetector(const JSHandle &env, bool lazyInit) const; + void SetLazyAccessor(const JSHandle &object, const JSHandle &key, const JSHandle &accessor) const; void InitializeCtor(const JSHandle &env, const JSHandle &prototype, - const JSHandle &ctor, const char *name, int length) const; + const JSHandle &ctor, std::string_view name, int length) const; void InitializeGlobalObject(const JSHandle &env, const JSHandle &globalObject); @@ -87,7 +89,7 @@ private: void InitializeBigIntWithRealm(const JSHandle &realm) const; - void InitializeDate(const JSHandle &env, const JSHandle &objFuncClass) const; + void InitializeDate(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void LazyInitializeDate(const JSHandle &env) const; void InitializeBoolean(const JSHandle &env, const JSHandle &primRefObjClass) const; @@ -98,7 +100,7 @@ private: void InitializeArray(const JSHandle &env, const JSHandle &objFuncPrototypeVal) const; - void InitializeTypedArray(const JSHandle &env, const JSHandle &objFuncClass) const; + void InitializeTypedArray(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void LazyInitializeTypedArray(const JSHandle &env) const; void InitializeInt8Array(const JSHandle &env, const JSHandle &objFuncClass) const; @@ -147,9 +149,9 @@ private: // for Intl. 
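// A sketch of the idea behind the SetLazyAccessor/LazyInitializeXxx
// declarations in this header, under the assumption (taken from the names)
// that a global slot first holds an accessor and the first read swaps in the
// fully initialized builtin. LazySlot below is illustrative, not the engine's API.
#include <functional>
#include <optional>
#include <utility>

template <typename T>
class LazySlot {
public:
    explicit LazySlot(std::function<T()> init) : init_(std::move(init)) {}

    // First access pays the full initialization cost; later accesses are a load.
    const T &Get()
    {
        if (!value_.has_value()) {
            value_ = init_();
        }
        return *value_;
    }

private:
    std::function<T()> init_;
    std::optional<T> value_;
};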
JSHandle NewIntlConstructor(const JSHandle &env, const JSHandle &prototype, - EcmaEntrypoint ctorFunc, const char *name, int length); + EcmaEntrypoint ctorFunc, std::string_view name, int length); void InitializeIntlCtor(const JSHandle &env, const JSHandle &prototype, - const JSHandle &ctor, const char *name, int length); + const JSHandle &ctor, std::string_view name, int length); void InitializeIntl(const JSHandle &env, const JSHandle &objFuncPrototypeValue); void InitializeLocale(const JSHandle &env); void InitializeDateTimeFormat(const JSHandle &env); @@ -169,13 +171,13 @@ private: void LazyInitializeDisplayNames(const JSHandle &env) const; void LazyInitializeListFormat(const JSHandle &env) const; - void GeneralUpdateError(ErrorParameter *error, EcmaEntrypoint constructor, EcmaEntrypoint method, const char *name, - JSType type) const; + void GeneralUpdateError(ErrorParameter *error, EcmaEntrypoint constructor, EcmaEntrypoint method, + std::string_view name, JSType type) const; - void InitializeSet(const JSHandle &env, const JSHandle &objFuncClass) const; + void InitializeSet(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void LazyInitializeSet(const JSHandle &env); - void InitializeMap(const JSHandle &env, const JSHandle &objFuncClass) const; + void InitializeMap(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void LazyInitializeMap(const JSHandle &env) const; void InitializeWeakMap(const JSHandle &env, const JSHandle &objFuncClass) const; @@ -197,7 +199,7 @@ private: void InitializeJson(const JSHandle &env, const JSHandle &objFuncPrototypeVal) const; - void InitializeString(const JSHandle &env, const JSHandle &primRefObjHClass) const; + void InitializeString(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void InitializeIterator(const JSHandle &env, const JSHandle &objFuncClass) const; @@ -222,7 +224,7 @@ private: void InitializeSharedArrayBuffer(const JSHandle &env, const JSHandle &objFuncClass) const; void LazyInitializeSharedArrayBuffer(const JSHandle &env) const; - void InitializeDataView(const JSHandle &env, const JSHandle &objFuncClass) const; + void InitializeDataView(const JSHandle &env, JSHandle objFuncPrototypeVal) const; void LazyInitializeDataView(const JSHandle &env) const; void InitializeForPromiseFuncClass(const JSHandle &env); @@ -245,7 +247,7 @@ private: void InitializeGenerator(const JSHandle &env, const JSHandle &objFuncClass) const; JSHandle InitializeExoticConstructor(const JSHandle &env, EcmaEntrypoint ctorFunc, - const char *name, int length); + std::string_view name, int length); void InitializePromise(const JSHandle &env, const JSHandle &promiseFuncClass); @@ -261,7 +263,7 @@ private: void InitializeDefaultExportOfScript(const JSHandle &env) const; - void SetFunction(const JSHandle &env, const JSHandle &obj, const char *key, + void SetFunction(const JSHandle &env, const JSHandle &obj, std::string_view key, EcmaEntrypoint func, int length, kungfu::BuiltinsStubCSigns::ID builtinId = kungfu::BuiltinsStubCSigns::INVALID) const; @@ -269,32 +271,58 @@ private: EcmaEntrypoint func, int length, kungfu::BuiltinsStubCSigns::ID builtinId = kungfu::BuiltinsStubCSigns::INVALID) const; + JSHandle SetAndReturnFunction(const JSHandle &env, const JSHandle &obj, + const char *key, EcmaEntrypoint func, int length, + kungfu::BuiltinsStubCSigns::ID builtinId = + kungfu::BuiltinsStubCSigns::INVALID) const ; + + JSHandle SetAndReturnFunction(const JSHandle &env, const JSHandle &obj, + const JSHandle &key, EcmaEntrypoint func, int length, + 
kungfu::BuiltinsStubCSigns::ID builtinId = + kungfu::BuiltinsStubCSigns::INVALID) const; + void SetFuncToObjAndGlobal(const JSHandle &env, const JSHandle &globalObject, - const JSHandle &obj, const char *key, EcmaEntrypoint func, int length); + const JSHandle &obj, std::string_view key, EcmaEntrypoint func, int length, + kungfu::BuiltinsStubCSigns::ID builtinId = kungfu::BuiltinsStubCSigns::INVALID); template void SetFunctionAtSymbol(const JSHandle &env, const JSHandle &obj, - const JSHandle &symbol, const char *name, EcmaEntrypoint func, + const JSHandle &symbol, std::string_view name, EcmaEntrypoint func, int length) const; - void SetStringTagSymbol(const JSHandle &env, const JSHandle &obj, const char *key) const; - JSHandle CreateGetter(const JSHandle &env, EcmaEntrypoint func, const char *name, - int length) const; + template + JSHandle SetAndReturnFunctionAtSymbol(const JSHandle &env, + const JSHandle &obj, + const JSHandle &symbol, + std::string_view name, + EcmaEntrypoint func, + int length) const; + + void SetStringTagSymbol(const JSHandle &env, const JSHandle &obj, + std::string_view key) const; + JSHandle CreateGetter(const JSHandle &env, EcmaEntrypoint func, + std::string_view name, int length) const; + JSHandle CreateGetter(const JSHandle &env, EcmaEntrypoint func, + JSHandle key, int length) const; - void SetConstant(const JSHandle &obj, const char *key, JSTaggedValue value) const; + void SetConstant(const JSHandle &obj, std::string_view key, JSTaggedValue value) const; - void SetGlobalThis(const JSHandle &obj, const char *key, const JSHandle &globalValue); + void SetGlobalThis(const JSHandle &obj, std::string_view key, + const JSHandle &globalValue); - void SetAttribute(const JSHandle &obj, const char *key, const char *value) const; + void SetAttribute(const JSHandle &obj, std::string_view key, std::string_view value) const; - void SetNoneAttributeProperty(const JSHandle &obj, const char *key, + void SetNoneAttributeProperty(const JSHandle &obj, std::string_view key, const JSHandle &value) const; void StrictModeForbiddenAccessCallerArguments(const JSHandle &env, const JSHandle &prototype) const; - JSHandle CreateSetter(const JSHandle &env, EcmaEntrypoint func, const char *name, - int length); + JSHandle CreateSetter(const JSHandle &env, EcmaEntrypoint func, + std::string_view name, int length) const; + JSHandle CreateSetter(const JSHandle &env, EcmaEntrypoint func, + JSHandle key, int length) const; + void SetArgumentsSharedAccessor(const JSHandle &env); void SetAccessor(const JSHandle &obj, const JSHandle &key, const JSHandle &getter, const JSHandle &setter) const; @@ -304,10 +332,12 @@ private: void InitializeGlobalRegExp(JSHandle &obj) const; // Using to initialize jsapi container JSHandle InitializeArkPrivate(const JSHandle &env) const; - void SetConstantObject(const JSHandle &obj, const char *key, JSHandle &value) const; - void SetFrozenFunction(const JSHandle &env, const JSHandle &obj, const char *key, + void SetConstantObject(const JSHandle &obj, std::string_view key, + JSHandle &value) const; + void SetFrozenFunction(const JSHandle &env, const JSHandle &obj, std::string_view key, EcmaEntrypoint func, int length) const; - void SetNonConstantObject(const JSHandle &obj, const char *key, JSHandle &value) const; + void SetNonConstantObject(const JSHandle &obj, std::string_view key, + JSHandle &value) const; friend class builtins::BuiltinsLazyCallback; }; diff --git a/ecmascript/builtins/builtins_ark_tools.cpp b/ecmascript/builtins/builtins_ark_tools.cpp index 
e442c22befdb9ca059cd9cdc1c954c1ec8b04116..ade668448f4b4cf56febec5080fb30dc3cbedace 100644 --- a/ecmascript/builtins/builtins_ark_tools.cpp +++ b/ecmascript/builtins/builtins_ark_tools.cpp @@ -24,6 +24,8 @@ #include "ecmascript/mem/tagged_object-inl.h" #include "ecmascript/napi/include/dfx_jsnapi.h" #include "ecmascript/mem/clock_scope.h" +#include "ecmascript/property_detector-inl.h" +#include "ecmascript/jit/jit.h" namespace panda::ecmascript::builtins { using StringHelper = base::StringHelper; @@ -38,6 +40,7 @@ JSTaggedValue BuiltinsArkTools::ObjectDump(EcmaRuntimeCallInfo *info) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle str = JSTaggedValue::ToString(thread, GetCallArg(info, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // The default log level of ace_engine and js_runtime is error LOG_ECMA(ERROR) << ": " << EcmaStringAccessor(str).ToStdString(); @@ -145,6 +148,15 @@ JSTaggedValue BuiltinsArkTools::IsNotHoleProperty(EcmaRuntimeCallInfo *info) return GetTaggedBoolean(attr.IsNotHole()); } +JSTaggedValue BuiltinsArkTools::HiddenStackSourceFile(EcmaRuntimeCallInfo *info) +{ + [[maybe_unused]] DisallowGarbageCollection noGc; + ASSERT(info); + JSThread *thread = info->GetThread(); + thread->SetEnableStackSourceFile(false); + return JSTaggedValue::True(); +} + JSTaggedValue BuiltinsArkTools::ExcutePendingJob(EcmaRuntimeCallInfo *info) { ASSERT(info); @@ -178,6 +190,13 @@ JSTaggedValue BuiltinsArkTools::ForceFullGC(EcmaRuntimeCallInfo *info) return JSTaggedValue::True(); } +JSTaggedValue BuiltinsArkTools::HintGC(EcmaRuntimeCallInfo *info) +{ + ASSERT(info); + return JSTaggedValue(const_cast(info->GetThread()->GetEcmaVM()->GetHeap())-> + CheckAndTriggerHintGC()); +} + JSTaggedValue BuiltinsArkTools::RemoveAOTFlag(EcmaRuntimeCallInfo *info) { ASSERT(info); @@ -209,6 +228,7 @@ JSTaggedValue BuiltinsArkTools::StartCpuProfiler(EcmaRuntimeCallInfo *info) std::string fileName = ""; if (fileNameValue->IsString()) { JSHandle str = JSTaggedValue::ToString(thread, fileNameValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); fileName = EcmaStringAccessor(str).ToStdString() + ".cpuprofile"; } else { fileName = GetProfileName(); @@ -307,9 +327,144 @@ JSTaggedValue BuiltinsArkTools::IsPrototype(EcmaRuntimeCallInfo *info) return JSTaggedValue(objHclass->IsPrototype()); } +JSTaggedValue BuiltinsArkTools::IsRegExpReplaceDetectorValid(EcmaRuntimeCallInfo *info) +{ + ASSERT(info); + JSThread *thread = info->GetThread(); + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + return JSTaggedValue(PropertyDetector::IsRegExpReplaceDetectorValid(env)); +} + +JSTaggedValue BuiltinsArkTools::IsSymbolIteratorDetectorValid(EcmaRuntimeCallInfo *info) +{ + ASSERT(info); + JSThread *thread = info->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + JSHandle kind = GetCallArg(info, 0); + if (!kind->IsString()) { + return JSTaggedValue::Undefined(); + } + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle mapString = factory->NewFromUtf8("Map"); + if (JSTaggedValue::Equal(thread, kind, JSHandle(mapString))) { + return JSTaggedValue(PropertyDetector::IsMapIteratorDetectorValid(env)); + } + JSHandle setString = factory->NewFromUtf8("Set"); + if (JSTaggedValue::Equal(thread, kind, JSHandle(setString))) { + return JSTaggedValue(PropertyDetector::IsSetIteratorDetectorValid(env)); + } + JSHandle stringString = factory->NewFromUtf8("String"); + if (JSTaggedValue::Equal(thread, kind, JSHandle(stringString))) { 
+ return JSTaggedValue(PropertyDetector::IsStringIteratorDetectorValid(env)); + } + JSHandle arrayString = factory->NewFromUtf8("Array"); + if (JSTaggedValue::Equal(thread, kind, JSHandle(arrayString))) { + return JSTaggedValue(PropertyDetector::IsArrayIteratorDetectorValid(env)); + } + JSHandle typedarrayString = factory->NewFromUtf8("TypedArray"); + if (JSTaggedValue::Equal(thread, kind, JSHandle(typedarrayString))) { + return JSTaggedValue(PropertyDetector::IsTypedArrayIteratorDetectorValid(env)); + } + return JSTaggedValue::Undefined(); +} + JSTaggedValue BuiltinsArkTools::TimeInUs([[maybe_unused]] EcmaRuntimeCallInfo *info) { ClockScope scope; return JSTaggedValue(scope.GetCurTime()); } +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::PrepareFunctionForOptimization([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter PrepareFunctionForOptimization()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::OptimizeFunctionOnNextCall([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + JSThread *thread = info->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + JSHandle thisValue = GetCallArg(info, 0); + if (!thisValue->IsJSFunction()) { + return JSTaggedValue::Undefined(); + } + JSHandle jsFunction(thisValue); + Jit::Compile(thread->GetEcmaVM(), jsFunction); + + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::OptimizeMaglevOnNextCall([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter OptimizeMaglevOnNextCall()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::DeoptimizeFunction([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter DeoptimizeFunction()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::OptimizeOsr([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter OptimizeOsr()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::NeverOptimizeFunction([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter NeverOptimizeFunction()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::HeapObjectVerify([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter HeapObjectVerify()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::DisableOptimizationFinalization([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter DisableOptimizationFinalization()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::DeoptimizeNow([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter DeoptimizeNow()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::WaitForBackgroundOptimization([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter WaitForBackgroundOptimization()"; + return JSTaggedValue::Undefined(); +} + +// empty function for regress-xxx test cases +JSTaggedValue BuiltinsArkTools::Gc([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter 
Gc()"; + return JSTaggedValue::Undefined(); +} + +// empty function for pgoAssertType +JSTaggedValue BuiltinsArkTools::PGOAssertType([[maybe_unused]] EcmaRuntimeCallInfo *info) +{ + LOG_ECMA(INFO) << "Enter PGOAssertType"; + return JSTaggedValue::Undefined(); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_ark_tools.h b/ecmascript/builtins/builtins_ark_tools.h index 94eea9252ff4207498689490c0652d0ac063aa96..989abfe64e757c1efb82dd8eb5b52347bb13932a 100644 --- a/ecmascript/builtins/builtins_ark_tools.h +++ b/ecmascript/builtins/builtins_ark_tools.h @@ -19,6 +19,57 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/js_thread.h" +// List of functions in ArkTools, extension of ArkTS engine. +// V(name, func, length, stubIndex) +// where BuiltinsArkTools::func refers to the native implementation of ArkTools[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_ARK_TOOLS_FUNCTIONS_COMMON(V) \ + V("compareHClass", CompareHClass, 2, INVALID) \ + V("dumpHClass", DumpHClass, 1, INVALID) \ + V("excutePendingJob", ExcutePendingJob, 0, INVALID) \ + V("forceFullGC", ForceFullGC, 0, INVALID) \ + V("getHClass", GetHClass, 1, INVALID) \ + V("getLexicalEnv", GetLexicalEnv, 1, INVALID) \ + V("hasTSSubtyping", HasTSSubtyping, 1, INVALID) \ + V("hiddenStackSourceFile", HiddenStackSourceFile, 0, INVALID) \ + V("hintGC", HintGC, 0, INVALID) \ + V("isNotHoleProperty", IsNotHoleProperty, 2, INVALID) \ + V("isPrototype", IsPrototype, 1, INVALID) \ + V("isRegExpReplaceDetectorValid", IsRegExpReplaceDetectorValid, 0, INVALID) \ + V("isSymbolIteratorDetectorValid", IsSymbolIteratorDetectorValid, 1, INVALID) \ + V("isTSHClass", IsTSHClass, 1, INVALID) \ + V("pgoAssertType", PGOAssertType, 2, INVALID) \ + V("print", ObjectDump, 0, INVALID) \ + V("removeAOTFlag", RemoveAOTFlag, 1, INVALID) \ + V("timeInUs", TimeInUs, 0, INVALID) + +#define BUILTIN_ARK_TOOLS_FUNCTIONS_REGRESS(V) \ + V("prepareFunctionForOptimization", PrepareFunctionForOptimization, 1, INVALID) \ + V("optimizeFunctionOnNextCall", OptimizeFunctionOnNextCall, 1, INVALID) \ + V("optimizeMaglevOnNextCall", OptimizeMaglevOnNextCall, 1, INVALID) \ + V("deoptimizeFunction", DeoptimizeFunction, 1, INVALID) \ + V("optimizeOsr", OptimizeOsr, 1, INVALID) \ + V("neverOptimizeFunction", NeverOptimizeFunction, 1, INVALID) \ + V("heapObjectVerify", HeapObjectVerify, 1, INVALID) \ + V("disableOptimizationFinalization", DisableOptimizationFinalization, 0, INVALID) \ + V("deoptimizeNow", DeoptimizeNow, 0, INVALID) \ + V("deoptimize_now", DeoptimizeNow, 0, INVALID) \ + V("waitForBackgroundOptimization", WaitForBackgroundOptimization, 0, INVALID) \ + V("gc", Gc, 0, INVALID) + +#ifdef ECMASCRIPT_SUPPORT_CPUPROFILER +#define BUILTIN_ARK_TOOLS_FUNCTIONS_CPUPROFILER(V) \ + V("startCpuProf", StartCpuProfiler, 0, INVALID) \ + V("stopCpuProf", StopCpuProfiler, 0, INVALID) +#else +#define BUILTIN_ARK_TOOLS_FUNCTIONS_CPUPROFILER(V) // Nothing +#endif + +#define BUILTIN_ARK_TOOLS_FUNCTIONS(V) \ + BUILTIN_ARK_TOOLS_FUNCTIONS_COMMON(V) \ + BUILTIN_ARK_TOOLS_FUNCTIONS_CPUPROFILER(V) \ + BUILTIN_ARK_TOOLS_FUNCTIONS_REGRESS(V) + namespace panda::ecmascript::builtins { class BuiltinsArkTools : public base::BuiltinsBase { public: @@ -45,6 +96,10 @@ public: static JSTaggedValue ForceFullGC(EcmaRuntimeCallInfo *info); + static JSTaggedValue HintGC(EcmaRuntimeCallInfo *info); + + static JSTaggedValue HiddenStackSourceFile(EcmaRuntimeCallInfo *info); + static 
JSTaggedValue RemoveAOTFlag(EcmaRuntimeCallInfo *info); #if defined(ECMASCRIPT_SUPPORT_CPUPROFILER) @@ -59,8 +114,50 @@ public: // ArkTools.isPrototype(object) static JSTaggedValue IsPrototype(EcmaRuntimeCallInfo *info); + static JSTaggedValue IsRegExpReplaceDetectorValid(EcmaRuntimeCallInfo *info); + + static JSTaggedValue IsSymbolIteratorDetectorValid(EcmaRuntimeCallInfo *info); + static JSTaggedValue TimeInUs(EcmaRuntimeCallInfo *info); + + static JSTaggedValue PrepareFunctionForOptimization(EcmaRuntimeCallInfo *info); + + static JSTaggedValue OptimizeFunctionOnNextCall(EcmaRuntimeCallInfo *info); + + static JSTaggedValue OptimizeMaglevOnNextCall(EcmaRuntimeCallInfo *info); + + static JSTaggedValue DeoptimizeFunction(EcmaRuntimeCallInfo *info); + + static JSTaggedValue OptimizeOsr(EcmaRuntimeCallInfo *info); + + static JSTaggedValue NeverOptimizeFunction(EcmaRuntimeCallInfo *info); + + static JSTaggedValue HeapObjectVerify(EcmaRuntimeCallInfo *info); + + static JSTaggedValue DisableOptimizationFinalization(EcmaRuntimeCallInfo *info); + + static JSTaggedValue DeoptimizeNow(EcmaRuntimeCallInfo *info); + + static JSTaggedValue WaitForBackgroundOptimization(EcmaRuntimeCallInfo *info); + + static JSTaggedValue Gc(EcmaRuntimeCallInfo *info); + + static JSTaggedValue PGOAssertType(EcmaRuntimeCallInfo *info); + + static Span GetArkToolsFunctions() + { + return Span(ARK_TOOLS_FUNCTIONS); + } + +private: +#define BUILTINS_ARK_TOOLS_FUNCTION_ENTRY(name, method, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsArkTools::method, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array ARK_TOOLS_FUNCTIONS = { + BUILTIN_ARK_TOOLS_FUNCTIONS(BUILTINS_ARK_TOOLS_FUNCTION_ENTRY) + }; +#undef BUILTINS_ARK_TOOLS_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins -#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARK_TOOLS_H +#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARK_TOOLS_H \ No newline at end of file diff --git a/ecmascript/builtins/builtins_array.cpp b/ecmascript/builtins/builtins_array.cpp index ebc838c025cd998b720097d84bb290de1ce43c14..dff484913569057dc62e2ab67c18745816c3adca 100644 --- a/ecmascript/builtins/builtins_array.cpp +++ b/ecmascript/builtins/builtins_array.cpp @@ -21,6 +21,7 @@ #include "ecmascript/base/number_helper.h" #include "ecmascript/base/typed_array_helper-inl.h" #include "ecmascript/base/typed_array_helper.h" +#include "ecmascript/ecma_macros.h" #include "ecmascript/ecma_runtime_call_info.h" #include "ecmascript/ecma_string.h" #include "ecmascript/global_env.h" @@ -29,8 +30,10 @@ #include "ecmascript/js_array_iterator.h" #include "ecmascript/js_function.h" #include "ecmascript/js_handle.h" +#include "ecmascript/js_map_iterator.h" #include "ecmascript/js_stable_array.h" #include "ecmascript/js_tagged_number.h" +#include "ecmascript/js_tagged_value.h" #include "ecmascript/object_factory.h" #include "ecmascript/object_fast_operator-inl.h" #include "ecmascript/tagged_array-inl.h" @@ -65,14 +68,14 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) // 22.1.1.1 Array ( ) if (argc == 0) { // 6. Return ArrayCreate(0, proto). - return JSTaggedValue(JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget).GetObject()); + return JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget).GetTaggedValue(); } // 22.1.1.2 Array(len) if (argc == 1) { // 6. Let array be ArrayCreate(0, proto). 
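// A compilable sketch of the X-macro technique behind
// BUILTIN_ARK_TOOLS_FUNCTIONS in the header above: one list expands both into
// the constexpr entry table and (in the .cpp) into the registration loop, so
// adding a builtin is a one-line change. Entry and the demo functions are
// illustrative stand-ins for base::BuiltinFunctionEntry.
#include <array>
#include <string_view>

struct Entry {
    std::string_view name;
    int (*entrypoint)();
    int length;
};

static int Print() { return 0; }
static int ForceFullGC() { return 0; }

#define DEMO_ARK_TOOLS_FUNCTIONS(V) \
    V("print", Print, 0)            \
    V("forceFullGC", ForceFullGC, 0)

#define DEMO_ENTRY(name, func, len) Entry{name, func, len},
static constexpr std::array<Entry, 2> DEMO_TABLE = {
    DEMO_ARK_TOOLS_FUNCTIONS(DEMO_ENTRY)
};
#undef DEMO_ENTRY
// InitializeArkTools then just iterates the table, as in the loop over
// GetArkToolsFunctions() earlier in this diff.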
- uint32_t newLen = 0; - JSHandle newArrayHandle(JSArray::ArrayCreate(thread, JSTaggedNumber(newLen), newTarget)); + JSHandle newArrayHandle(JSArray::ArrayCreate(thread, JSTaggedNumber(0), newTarget)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle len = GetCallArg(argv, 0); // 7. If Type(len) is not Number, then // a. Let defineStatus be CreateDataProperty(array, "0", len). @@ -83,6 +86,7 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) // b. If intLen ≠ len, throw a RangeError exception. // 9. Let setStatus be Set(array, "length", intLen, true). // 10. Assert: setStatus is not an abrupt completion. + uint32_t newLen = 0; if (!len->IsNumber()) { JSHandle key0 = thread->GlobalConstants()->GetHandledZeroString(); JSObject::CreateDataProperty(thread, newArrayHandle, key0, len); @@ -91,10 +95,10 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) newLen = JSTaggedValue::ToUint32(thread, len); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (JSTaggedNumber(len.GetTaggedValue()).GetNumber() != newLen) { - THROW_RANGE_ERROR_AND_RETURN(thread, "The length is out of range.", JSTaggedValue::Exception()); + THROW_RANGE_ERROR_AND_RETURN(thread, "Invalid array length", JSTaggedValue::Exception()); } } - JSArray::SetCapacity(thread, newArrayHandle, 0, newLen); + JSArray::SetCapacity(thread, newArrayHandle, 0, newLen, true); // 11. Return array. return newArrayHandle.GetTaggedValue(); @@ -107,7 +111,6 @@ JSTaggedValue BuiltinsArray::ArrayConstructor(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "Failed to create array.", JSTaggedValue::Exception()); } JSHandle newArrayHandle(thread, newArray); - // 8. Let k be 0. // 9. Let items be a zero-origined List containing the argument items in order. // 10. Repeat, while k < numberOfArgs @@ -167,6 +170,11 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) // 6. If usingIterator is not undefined, then JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); if (!usingIterator->IsUndefined()) { + // Fast path for MapIterator + if (!mapping && items->IsJSMapIterator()) { + return JSMapIterator::MapIteratorToList(thread, items, usingIterator); + } + // a. If IsConstructor(C) is true, then // i. Let A be Construct(C). // b. Else, @@ -209,7 +217,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) if (next->IsFalse()) { JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), lengthKey, key, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - return JSTaggedValue(newArrayHandle.GetTaggedValue()); + return newArrayHandle.GetTaggedValue(); } // v. Let nextValue be IteratorValue(next). JSHandle nextValue = JSIterator::IteratorValue(thread, next); @@ -221,15 +229,15 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) // 3. Let mappedValue be mappedValue.[[value]]. // viii. Else, let mappedValue be nextValue. 
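// A standalone model (a sketch, not engine code) of the length check the
// Array(len) hunk above enforces: ToUint32(len) must round-trip to the
// original number, otherwise 22.1.1.2 requires a RangeError -- now worded
// "Invalid array length". std::range_error stands in for
// THROW_RANGE_ERROR_AND_RETURN.
#include <cmath>
#include <cstdint>
#include <stdexcept>

uint32_t ValidateArrayLength(double len)
{
    if (std::isnan(len)) {
        throw std::range_error("Invalid array length");  // ToUint32(NaN) == 0 != NaN
    }
    double modded = std::fmod(std::trunc(len), 4294967296.0);  // ToUint32: truncate, mod 2^32
    if (modded < 0) {
        modded += 4294967296.0;
    }
    auto intLen = static_cast<uint32_t>(modded);
    if (static_cast<double>(intLen) != len) {  // rejects -1, 1.5, 2**32, ...
        throw std::range_error("Invalid array length");
    }
    return intLen;
}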
if (mapping) { - const int32_t argsLength = 2; // 2: «nextValue, k» + const uint32_t argsLength = 2; // 2: «nextValue, k» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, mapfn, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(nextValue.GetTaggedValue(), key.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - mapValue.Update(callResult); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, - JSIterator::IteratorClose(thread, iterator, mapValue).GetTaggedValue()); + JSIterator::IteratorClose(thread, iterator, mapValue).GetTaggedValue()); + mapValue.Update(callResult); } else { mapValue.Update(nextValue.GetTaggedValue()); } @@ -239,7 +247,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) JSHandle defineStatus( thread, JSTaggedValue(JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, key, mapValue))); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, - JSIterator::IteratorClose(thread, iterator, defineStatus).GetTaggedValue()); + JSIterator::IteratorClose(thread, iterator, defineStatus).GetTaggedValue()); k++; } } @@ -290,7 +298,7 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (mapping) { key.Update(JSTaggedValue(k)); - const int32_t argsLength = 2; // 2: «kValue, k» + const uint32_t argsLength = 2; // 2: «kValue, k» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, mapfn, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -298,7 +306,6 @@ JSTaggedValue BuiltinsArray::From(EcmaRuntimeCallInfo *argv) JSTaggedValue callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); mapValue.Update(callResult); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } else { mapValue.Update(kValue.GetTaggedValue()); } @@ -404,132 +411,107 @@ JSTaggedValue BuiltinsArray::Concat(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(argv->GetThread(), Array, Concat); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - uint32_t argc = argv->GetArgsNumber(); + int argc = static_cast(argv->GetArgsNumber()); // 1. Let O be ToObject(this value). JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); - // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); - // 3. Let A be ArraySpeciesCreate(O, 0). + // 2. Let A be ArraySpeciesCreate(O, 0). uint32_t arrayLen = 0; JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, JSTaggedNumber(arrayLen)); - // 4. ReturnIfAbrupt(A). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle newArrayHandle(thread, newArray); - // 5. Let n be 0. + JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + // Fast path + int64_t arrLen = ArrayHelper::GetArrayLength(thread, thisObjVal); + if (arrLen == 0 && argc == 1) { + JSHandle argHandle = GetCallArg(argv, 0); + int64_t argLen = ArrayHelper::GetArrayLength(thread, argHandle); + if (argLen == 0 && argHandle->IsJSArray()) { + JSHandle lenHandle(thread, JSTaggedValue(arrLen)); + JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), + lengthKey, lenHandle, true); + return newArrayHandle.GetTaggedValue(); + } + } + + // 3. Let n be 0. 
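// A simplified stand-in for the ordering fix in the Array.from mapping loop
// above: the abrupt-completion check now runs before the call result is
// stored, so an exception value is never written into mapValue and
// IteratorClose sees the original completion. Completion and the function
// below are illustrative, not engine types.
#include <optional>

struct Completion {
    bool abrupt = false;  // true when the mapfn call threw
    int value = 0;        // payload, meaningful only when !abrupt
};

std::optional<int> StoreMappedValue(const Completion &callResult)
{
    if (callResult.abrupt) {  // 1) check for an abrupt completion first...
        return std::nullopt;  //    ...and propagate without touching storage
    }
    return callResult.value;  // 2) only then consume the mapped value
}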
int64_t n = 0; + JSMutableHandle ele(thread, JSTaggedValue::Undefined()); JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); JSMutableHandle toKey(thread, JSTaggedValue::Undefined()); - bool isSpreadable = ArrayHelper::IsConcatSpreadable(thread, thisHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (isSpreadable) { - int64_t thisLen = ArrayHelper::GetArrayLength(thread, thisObjVal); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (n + thisLen > base::MAX_SAFE_INTEGER) { - THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); - } - int64_t k = 0; - if (thisObjVal->IsStableJSArray(thread)) { - JSStableArray::Concat(thread, newArrayHandle, thisObjHandle, k, n); - } - while (k < thisLen) { - fromKey.Update(JSTaggedValue(k)); - toKey.Update(JSTaggedValue(n)); - bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, fromKey); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (exists) { - JSHandle fromValHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } - n++; - k++; - } - } else { - if (n >= base::MAX_SAFE_INTEGER) { - THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + // 4. Prepend O to items. + // 5. For each element E of items, do + for (int i = -1; i < argc; i++) { + if (i < 0) { + ele.Update(thisObjHandle.GetTaggedValue()); + } else { + ele.Update(GetCallArg(argv, i)); } - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, thisObjVal); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - n++; - } - // 7. Repeat, while items is not empty - for (uint32_t i = 0; i < argc; i++) { - // a. Remove the first element from items and let E be the value of the element - JSHandle addHandle = GetCallArg(argv, i); + // a. Let spreadable be ? IsConcatSpreadable(E). + bool isSpreadable = ArrayHelper::IsConcatSpreadable(thread, ele); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSHandle addObjHandle(addHandle); - - // b. Let spreadable be IsConcatSpreadable(E). - isSpreadable = ArrayHelper::IsConcatSpreadable(thread, addHandle); - // c. ReturnIfAbrupt(spreadable). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // d. If spreadable is true, then + // b. If spreadable is true, then if (isSpreadable) { - // ii. Let len be ToLength(Get(E, "length")). - int64_t len = ArrayHelper::GetArrayLength(thread, JSHandle::Cast(addObjHandle)); - // iii. ReturnIfAbrupt(len). + // i. Let k be 0. + // ii. Let len be ? LengthOfArrayLike(E). + // iii. If n + len > 253 - 1, throw a TypeError exception. + int64_t len = ArrayHelper::GetArrayLength(thread, ele); + int64_t k = 0; RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // iv. If n + len > 253-1, throw a TypeError exception. if (n + len > base::MAX_SAFE_INTEGER) { THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); } - int64_t k = 0; - JSHandle addObjVal(addObjHandle); - if (addObjVal->IsStableJSArray(thread)) { - JSStableArray::Concat(thread, newArrayHandle, addObjHandle, k, n); + + if (ele->IsStableJSArray(thread)) { + JSStableArray::Concat(thread, newArrayHandle, JSHandle::Cast(ele), k, n); } - // v. Repeat, while k < len + // iv. Repeat, while k < len, while (k < len) { - fromKey.Update(JSTaggedValue(k)); - toKey.Update(JSTaggedValue(n)); // 1. Let P be ToString(k). // 2. Let exists be HasProperty(E, P). - // 4. 
If exists is true, then - bool exists = JSTaggedValue::HasProperty(thread, JSHandle::Cast(addObjHandle), fromKey); + // 3. If exists is true, then + fromKey.Update(JSTaggedValue::ToString(thread, JSTaggedValue(k))); + toKey.Update(JSTaggedValue(n)); + bool exists = JSTaggedValue::HasProperty(thread, ele, fromKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (exists) { // a. Let subElement be Get(E, P). JSHandle fromValHandle = - JSArray::FastGetPropertyByValue(thread, addHandle, fromKey); - // b. ReturnIfAbrupt(subElement). + JSArray::FastGetPropertyByValue(thread, ele, fromKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // b. Perform ? CreateDataPropertyOrThrow(A, ! ToString(𝔽(n)), subElement). JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValHandle); - // d. ReturnIfAbrupt(status). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - // 5. Increase n by 1. - // 6. Increase k by 1. + // 4. Set n to n + 1. + // 5. Set k to k + 1. n++; k++; } - } else { // e. Else E is added as a single item rather than spread, - // i. If n≥253-1, throw a TypeError exception. + //c. Else + } else { + // ii. If n ≥ 253 - 1, throw a TypeError exception. if (n >= base::MAX_SAFE_INTEGER) { THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); } - // ii. Let status be CreateDataPropertyOrThrow (A, ToString(n), E). - JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, addHandle); - // iii. ReturnIfAbrupt(status). + // iii. Perform ? CreateDataPropertyOrThrow(A, ! ToString(𝔽(n)), E). + // iv. Set n to n + 1. + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, n, ele); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - - // iv. Increase n by 1. n++; } } - // 8. Let setStatus be Set(A, "length", n, true). - JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + // 6. Perform ? Set(A, "length", 𝔽(n), true). JSHandle lenHandle(thread, JSTaggedValue(n)); JSTaggedValue::SetProperty(thread, JSHandle::Cast(newArrayHandle), lengthKey, lenHandle, true); - // 9. ReturnIfAbrupt(setStatus). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 10. Return A. + // 7. Return A. return newArrayHandle.GetTaggedValue(); } @@ -644,6 +626,7 @@ JSTaggedValue BuiltinsArray::CopyWithin(EcmaRuntimeCallInfo *argv) } else { if (thisObjVal->IsJSProxy()) { toKey.Update(JSTaggedValue::ToString(thread, toKey).GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSTaggedValue::DeletePropertyOrThrow(thread, thisObjVal, toKey); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -726,7 +709,7 @@ JSTaggedValue BuiltinsArray::Every(EcmaRuntimeCallInfo *argv) } } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -762,6 +745,18 @@ JSTaggedValue BuiltinsArray::Fill(EcmaRuntimeCallInfo *argv) // 1. Let O be ToObject(this value). 
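// A sketch of the control-flow reshape in Concat above: rather than handling
// the receiver in a separate pre-loop block, the receiver is treated as the
// first item (spec: "Prepend O to items") by starting the loop index at -1.
// Plain vectors stand in for JS arrays; only the spreadable case is modeled.
#include <cstddef>
#include <vector>

std::vector<int> ConcatAll(const std::vector<int> &receiver,
                           const std::vector<std::vector<int>> &args)
{
    std::vector<int> out;
    int argc = static_cast<int>(args.size());
    for (int i = -1; i < argc; i++) {
        // i == -1 selects the receiver, i >= 0 selects the i-th argument,
        // so one loop body covers both, exactly like `ele` in the hunk above.
        const std::vector<int> &ele = (i < 0) ? receiver : args[static_cast<size_t>(i)];
        out.insert(out.end(), ele.begin(), ele.end());
    }
    return out;
}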
JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + + if (thisHandle->IsJSArray()) { + bool isDictionary = thisObjHandle->GetJSHClass()->IsDictionaryElement(); + if (isDictionary) { + uint32_t length = JSArray::Cast(*thisObjHandle)->GetLength(); + uint32_t size = thisObjHandle->GetNumberOfElements(); + if (length - size > JSObject::MAX_GAP) { + JSObject::TryOptimizeAsFastElements(thread, thisObjHandle); + } + } + } + // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); @@ -839,6 +834,13 @@ JSTaggedValue BuiltinsArray::Fill(EcmaRuntimeCallInfo *argv) return thisObjHandle.GetTaggedValue(); } } + if (thisHandle->IsTypedArray()) { + bool result = JSTypedArray::FastTypedArrayFill(thread, thisHandle, value, start, end); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (result) { + return thisObjHandle.GetTaggedValue(); + } + } int64_t k = start; while (k < end) { key.Update(JSTaggedValue(k)); @@ -911,7 +913,7 @@ JSTaggedValue BuiltinsArray::Filter(EcmaRuntimeCallInfo *argv) JSStableArray::Filter(newArrayHandle, thisObjHandle, argv, k, toIndex); } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSTaggedValue callResult = GetTaggedBoolean(true); while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); @@ -925,6 +927,7 @@ JSTaggedValue BuiltinsArray::Filter(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { toIndexHandle.Update(JSTaggedValue(toIndex)); JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toIndexHandle, kValue); @@ -983,16 +986,15 @@ JSTaggedValue BuiltinsArray::Find(EcmaRuntimeCallInfo *argv) JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); key.Update(JSTaggedValue(k)); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - bool boolResult = callResult.ToBoolean(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (boolResult) { + if (callResult.ToBoolean()) { return kValue.GetTaggedValue(); } k++; @@ -1044,13 +1046,14 @@ JSTaggedValue BuiltinsArray::FindIndex(EcmaRuntimeCallInfo *argv) JSTaggedValue callResult = GetTaggedBoolean(true); if (thisObjVal->IsStableJSArray(thread)) { callResult = JSStableArray::HandleFindIndexOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { return GetTaggedDouble(k); } } JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» while (k < len) { JSHandle kValue = 
JSArray::FastGetPropertyByValue(thread, thisObjVal, k); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1060,6 +1063,7 @@ JSTaggedValue BuiltinsArray::FindIndex(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (callResult.ToBoolean()) { return GetTaggedDouble(k); } @@ -1113,9 +1117,10 @@ JSTaggedValue BuiltinsArray::ForEach(EcmaRuntimeCallInfo *argv) JSMutableHandle key(thread, JSTaggedValue::Undefined()); uint32_t k = 0; if (thisObjVal->IsStableJSArray(thread)) { - JSStableArray::HandleforEachOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + JSStableArray::HandleforEachOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, len, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); while (k < len) { bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k); @@ -1138,89 +1143,91 @@ JSTaggedValue BuiltinsArray::ForEach(EcmaRuntimeCallInfo *argv) return JSTaggedValue::Undefined(); } -// 22.1.3.11 Array.prototype.indexOf ( searchElement [ , fromIndex ] ) -JSTaggedValue BuiltinsArray::IndexOf(EcmaRuntimeCallInfo *argv) +JSTaggedValue BuiltinsArray::IndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) { - ASSERT(argv); - BUILTINS_API_TRACE(argv->GetThread(), Array, IndexOf); - JSThread *thread = argv->GetThread(); - [[maybe_unused]] EcmaHandleScope handleScope(thread); - + int64_t length = JSHandle::Cast(thisHandle)->GetArrayLength(); + if (length == 0) { + return JSTaggedValue(-1); + } + int64_t fromIndex = 0; uint32_t argc = argv->GetArgsNumber(); + // 2: [target, fromIndex]. Note that fromIndex is missing in most usage cases. + if (UNLIKELY(argc >= 2)) { + JSHandle fromIndexHandle = argv->GetCallArg(1); + fromIndex = ArrayHelper::GetStartIndex(thread, fromIndexHandle, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Slow path when fromIndex is obtained from an ECMAObject + // due to potential side effects in its 'toString' and 'valueOf' methods which modify the array object. + if (UNLIKELY(fromIndexHandle->IsECMAObject())) { + return IndexOfSlowPath(argv, thread, thisHandle, length, fromIndex); + } + } + if (fromIndex >= length) { + return JSTaggedValue(-1); + } + JSHandle target = GetCallArg(argv, 0); + return JSStableArray::IndexOf( + thread, thisHandle, target, static_cast(fromIndex), static_cast(length)); +} +JSTaggedValue BuiltinsArray::IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) +{ // 1. Let O be ToObject(this value). - JSHandle thisHandle = GetThis(argv); JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // 2. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisObjVal(thisObjHandle); - - JSHandle searchElement = GetCallArg(argv, 0); - // 3. Let len be ToLength(Get(O, "length")). - int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + int64_t length = ArrayHelper::GetLength(thread, thisObjVal); // 4. ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 5. If len is 0, return −1. - if (len == 0) { - return GetTaggedInt(-1); + if (length == 0) { + return JSTaggedValue(-1); } - // 6. 
If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be 0. - double fromIndex = 0; - if (argc > 1) { - JSHandle msg1 = GetCallArg(argv, 1); - JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1); - // 7. ReturnIfAbrupt(n). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - fromIndex = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); - } + int64_t fromIndex = ArrayHelper::GetStartIndexFromArgs(thread, argv, 1, length); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return IndexOfSlowPath(argv, thread, thisObjVal, length, fromIndex); +} - // 8. If n ≥ len, return −1. - if (fromIndex >= len) { - return GetTaggedInt(-1); +JSTaggedValue BuiltinsArray::IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, + int64_t length, int64_t fromIndex) +{ + if (fromIndex >= length) { + return JSTaggedValue(-1); } - - // 9. If n ≥ 0, then - // a. Let k be n. - // 10. Else n<0, - // a. Let k be len - abs(n). - // b. If k < 0, let k be 0. - int64_t from = (fromIndex >= 0) ? fromIndex : ((len + fromIndex) >= 0 ? len + fromIndex : 0); - - // if it is stable array, we can go to fast path - if (thisObjVal->IsStableJSArray(thread)) { - return JSStableArray::IndexOf(thread, thisObjVal, searchElement, static_cast(from), - static_cast(len)); - } - - // 11. Repeat, while k key(thread, JSTaggedValue::Undefined()); - while (from < len) { - key.Update(JSTaggedValue(from)); - bool exists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, key)); + JSMutableHandle keyHandle(thread, JSTaggedValue::Undefined()); + JSHandle target = GetCallArg(argv, 0); + // 11. Repeat, while k < len + for (int64_t curIndex = fromIndex; curIndex < length; ++curIndex) { + keyHandle.Update(JSTaggedValue(curIndex)); + bool found = ArrayHelper::ElementIsStrictEqualTo(thread, thisObjVal, keyHandle, target); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (exists) { - JSHandle kValueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, key); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (JSTaggedValue::StrictEqual(thread, searchElement, kValueHandle)) { - return GetTaggedDouble(from); - } + if (UNLIKELY(found)) { + return JSTaggedValue(curIndex); } - from++; } - // 12. Return -1. - return GetTaggedInt(-1); + return JSTaggedValue(-1); +} + +// 22.1.3.11 Array.prototype.indexOf ( searchElement [ , fromIndex ] ) +JSTaggedValue BuiltinsArray::IndexOf(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, IndexOf); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + JSHandle thisHandle = GetThis(argv); + if (thisHandle->IsStableJSArray(thread)) { + return IndexOfStable(argv, thread, thisHandle); + } + return IndexOfSlowPath(argv, thread, thisHandle); } // 22.1.3.12 Array.prototype.join (separator) @@ -1231,21 +1238,26 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisHandle = GetThis(argv); + auto factory = thread->GetEcmaVM()->GetFactory(); + auto context = thread->GetCurrentEcmaContext(); + bool noCircular = context->JoinStackPushFastPath(thisHandle); + if (!noCircular) { + return factory->GetEmptyString().GetTaggedValue(); + } if (thisHandle->IsStableJSArray(thread)) { return JSStableArray::Join(JSHandle::Cast(thisHandle), argv); } - auto factory = thread->GetEcmaVM()->GetFactory(); // 1. Let O be ToObject(this value). 
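// A sketch of the fast/slow dispatch introduced for indexOf above. The key
// rule: a fromIndex that is an object can run user code (valueOf/toString)
// during coercion, and that code may mutate the array, so the stable path
// must fall back to the generic path after coercing it. The callbacks below
// are illustrative placeholders, not engine signatures.
#include <cstdint>

struct FromIndexArg {
    bool isObject;    // would be IsECMAObject() in the engine
    int64_t coerced;  // integer value after the (possibly side-effecting) coercion
};

int64_t IndexOfDispatch(const FromIndexArg &fromIndex, int64_t length,
                        int64_t (*fastPath)(int64_t, int64_t),
                        int64_t (*slowPath)(int64_t, int64_t))
{
    if (fromIndex.isObject) {
        // Coercion may have invalidated the "stable array" assumption.
        return slowPath(fromIndex.coerced, length);
    }
    if (fromIndex.coerced >= length) {
        return -1;  // nothing left to scan
    }
    return fastPath(fromIndex.coerced, length);
}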
JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // 2. ReturnIfAbrupt(O). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); JSHandle thisObjVal(thisObjHandle); // 3. Let len be ToLength(Get(O, "length")). int64_t len = ArrayHelper::GetLength(thread, thisObjVal); // 4. ReturnIfAbrupt(len). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); // 5. If separator is undefined, let separator be the single-element String ",". // 6. Let sep be ToString(separator). @@ -1258,7 +1270,7 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) JSHandle sepStringHandle = JSTaggedValue::ToString(thread, sepHandle); // 7. ReturnIfAbrupt(sep). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); std::u16string sepStr = EcmaStringAccessor(sepStringHandle).ToU16String(); // 8. If len is zero, return the empty String. @@ -1278,22 +1290,19 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) // e. Let R be a String value produced by concatenating S and next. // f. Increase k by 1. std::u16string concatStr; - std::u16string concatStrNew; for (int64_t k = 0; k < len; k++) { std::u16string nextStr; JSHandle element = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); if (!element->IsUndefined() && !element->IsNull()) { JSHandle nextStringHandle = JSTaggedValue::ToString(thread, element); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + RETURN_EXCEPTION_AND_POP_JOINSTACK(thread, thisHandle); nextStr = EcmaStringAccessor(nextStringHandle).ToU16String(); } if (k > 0) { - concatStrNew = base::StringHelper::Append(concatStr, sepStr); - concatStr = base::StringHelper::Append(concatStrNew, nextStr); - continue; + concatStr.append(sepStr); } - concatStr = base::StringHelper::Append(concatStr, nextStr); + concatStr.append(nextStr); } // 14. Return R. @@ -1301,6 +1310,7 @@ JSTaggedValue BuiltinsArray::Join(EcmaRuntimeCallInfo *argv) auto *char16tData = const_cast(constChar16tData); auto *uint16tData = reinterpret_cast(char16tData); uint32_t u16strSize = concatStr.size(); + context->JoinStackPopFastPath(thisHandle); return factory->NewFromUtf16Literal(uint16tData, u16strSize).GetTaggedValue(); } @@ -1321,82 +1331,90 @@ JSTaggedValue BuiltinsArray::Keys(EcmaRuntimeCallInfo *argv) return iter.GetTaggedValue(); } -// 22.1.3.14 Array.prototype.lastIndexOf ( searchElement [ , fromIndex ] ) -JSTaggedValue BuiltinsArray::LastIndexOf(EcmaRuntimeCallInfo *argv) +JSTaggedValue BuiltinsArray::LastIndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle) { - ASSERT(argv); - BUILTINS_API_TRACE(argv->GetThread(), Array, LastIndexOf); - JSThread *thread = argv->GetThread(); - [[maybe_unused]] EcmaHandleScope handleScope(thread); - + int64_t length = JSHandle::Cast(thisHandle)->GetArrayLength(); + if (length == 0) { + return JSTaggedValue(-1); + } + int64_t fromIndex = length - 1; uint32_t argc = argv->GetArgsNumber(); + // 2: [target, fromIndex]. Note that fromIndex is missing in most usage cases. 
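+    // Examples of the fast/slow split (plain ECMAScript):
+    //   [1, 2, 3].lastIndexOf(2);      // 1, fromIndex defaults to len - 1
+    //   [1, 2, 3].lastIndexOf(2, -2);  // 1, negative fromIndex counts back from len
+    //   [1, 2, 3].lastIndexOf(2, {});  // object fromIndex may run user valueOf/toString, so slow path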
+    if (UNLIKELY(argc >= 2)) {
+        JSHandle fromIndexHandle = argv->GetCallArg(1);
+        fromIndex = ArrayHelper::GetLastStartIndex(thread, fromIndexHandle, length);
+        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
+        // Slow path when fromIndex is obtained from an ECMAObject
+        // due to potential side effects in its 'toString' and 'valueOf' methods which modify the array object.
+        if (UNLIKELY(fromIndexHandle->IsECMAObject())) {
+            return LastIndexOfSlowPath(argv, thread, thisHandle, fromIndex);
+        }
+    }
+    if (fromIndex < 0) {
+        return JSTaggedValue(-1);
+    }
+    JSHandle target = GetCallArg(argv, 0);
+    return JSStableArray::LastIndexOf(
+        thread, thisHandle, target, static_cast(fromIndex), static_cast(length));
+}
+JSTaggedValue BuiltinsArray::LastIndexOfSlowPath(
+    EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle)
+{
     // 1. Let O be ToObject(this value).
-    JSHandle thisHandle = GetThis(argv);
     JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle);
     // 2. ReturnIfAbrupt(O).
     RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     JSHandle thisObjVal(thisObjHandle);
-
-    JSHandle searchElement = GetCallArg(argv, 0);
-
-    // 3. Let len be ToLength(Get(O, "length")).
-    int64_t len = ArrayHelper::GetLength(thread, thisObjVal);
+    int64_t length = ArrayHelper::GetLength(thread, thisObjVal);
     // 4. ReturnIfAbrupt(len).
     RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-    // 5. If len is 0, return −1.
-    if (len == 0) {
-        return GetTaggedInt(-1);
+    if (length == 0) {
+        return JSTaggedValue(-1);
     }
+    // 6. If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be len - 1.
+    int64_t fromIndex = ArrayHelper::GetLastStartIndexFromArgs(thread, argv, 1, length);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
+    return LastIndexOfSlowPath(argv, thread, thisObjVal, fromIndex);
+}
-    // 6. If argument fromIndex was passed let n be ToInteger(fromIndex); else let n be len-1.
-    double fromIndex = len - 1;
-    if (argc > 1) {
-        JSHandle msg1 = GetCallArg(argv, 1);
-        JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1);
-        // 7. ReturnIfAbrupt(n).
-        RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-        fromIndex = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber());
+JSTaggedValue BuiltinsArray::LastIndexOfSlowPath(
+    EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, int64_t fromIndex)
+{
+    if (fromIndex < 0) {
+        return JSTaggedValue(-1);
     }
-
-    // 8. If n ≥ 0, let k be min(n, len – 1).
-    // 9. Else n < 0,
-    //   a. Let k be len - abs(n).
-    int64_t from = 0;
-    if (fromIndex >= 0) {
-        from = (len - 1) < fromIndex ? len - 1 : fromIndex;
-    } else {
-        double tempFrom = len + fromIndex;
-        from = tempFrom >= 0 ? tempFrom : -1;
-    }
-
-    // 10. Repeat, while k≥ 0
-    //   a. Let kPresent be HasProperty(O, ToString(k)).
-    //   b. ReturnIfAbrupt(kPresent).
-    //   c. If kPresent is true, then
-    //     i. Let elementK be Get(O, ToString(k)).
-    //     ii. ReturnIfAbrupt(elementK).
-    //     iii. Let same be the result of performing Strict Equality Comparison searchElement === elementK.
-    //     iv. If same is true, return k.
-    //   d. Decrease k by 1.
-    JSMutableHandle key(thread, JSTaggedValue::Undefined());
-    while (from >= 0) {
-        key.Update(JSTaggedValue(from));
-        bool exists = (thisHandle->IsTypedArray() || JSTaggedValue::HasProperty(thread, thisObjVal, key));
+    JSMutableHandle keyHandle(thread, JSTaggedValue::Undefined());
+    JSHandle target = base::BuiltinsBase::GetCallArg(argv, 0);
+    // 11. Repeat, while k >= 0
+    for (int64_t curIndex = fromIndex; curIndex >= 0; --curIndex) {
+        keyHandle.Update(JSTaggedValue(curIndex));
+        bool found = ArrayHelper::ElementIsStrictEqualTo(thread, thisObjVal, keyHandle, target);
         RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-        if (exists) {
-            JSHandle kValueHandle = JSArray::FastGetPropertyByValue(thread, thisObjVal, key);
-            RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
-            if (JSTaggedValue::StrictEqual(thread, searchElement, kValueHandle)) {
-                return GetTaggedDouble(from);
-            }
+        if (UNLIKELY(found)) {
+            return JSTaggedValue(curIndex);
         }
-        from--;
     }
+    // 12. Return -1.
+    return JSTaggedValue(-1);
+}
-
-    // 11. Return -1.
-    return GetTaggedInt(-1);
+// 22.1.3.14 Array.prototype.lastIndexOf ( searchElement [ , fromIndex ] )
+JSTaggedValue BuiltinsArray::LastIndexOf(EcmaRuntimeCallInfo *argv)
+{
+    ASSERT(argv);
+    BUILTINS_API_TRACE(argv->GetThread(), Array, LastIndexOf);
+    JSThread *thread = argv->GetThread();
+    [[maybe_unused]] EcmaHandleScope handleScope(thread);
+
+    JSHandle thisHandle = GetThis(argv);
+    if (thisHandle->IsStableJSArray(thread)) {
+        return LastIndexOfStable(argv, thread, thisHandle);
+    }
+    return LastIndexOfSlowPath(argv, thread, thisHandle);
 }

 // 22.1.3.15 Array.prototype.map ( callbackfn [ , thisArg ] )
@@ -1459,7 +1477,7 @@ JSTaggedValue BuiltinsArray::Map(EcmaRuntimeCallInfo *argv)
     JSMutableHandle key(thread, JSTaggedValue::Undefined());
     JSMutableHandle mapResultHandle(thread, JSTaggedValue::Undefined());
     JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined();
-    const int32_t argsLength = 3; // 3: «kValue, k, O»
+    const uint32_t argsLength = 3; // 3: «kValue, k, O»
     while (k < len) {
         bool exists = JSTaggedValue::HasProperty(thread, thisObjVal, k);
         RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
@@ -1663,6 +1681,10 @@ JSTaggedValue BuiltinsArray::Reduce(EcmaRuntimeCallInfo *argv)
         }
     }

+    if (thisObjVal->IsStableJSArray(thread)) {
+        JSStableArray::Reduce(thread, thisObjHandle, callbackFnHandle, accumulator, k, len);
+    }
+
     // 10. Repeat, while k < len
     //   a. Let Pk be ToString(k).
     //   b. Let kPresent be HasProperty(O, Pk).
@@ -1826,6 +1848,10 @@ JSTaggedValue BuiltinsArray::Reverse(EcmaRuntimeCallInfo *argv)
     int64_t len = ArrayHelper::GetLength(thread, thisObjVal);
     // 4. ReturnIfAbrupt(len).
     RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
+    // Fast path for stable array. Returns thisValue.
+    if (thisObjVal->IsStableJSArray(thread)) {
+        return JSStableArray::Reverse(thread, thisObjHandle, len);
+    }

     // 5. Let middle be floor(len/2).
     int64_t middle = std::floor(len / 2);
@@ -1869,9 +1895,6 @@
     JSMutableHandle upperP(thread, JSTaggedValue::Undefined());
     JSHandle lowerValueHandle(thread, JSTaggedValue::Undefined());
     JSHandle upperValueHandle(thread, JSTaggedValue::Undefined());
-    if (thisObjVal->IsStableJSArray(thread)) {
-        JSStableArray::Reverse(thread, thisObjHandle, thisHandle, lower, len);
-    }
     while (lower != middle) {
         int64_t upper = len - lower - 1;
         lowerP.Update(JSTaggedValue(lower));
@@ -2068,25 +2091,7 @@ JSTaggedValue BuiltinsArray::Slice(EcmaRuntimeCallInfo *argv)
     int64_t count = final > k ?
(final - k) : 0; if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JSHandle destElements = factory->NewTaggedArray(count); - JSHandle newArrayHandle = factory->NewJSStableArrayWithElements(destElements); - TaggedArray *srcElements = TaggedArray::Cast(thisObjHandle->GetElements().GetTaggedObject()); - - uint32_t length = srcElements->GetLength(); - if (length > k + count) { - for (uint32_t idx = 0; idx < count; idx++) { - destElements->Set(thread, idx, srcElements->Get(k + idx)); - } - } else { - for (uint32_t idx = 0; idx < count; idx++) { - uint32_t index = static_cast(k) + idx; - JSTaggedValue value = length > index ? srcElements->Get(index) : JSTaggedValue::Hole(); - destElements->Set(thread, idx, value); - } - } - - return newArrayHandle.GetTaggedValue(); + return JSStableArray::Slice(thread, thisObjHandle, k, count); } // 12. Let A be ArraySpeciesCreate(O, count). @@ -2190,16 +2195,15 @@ JSTaggedValue BuiltinsArray::Some(EcmaRuntimeCallInfo *argv) key.Update(JSTaggedValue(k)); JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, key); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - const int32_t argsLength = 3; // 3: «kValue, k, O» + const uint32_t argsLength = 3; // 3: «kValue, k, O» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); - bool boolResult = callResult.ToBoolean(); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (boolResult) { + if (callResult.ToBoolean()) { return GetTaggedBoolean(true); } } @@ -2214,62 +2218,27 @@ JSTaggedValue BuiltinsArray::Some(EcmaRuntimeCallInfo *argv) JSTaggedValue BuiltinsArray::Sort(EcmaRuntimeCallInfo *argv) { ASSERT(argv); - BUILTINS_API_TRACE(argv->GetThread(), Array, Sort); JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, Sort); [[maybe_unused]] EcmaHandleScope handleScope(thread); - // 1. Let obj be ToObject(this value). - JSHandle thisHandle = GetThis(argv); - JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, throw a TypeError exception. JSHandle callbackFnHandle = GetCallArg(argv, 0); if (!callbackFnHandle->IsUndefined() && !callbackFnHandle->IsCallable()) { THROW_TYPE_ERROR_AND_RETURN(thread, "Callable is false", JSTaggedValue::Exception()); } - // 2. Let len be ToLength(Get(obj, "length")). - int64_t len = ArrayHelper::GetArrayLength(thread, JSHandle(thisObjHandle)); - // 3. ReturnIfAbrupt(len). + // 2. Let obj be ToObject(this value). 
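+    // Dispatch example (plain ECMAScript):
+    //   [3, 1, 2].sort();                 // stable receiver, undefined comparefn -> JSStableArray::Sort
+    //   [3, 1, 2].sort((x, y) => x - y);  // user comparator may run arbitrary JS -> JSArray::Sort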
+ JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSMutableHandle presentValue(thread, JSTaggedValue::Undefined()); - JSMutableHandle middleValue(thread, JSTaggedValue::Undefined()); - JSMutableHandle previousValue(thread, JSTaggedValue::Undefined()); - for (int i = 1; i < len; i++) { - int beginIndex = 0; - int endIndex = i; - presentValue.Update(ObjectFastOperator::FastGetPropertyByIndex(thread, thisObjHandle.GetTaggedValue(), i)); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - while (beginIndex < endIndex) { - int middleIndex = (beginIndex + endIndex) / 2; // 2 : half - middleValue.Update( - ObjectFastOperator::FastGetPropertyByIndex(thread, thisObjHandle.GetTaggedValue(), middleIndex)); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - double compareResult = ArrayHelper::SortCompare(thread, callbackFnHandle, middleValue, presentValue); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (compareResult > 0) { - endIndex = middleIndex; - } else { - beginIndex = middleIndex + 1; - } - } - - if (endIndex >= 0 && endIndex < i) { - for (int j = i; j > endIndex; j--) { - previousValue.Update( - ObjectFastOperator::FastGetPropertyByIndex(thread, thisObjHandle.GetTaggedValue(), j - 1)); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - ObjectFastOperator::FastSetPropertyByIndex(thread, thisObjHandle.GetTaggedValue(), j, - previousValue.GetTaggedValue()); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } - ObjectFastOperator::FastSetPropertyByIndex(thread, thisObjHandle.GetTaggedValue(), endIndex, - presentValue.GetTaggedValue()); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - } + // Array sort + if (thisHandle->IsStableJSArray(thread) && callbackFnHandle->IsUndefined()) { + JSStableArray::Sort(thread, thisObjHandle, callbackFnHandle); + } else { + JSArray::Sort(thread, JSHandle::Cast(thisObjHandle), callbackFnHandle); } - return thisObjHandle.GetTaggedValue(); } @@ -2369,6 +2338,7 @@ JSTaggedValue BuiltinsArray::Splice(EcmaRuntimeCallInfo *argv) toKey.Update(JSTaggedValue(k)); if (newArrayHandle->IsJSProxy()) { toKey.Update(JSTaggedValue::ToString(thread, toKey).GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValue); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2502,17 +2472,6 @@ JSTaggedValue BuiltinsArray::ToLocaleString(EcmaRuntimeCallInfo *argv) // 4. ReturnIfAbrupt(len). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 5. Let separator be the String value for the list-separator String appropriate for the host environment’s - // current locale (this is derived in an implementation-defined way). - JSHandle sepHandle; - if ((GetCallArg(argv, 0)->IsUndefined())) { - sepHandle = JSHandle::Cast(ecmaVm->GetFactory()->NewFromASCII(",")); - } else { - sepHandle = GetCallArg(argv, 0); - } - JSHandle sepStringHandle = JSTaggedValue::ToString(thread, sepHandle); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - CString sepString = ConvertToString(*sepStringHandle); // 6. If len is zero, return the empty String. 
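+    // With the list separator fixed to STRING_SEPERATOR (","), e.g. under an en-US locale
+    //   [1000, 2000].toLocaleString();  // "1,000,2,000": elements stay locale-formatted, the separator does not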
if (len == 0) { return GetTaggedString(thread, ""); @@ -2564,7 +2523,7 @@ JSTaggedValue BuiltinsArray::ToLocaleString(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); CString nextString = ConvertToString(*nextStringHandle); if (k > 0) { - concatStr += sepString; + concatStr += STRING_SEPERATOR; concatStr += nextString; continue; } @@ -2797,12 +2756,13 @@ JSTaggedValue BuiltinsArray::Flat(EcmaRuntimeCallInfo *argv) // b. If depthNum < 0, set depthNum to 0. if (argc > 0) { JSHandle msg1 = GetCallArg(argv, 0); - JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - depthNum = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); - depthNum = depthNum < 0 ? 0 : depthNum; + if (!msg1->IsUndefined()) { + JSTaggedNumber fromIndexTemp = JSTaggedValue::ToNumber(thread, msg1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + depthNum = base::NumberHelper::TruncateDouble(fromIndexTemp.GetNumber()); + depthNum = depthNum < 0 ? 0 : depthNum; + } } - // 5. Let A be ? ArraySpeciesCreate(O, 0). uint32_t arrayLen = 0; JSTaggedValue newArray = JSArray::ArraySpeciesCreate(thread, thisObjHandle, JSTaggedNumber(arrayLen)); @@ -2914,6 +2874,7 @@ JSTaggedValue BuiltinsArray::Includes(EcmaRuntimeCallInfo *argv) while (from < len) { JSHandle handledFrom(thread, JSTaggedValue(from)); fromStr = JSTaggedValue::ToString(thread, handledFrom); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); key.Update(fromStr.GetTaggedValue()); kValueHandle.Update(JSArray::FastGetPropertyByValue(thread, thisObjVal, key).GetTaggedValue()); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2936,6 +2897,9 @@ JSTaggedValue BuiltinsArray::At(EcmaRuntimeCallInfo *argv) // 1. Let O be ToObject(this value). JSHandle thisHandle = GetThis(argv); + if (thisHandle->IsStableJSArray(thread)) { + return JSStableArray::At(JSHandle::Cast(thisHandle), argv); + } JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); // ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -2973,4 +2937,441 @@ JSTaggedValue BuiltinsArray::At(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return element.GetTaggedValue(); } + +// 23.1.3.39 Array.prototype.with ( index, value ) +// NOLINTNEXTLINE(readability-function-size) +JSTaggedValue BuiltinsArray::With(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, With); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 3. Let relativeIndex be ? ToIntegerOrInfinity(relativeIndex). + JSTaggedNumber index = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(index). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t relativeIndex = index.GetNumber(); + int64_t actualIndex = 0; + JSHandle value = GetCallArg(argv, 1); + // 4. If relativeIndex ≥ 0, let actualIndex be relativeIndex. + // 5. Else, let actualIndex be len + relativeIndex. + // 6. If actualIndex ≥ len or actualIndex < 0, throw a RangeError exception. 
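+    // Examples (plain ECMAScript):
+    //   [1, 2, 3].with(1, 9);   // [1, 9, 3]
+    //   [1, 2, 3].with(-1, 9);  // [1, 2, 9], actualIndex = len + relativeIndex = 2
+    //   [1, 2, 3].with(3, 9);   // throws RangeError, actualIndex >= len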
+ if (relativeIndex >= 0) { + actualIndex = relativeIndex; + } else { + actualIndex = len + relativeIndex; + } + if (actualIndex >= len || actualIndex < 0) { + THROW_RANGE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + } + // 7. Let A be ? ArrayCreate(len). + JSTaggedValue newArray = + JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::With(thread, JSHandle::Cast(thisHandle), len, actualIndex, value); + } + // 8. Let k be 0. + int64_t k = 0; + // 9. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If k is actualIndex, let fromValue be value. + // c. Else, let fromValue be ? Get(O, Pk). + // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). + // e. Set k to k + 1. + JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); + JSHandle fromValue; + while (k < len) { + fromKey.Update(JSTaggedValue(k)); + if (k == actualIndex) { + fromValue = value; + } else { + fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, fromKey, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++k; + } + // 10. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.34 Array.prototype.toSorted ( comparefn ) +JSTaggedValue BuiltinsArray::ToSorted(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToSorted); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsUndefined() && !callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "Callable is false", JSTaggedValue::Exception()); + } + + // 2. Let obj be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 3. Let len be ToLength(Get(obj, "length")). + int64_t len = ArrayHelper::GetArrayLength(thread, JSHandle(thisObjHandle)); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 4. Let A be ? ArrayCreate(len). + JSTaggedValue newArray = JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + // 5. Let SortCompare be a new Abstract Closure with parameters (x, y) that captures comparefn and performs + // the following steps when called: + // a. Return ? CompareArrayElements(x, y, comparefn). + // 6. Let sortedList be ? SortIndexedProperties(O, len, SortCompare, read-through-holes). + JSHandle sortedList = + ArrayHelper::SortIndexedProperties(thread, JSHandle::Cast(thisObjHandle), len, callbackFnHandle, + base::HolesType::READ_THROUGH_HOLES); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + //7. Let j be 0. + int64_t j = 0; + // 8. Repeat, while j < len, + // a. Perform ! CreateDataPropertyOrThrow(A, ! ToString(𝔽(j)), sortedList[j]). + // b. Set j to j + 1. 
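+    // Example (plain ECMAScript): [3, 1, 2].toSorted() returns a new [1, 2, 3] and leaves the
+    // receiver untouched; with read-through-holes, [3, , 1].toSorted() yields [1, 3, undefined].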
+ JSMutableHandle itemValue(thread, JSTaggedValue::Undefined()); + while (j < len) { + itemValue.Update(sortedList->Get(j)); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, j, itemValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++j; + } + // 9. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.35 Array.prototype.toSpliced ( start, skipCount, ...items ) +JSTaggedValue BuiltinsArray::ToSpliced(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToSpliced); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + uint32_t argc = argv->GetArgsNumber(); + // 1. Let O be ? ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetArrayLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t actualStart = 0; + int64_t actualSkipCount = 0; + int64_t newLen = 0; + int64_t insertCount = 0; + // 3. Let relativeStart be ? ToIntegerOrInfinity(start). + if (argc > 0) { + JSTaggedNumber argStart = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(relativeStart). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + double relativeStart = argStart.GetNumber(); + // 4. If relativeStart = -∞, let k be 0. + // 5. Else if relativeStart < 0, let k be max(len + relativeStart, 0). + // 6. Else, let k be min(relativeStart, len). + if (relativeStart < 0) { + double tempStart = relativeStart + len; + actualStart = tempStart > 0 ? tempStart : 0; + } else { + actualStart = relativeStart < len ? relativeStart : len; + } + actualSkipCount = len - actualStart; + } + // 7. Let insertCount be the number of elements in items. + // 8. If start is not present, then + // a. Let actualSkipCount be 0. + // 9. Else if skipCount is not present, then + // a. Let actualSkipCount be len - actualStart. + // 10. Else, + // a. Let sc be ? ToIntegerOrInfinity(skipCount). + // b. Let actualSkipCount be the result of clamping sc between 0 and len - actualStart. + if (argc > 1) { + insertCount = argc - 2; // 2:2 means there two arguments before the insert items. + JSTaggedNumber argSkipCount = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 1)); + // ReturnIfAbrupt(argSkipCount). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + double skipCount = argSkipCount.GetNumber(); + skipCount = skipCount > 0 ? skipCount : 0; + actualSkipCount = skipCount < (len - actualStart) ? skipCount : len - actualStart; + } + // 11. Let newLen be len + insertCount - actualSkipCount. + newLen = len + insertCount - actualSkipCount; + // 12. If newLen > 2^53 - 1, throw a TypeError exception. + if (newLen > base::MAX_SAFE_INTEGER) { + THROW_TYPE_ERROR_AND_RETURN(thread, "out of range.", JSTaggedValue::Exception()); + } + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::ToSpliced(JSHandle::Cast(thisHandle), argv, argc, actualStart, + actualSkipCount, newLen); + } + // 13. Let A be ? ArrayCreate(newLen). + JSHandle newJsTaggedArray = + JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(newLen))); + // ReturnIfAbrupt(newArray). 
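+    // Examples (plain ECMAScript):
+    //   [1, 2, 3, 4].toSpliced(1, 2);        // [1, 4]
+    //   [1, 2, 3, 4].toSpliced(1, 2, 9, 8);  // [1, 9, 8, 4], newLen = len + insertCount - actualSkipCount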
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newJsTaggedArray.GetTaggedValue()); + // 14. Let i be 0. + int64_t i = 0; + // 15. Let r be actualStart + actualSkipCount. + int64_t r = actualStart + actualSkipCount; + // 16. Repeat, while i < actualStart, + // a. Let Pi be ! ToString(𝔽(i)). + // b. Let iValue be ? Get(O, Pi). + // c. Perform ! CreateDataPropertyOrThrow(A, Pi, iValue). + // d. Set i to i + 1. + while (i < actualStart) { + JSHandle iValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, i); + // ReturnIfAbrupt(iValue). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, i, iValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + } + // 17. For each element E of items, do + // a. Let Pi be ! ToString(𝔽(i)). + // b. Perform ! CreateDataPropertyOrThrow(A, Pi, E). + // c. Set i to i + 1. + JSMutableHandle pi(thread, JSTaggedValue::Undefined()); + for (int64_t pos = 2; pos < argc; ++pos) { // 2:2 means there two arguments before the insert items. + pi.Update(JSTaggedValue(i)); + JSHandle element = GetCallArg(argv, pos); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, pi, element); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + } + // 18. Repeat, while i < newLen, + // a. Let Pi be ! ToString(𝔽(i)). + // b. Let from be ! ToString(𝔽(r)). + // c. Let fromValue be ? Get(O, from). + // d. Perform ! CreateDataPropertyOrThrow(A, Pi, fromValue). + // e. Set i to i + 1. + // f. Set r to r + 1. + JSMutableHandle from(thread, JSTaggedValue::Undefined()); + while (i < newLen) { + pi.Update(JSTaggedValue(i)); + from.Update(JSTaggedValue(r)); + JSHandle fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, from); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, pi, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++i; + ++r; + } + JSHandle lengthKey = thread->GlobalConstants()->GetHandledLengthString(); + JSHandle newLenHandle(thread, JSTaggedValue(newLen)); + JSTaggedValue::SetProperty(thread, newJsTaggedArray, lengthKey, newLenHandle, true); + // ReturnIfAbrupt(setStatus). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 19. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.1.3.11 Array.prototype.findLast ( predicate [ , thisArg ] ) +JSTaggedValue BuiltinsArray::FindLast(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, FindLast); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // 2. ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 3. Let len be ToLength(Get(O, "length")). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // 4. ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 5. If IsCallable(predicate) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the predicate is not callable.", JSTaggedValue::Exception()); + } + + // 6. If thisArg was supplied, let T be thisArg; else let T be undefined. + JSHandle thisArgHandle = GetCallArg(argv, 1); + + // 7. Let k be (len - 1). + // 8. Repeat, while k >= 0 + // a. 
Let Pk be ToString(k). + // b. Let kValue be Get(O, Pk). + // c. ReturnIfAbrupt(kValue). + // d. Let testResult be ToBoolean(Call(predicate, T, «kValue, k, O»)). + // e. ReturnIfAbrupt(testResult). + // f. If testResult is true, return kValue. + // g. Decrease k by 1. + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + int64_t k = len - 1; + JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); + const uint32_t argsLength = 3; // 3: «kValue, k, O» + while (k >= 0) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + key.Update(JSTaggedValue(k)); + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); + JSTaggedValue callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (callResult.ToBoolean()) { + return kValue.GetTaggedValue(); + } + k--; + } + + // 9. Return undefined. + return JSTaggedValue::Undefined(); +} + +// 23.1.3.12 Array.prototype.findLastIndex ( predicate [ , thisArg ] ) +JSTaggedValue BuiltinsArray::FindLastIndex(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, FindLastIndex); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // 2. ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 3. Let len be ToLength(Get(O, "length")). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // 4. ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 5. If IsCallable(predicate) is false, throw a TypeError exception. + JSHandle callbackFnHandle = GetCallArg(argv, 0); + if (!callbackFnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the predicate is not callable.", JSTaggedValue::Exception()); + } + + // 6. If thisArg was supplied, let T be thisArg; else let T be undefined. + JSHandle thisArgHandle = GetCallArg(argv, 1); + + // 7. Let k be (len - 1). + // 8. Repeat, while k >=0 + // a. Let Pk be ToString(k). + // b. Let kValue be Get(O, Pk). + // c. ReturnIfAbrupt(kValue). + // d. Let testResult be ToBoolean(Call(predicate, T, «kValue, k, O»)). + // e. ReturnIfAbrupt(testResult). + // f. If testResult is true, return k. + // g. Decrease k by 1. 
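+    // Example (plain ECMAScript): [1, 2, 3, 2].findLastIndex((x) => x === 2) scans from k = 3
+    // and returns 3; findLast returns the matching element itself (2) instead of the index.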
+ int64_t k = len - 1; + JSTaggedValue callResult = GetTaggedBoolean(true); + if (thisObjVal->IsStableJSArray(thread)) { + callResult = + JSStableArray::HandleFindLastIndexOfStable(thread, thisObjHandle, callbackFnHandle, thisArgHandle, k); + if (callResult.ToBoolean()) { + return GetTaggedDouble(k); + } + } + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); + const uint32_t argsLength = 3; // 3: «kValue, k, O» + while (k >= 0) { + JSHandle kValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, k); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + key.Update(JSTaggedValue(k)); + EcmaRuntimeCallInfo *info = + EcmaInterpreter::NewRuntimeCallInfo(thread, callbackFnHandle, thisArgHandle, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + info->SetCallArg(kValue.GetTaggedValue(), key.GetTaggedValue(), thisObjVal.GetTaggedValue()); + callResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (callResult.ToBoolean()) { + return GetTaggedDouble(k); + } + k--; + } + + // 9. Return -1. + return GetTaggedDouble(-1); +} + +// 23.1.3.33 Array.prototype.toReversed ( ) +JSTaggedValue BuiltinsArray::ToReversed(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, Array, ToReversed); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjVal(thisObjHandle); + + // 2. Let len be ? LengthOfArrayLike(O). + int64_t len = ArrayHelper::GetLength(thread, thisObjVal); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (thisHandle->IsStableJSArray(thread) && !thisObjHandle->GetJSHClass()->HasConstructor()) { + return JSStableArray::ToReversed(thread, JSHandle::Cast(thisHandle), len); + } + // 3. Let A be ? ArrayCreate(len). + JSTaggedValue newArray = JSArray::ArrayCreate(thread, JSTaggedNumber(static_cast(len))).GetTaggedValue(); + // ReturnIfAbrupt(len). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle newArrayHandle(thread, newArray); + + // 4. Let k be 0. + // 5. Repeat, while k < len, + // a. Let from be ! ToString(𝔽(len - k - 1)). + // b. Let Pk be ! ToString(𝔽(k)). + // c. Let fromValue be ? Get(O, from). + // d. Perform ! CreateDataPropertyOrThrow(A, Pk, fromValue). + // e. Set k to k + 1. + JSMutableHandle fromKey(thread, JSTaggedValue::Undefined()); + JSMutableHandle toKey(thread, JSTaggedValue::Undefined()); + int64_t k = 0; + while (k < len) { + int64_t from = len - k - 1; + fromKey.Update(JSTaggedValue(from)); + toKey.Update(JSTaggedValue(k)); + JSHandle fromValue = JSArray::FastGetPropertyByValue(thread, thisObjVal, fromKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSObject::CreateDataPropertyOrThrow(thread, newArrayHandle, toKey, fromValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + k++; + } + // 6. Return A. 
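+    // Example (plain ECMAScript): [1, 2, 3].toReversed() yields a fresh [3, 2, 1];
+    // unlike reverse(), the receiver is left unchanged.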
+ return newArrayHandle.GetTaggedValue(); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_array.h b/ecmascript/builtins/builtins_array.h index e63cd3e969579bdda31fcb2fe7acc097c16d5a02..82fba75e4882988ce5e648a701ff480607ef9f73 100644 --- a/ecmascript/builtins/builtins_array.h +++ b/ecmascript/builtins/builtins_array.h @@ -18,9 +18,103 @@ #include "ecmascript/base/builtins_base.h" +// List of functions in Array, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsArray::func refers to the native implementation of Array[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_ARRAY_FUNCTIONS(V) \ + /* Array.from ( items [ , mapfn [ , thisArg ] ] ) */ \ + V("from", From, 1, INVALID) \ + /* Array.isArray ( arg ) */ \ + V("isArray", IsArray, 1, INVALID) \ + /* Array.of ( ...items ) */ \ + V("of", Of, 0, INVALID) + +// List of functions in Array.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsArray::func refers to the native implementation of Array.prototype[name]. +#define BUILTIN_ARRAY_PROTOTYPE_FUNCTIONS(V) \ + /* Array.prototype.at ( index ) */ \ + V("at", At, 1, INVALID) \ + /* Array.prototype.concat ( ...items ) */ \ + V("concat", Concat, 1, ArrayConcat) \ + /* Array.prototype.copyWithin ( target, start [ , end ] ) */ \ + V("copyWithin", CopyWithin, 2, INVALID) \ + /* Array.prototype.entries ( ) */ \ + V("entries", Entries, 0, INVALID) \ + /* Array.prototype.every ( callbackfn [ , thisArg ] ) */ \ + V("every", Every, 1, INVALID) \ + /* Array.prototype.fill ( value [ , start [ , end ] ] ) */ \ + V("fill", Fill, 1, INVALID) \ + /* Array.prototype.filter ( callbackfn [ , thisArg ] ) */ \ + V("filter", Filter, 1, ArrayFilter) \ + /* Array.prototype.find ( predicate [ , thisArg ] ) */ \ + V("find", Find, 1, INVALID) \ + /* Array.prototype.findIndex ( predicate [ , thisArg ] ) */ \ + V("findIndex", FindIndex, 1, INVALID) \ + /* Array.prototype.findLast ( predicate [ , thisArg ] ) */ \ + V("findLast", FindLast, 1, INVALID) \ + /* Array.prototype.findLastIndex ( predicate [ , thisArg ] ) */ \ + V("findLastIndex", FindLastIndex, 1, INVALID) \ + /* Array.prototype.flat ( [ depth ] ) */ \ + V("flat", Flat, 0, INVALID) \ + /* Array.prototype.flatMap ( mapperFunction [ , thisArg ] ) */ \ + V("flatMap", FlatMap, 1, INVALID) \ + /* Array.prototype.forEach ( callbackfn [ , thisArg ] ) */ \ + V("forEach", ForEach, 1, ArrayForEach) \ + /* Array.prototype.includes ( searchElement [ , fromIndex ] ) */ \ + V("includes", Includes, 1, INVALID) \ + /* Array.prototype.indexOf ( searchElement [ , fromIndex ] ) */ \ + V("indexOf", IndexOf, 1, ArrayIndexOf) \ + /* Array.prototype.join ( separator ) */ \ + V("join", Join, 1, INVALID) \ + /* Array.prototype.keys ( ) */ \ + V("keys", Keys, 0, INVALID) \ + /* Array.prototype.lastIndexOf ( searchElement [ , fromIndex ] ) */ \ + V("lastIndexOf", LastIndexOf, 1, ArrayLastIndexOf) \ + /* Array.prototype.map ( callbackfn [ , thisArg ] ) */ \ + V("map", Map, 1, INVALID) \ + /* Array.prototype.pop ( ) */ \ + V("pop", Pop, 0, INVALID) \ + /* Array.prototype.push ( ...items ) */ \ + V("push", Push, 1, ArrayPush) \ + /* Array.prototype.reduce ( callbackfn [ , initialValue ] ) */ \ + V("reduce", Reduce, 1, INVALID) \ + /* Array.prototype.reduceRight ( callbackfn [ , initialValue ] ) */ \ + V("reduceRight", ReduceRight, 1, INVALID) \ + /* Array.prototype.reverse ( ) */ \ + 
V("reverse", Reverse, 0, ArrayReverse) \ + /* Array.prototype.shift ( ) */ \ + V("shift", Shift, 0, INVALID) \ + /* Array.prototype.slice ( start, end ) */ \ + V("slice", Slice, 2, ArraySlice) \ + /* Array.prototype.some ( callbackfn [ , thisArg ] ) */ \ + V("some", Some, 1, INVALID) \ + /* Array.prototype.sort ( comparefn ) */ \ + V("sort", Sort, 1, SORT) \ + /* Array.prototype.splice ( start, deleteCount, ...items ) */ \ + V("splice", Splice, 2, INVALID) \ + /* Array.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleString", ToLocaleString, 0, INVALID) \ + /* Array.prototype.toReversed ( ) */ \ + V("toReversed", ToReversed, 0, INVALID) \ + /* Array.prototype.toSorted ( comparefn ) */ \ + V("toSorted", ToSorted, 1, INVALID) \ + /* Array.prototype.toSpliced ( start, skipCount, ...items ) */ \ + V("toSpliced", ToSpliced, 2, INVALID) \ + /* Array.prototype.toString ( ) */ \ + V("toString", ToString, 0, INVALID) \ + /* Array.prototype.unshift ( ...items ) */ \ + V("unshift", Unshift, 1, INVALID) \ + /* Array.prototype.values ( ) */ \ + V("values", Values, 0, INVALID) \ + /* Array.prototype.with ( index, value ) */ \ + V("with", With, 2, INVALID) + namespace panda::ecmascript::builtins { static constexpr uint8_t INDEX_TWO = 2; static constexpr uint8_t INDEX_THREE = 3; +static const CString STRING_SEPERATOR = ","; class BuiltinsArray : public base::BuiltinsBase { public: // 22.1.1 @@ -102,7 +196,68 @@ public: static JSTaggedValue FlatMap(EcmaRuntimeCallInfo *argv); // 23.1.3.1 Array.prototype.at ( index ) static JSTaggedValue At(EcmaRuntimeCallInfo *argv); + // 23.1.3.33 Array.prototype.toReversed ( ) + static JSTaggedValue ToReversed(EcmaRuntimeCallInfo *argv); + // 23.1.3.39 Array.prototype.with ( index, value ) + static JSTaggedValue With(EcmaRuntimeCallInfo *argv); + // 23.1.3.34 Array.prototype.toSorted ( comparefn ) + static JSTaggedValue ToSorted(EcmaRuntimeCallInfo *argv); + // 23.1.3.11 + static JSTaggedValue FindLast(EcmaRuntimeCallInfo *argv); + // 23.1.3.12 + static JSTaggedValue FindLastIndex(EcmaRuntimeCallInfo *argv); + // 23.1.3.35 Array.prototype.toSpliced ( start, skipCount, ...items ) + static JSTaggedValue ToSpliced(EcmaRuntimeCallInfo *argv); + + // Excluding the '@@' internal properties + static Span GetArrayFunctions() + { + return Span(ARRAY_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties. + static Span GetArrayPrototypeFunctions() + { + return Span(ARRAY_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 4 : 4 More inlined entries in Array.prototype for the following functions/accessors: + // (1) 'length' accessor + // (2) Array.prototype.constructor, i.e. 
Array() + // (3) Array.prototype[@@iterator]() + // (4) Array.prototype[@@unscopables]() + return GetArrayPrototypeFunctions().Size() + 4; + } + +private: +#define BUILTIN_ARRAY_FUNCTION_ENTRY(name, method, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsArray::method, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array ARRAY_FUNCTIONS = { + BUILTIN_ARRAY_FUNCTIONS(BUILTIN_ARRAY_FUNCTION_ENTRY) + }; + static constexpr std::array ARRAY_PROTOTYPE_FUNCTIONS = { + BUILTIN_ARRAY_PROTOTYPE_FUNCTIONS(BUILTIN_ARRAY_FUNCTION_ENTRY) + }; +#undef BUILTIN_ARRAY_FUNCTION_ENTRY + + static JSTaggedValue IndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue IndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, + int64_t length, int64_t fromIndex); + + static JSTaggedValue LastIndexOfStable( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue LastIndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisHandle); + static JSTaggedValue LastIndexOfSlowPath( + EcmaRuntimeCallInfo *argv, JSThread *thread, const JSHandle &thisObjVal, int64_t fromIndex); }; } // namespace panda::ecmascript::builtins -#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARRAY_H \ No newline at end of file +#endif // ECMASCRIPT_BUILTINS_BUILTINS_ARRAY_H diff --git a/ecmascript/builtins/builtins_arraybuffer.cpp b/ecmascript/builtins/builtins_arraybuffer.cpp index d01e5644aa8ee3b9fe66a4fc353c412bf30bba93..0373de8b430a25d0fa5c5d0848404f4b9de5c3b8 100644 --- a/ecmascript/builtins/builtins_arraybuffer.cpp +++ b/ecmascript/builtins/builtins_arraybuffer.cpp @@ -87,7 +87,7 @@ JSTaggedValue BuiltinsArrayBuffer::GetByteLength(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); BUILTINS_API_TRACE(thread, ArrayBuffer, GetByteLength); [[maybe_unused]] EcmaHandleScope handleScope(thread); - + // 1. Let O be the this value. JSHandle thisHandle = GetThis(argv); // 2. If Type(O) is not Object, throw a TypeError exception. @@ -180,6 +180,7 @@ JSTaggedValue BuiltinsArrayBuffer::Slice(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(JSTaggedValue(newLen)); JSTaggedValue taggedNewArrBuf = JSFunction::Construct(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle newArrBuf(thread, taggedNewArrBuf); // 16. ReturnIfAbrupt(new). 
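+    // Example (plain ECMAScript): if @@species resolves to a constructor that throws, e.g.
+    //   buf.constructor = { [Symbol.species]: function () { throw new Error(); } };
+    // the pending exception now propagates here before the result is wrapped in a handle.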
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -461,9 +462,9 @@ T BuiltinsArrayBuffer::LittleEndianToBigEndian64Bit(T liValue) template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForInteger(uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_integral_v, "T must be integral"); - static_assert(sizeof(T) == size, "Invalid number size"); - static_assert(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); + ASSERT_PRINT(std::is_integral_v, "T must be integral"); + ASSERT_PRINT(sizeof(T) == size, "Invalid number size"); + ASSERT_PRINT(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); ASSERT(size >= NumberSize::UINT16 || size <= NumberSize::FLOAT64); T res = *reinterpret_cast(block + byteIndex); @@ -485,38 +486,50 @@ JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForInteger(uint8_t *block, template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForFloat(uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be correct type"); - static_assert(sizeof(T) == size, "Invalid number size"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be correct type"); + ASSERT_PRINT(sizeof(T) == size, "Invalid number size"); UnionType unionValue = {0}; // NOLINTNEXTLINE(readability-braces-around-statements) if constexpr (std::is_same_v) { unionValue.uValue = *reinterpret_cast(block + byteIndex); - if (std::isnan(unionValue.value)) { - return GetTaggedDouble(unionValue.value); - } - if (!littleEndian) { - uint32_t res = LittleEndianToBigEndian(unionValue.uValue); - return GetTaggedDouble(base::bit_cast(res)); - } + uint32_t res = LittleEndianToBigEndian(unionValue.uValue); + return CommonConvert(unionValue.value, res, littleEndian); } else if constexpr (std::is_same_v) { // NOLINTNEXTLINE(readability-braces-around-statements) unionValue.uValue = *reinterpret_cast(block + byteIndex); - if (std::isnan(unionValue.value) && !JSTaggedValue::IsImpureNaN(unionValue.value)) { - return GetTaggedDouble(unionValue.value); - } - if (!littleEndian) { - uint64_t res = LittleEndianToBigEndian64Bit(unionValue.uValue); - return GetTaggedDouble(base::bit_cast(res)); - } + uint64_t res = LittleEndianToBigEndian64Bit(unionValue.uValue); + return CommonConvert(unionValue.value, res, littleEndian); } return GetTaggedDouble(unionValue.value); } + +template +JSTaggedValue BuiltinsArrayBuffer::CommonConvert(T1 &value, T2 &res, bool littleEndian) +{ + if (std::isnan(value) && !JSTaggedValue::IsImpureNaN(value)) { + return GetTaggedDouble(value); + } + if (!littleEndian) { + T1 d = base::bit_cast(res); + if (JSTaggedValue::IsImpureNaN(d)) { + return GetTaggedDouble(base::NAN_VALUE); + } + return GetTaggedDouble(d); + } else { + if (JSTaggedValue::IsImpureNaN(value)) { + return GetTaggedDouble(base::NAN_VALUE); + } + } + return GetTaggedDouble(value); +} + + template JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForBigInt(JSThread *thread, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be uint64_t/int64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be uint64_t/int64_t"); auto pTmp = *reinterpret_cast(block + byteIndex); if (!littleEndian) { pTmp = LittleEndianToBigEndian64Bit(pTmp); @@ -531,7 +544,7 @@ JSTaggedValue BuiltinsArrayBuffer::GetValueFromBufferForBigInt(JSThread *thread, template void BuiltinsArrayBuffer::SetValueInBufferForByte(double val, uint8_t *block, uint32_t byteIndex) { - 
static_assert(std::is_same_v || std::is_same_v, "T must be int8/uint8"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int8/uint8"); T res; if (std::isnan(val) || std::isinf(val)) { res = 0; @@ -562,8 +575,8 @@ void BuiltinsArrayBuffer::SetValueInBufferForUint8Clamped(double val, uint8_t *b template void BuiltinsArrayBuffer::SetValueInBufferForInteger(double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_integral_v, "T must be integral"); - static_assert(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); + ASSERT_PRINT(std::is_integral_v, "T must be integral"); + ASSERT_PRINT(sizeof(T) >= sizeof(uint16_t), "T must have a size more than uint8"); T res; if (std::isnan(val) || std::isinf(val)) { res = 0; @@ -590,7 +603,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForInteger(double val, uint8_t *block, template void BuiltinsArrayBuffer::SetValueInBufferForFloat(double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be float type"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be float type"); auto data = static_cast(val); if (std::isnan(val)) { SetTypeData(block, data, byteIndex); @@ -614,7 +627,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForBigInt(JSThread *thread, JSHandle &arrBuf, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be int64_t/uint64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int64_t/uint64_t"); T value = 0; bool lossless = true; if constexpr(std::is_same_v) { @@ -636,7 +649,7 @@ void BuiltinsArrayBuffer::SetValueInBufferForBigInt(JSThread *thread, double val, uint8_t *block, uint32_t byteIndex, bool littleEndian) { - static_assert(std::is_same_v || std::is_same_v, "T must be int64_t/uint64_t"); + ASSERT_PRINT((std::is_same_v || std::is_same_v), "T must be int64_t/uint64_t"); T value = 0; bool lossless = true; @@ -711,14 +724,15 @@ void *BuiltinsArrayBuffer::GetDataPointFromBuffer(JSTaggedValue arrBuf, uint32_t { if (arrBuf.IsByteArray()) { return reinterpret_cast(ToUintPtr(ByteArray::Cast(arrBuf.GetTaggedObject())->GetData()) + byteOffset); - } else { - JSArrayBuffer *arrayBuffer = JSArrayBuffer::Cast(arrBuf.GetTaggedObject()); - if (arrayBuffer->GetArrayBufferByteLength() == 0) { - return nullptr; - } - JSTaggedValue data = arrayBuffer->GetArrayBufferData(); - return reinterpret_cast(ToUintPtr(JSNativePointer::Cast(data.GetTaggedObject()) - ->GetExternalPointer()) + byteOffset); } + + JSArrayBuffer *arrayBuffer = JSArrayBuffer::Cast(arrBuf.GetTaggedObject()); + if (arrayBuffer->GetArrayBufferByteLength() == 0) { + return nullptr; + } + + JSTaggedValue data = arrayBuffer->GetArrayBufferData(); + return reinterpret_cast(ToUintPtr(JSNativePointer::Cast(data.GetTaggedObject()) + ->GetExternalPointer()) + byteOffset); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_arraybuffer.h b/ecmascript/builtins/builtins_arraybuffer.h index 0b67283f64b335bab734589774ec4e17d6bebf45..80cda3a205571fa773a41980875245ecf7d7727d 100644 --- a/ecmascript/builtins/builtins_arraybuffer.h +++ b/ecmascript/builtins/builtins_arraybuffer.h @@ -92,6 +92,8 @@ private: template static JSTaggedValue GetValueFromBufferForFloat(uint8_t *block, uint32_t byteIndex, bool littleEndian); + template + static JSTaggedValue CommonConvert(T1 &value, T2 &res, bool littleEndian); template static JSTaggedValue GetValueFromBufferForBigInt(JSThread 
*thread, uint8_t *block, uint32_t byteIndex, bool littleEndian); diff --git a/ecmascript/builtins/builtins_async_from_sync_iterator.cpp b/ecmascript/builtins/builtins_async_from_sync_iterator.cpp index 8ff19fd15f3e67a38094385597a8f4b984faa179..ae63f7c4b821b67d60a91dd5b80b999cb010e749 100644 --- a/ecmascript/builtins/builtins_async_from_sync_iterator.cpp +++ b/ecmascript/builtins/builtins_async_from_sync_iterator.cpp @@ -44,6 +44,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Next(EcmaRuntimeCallInfo *argv) // 3.Let promiseCapability be ! NewPromiseCapability(%Promise%). JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIteratorRecord be O.[[SyncIteratorRecord]]. JSHandle asyncIterator(thisValue); JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); @@ -79,6 +80,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) JSHandle env = vm->GetGlobalEnv(); JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIterator be O.[[SyncIteratorRecord]].[[Iterator]]. JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); JSHandle syncIterator(thread, syncIteratorRecord->GetIterator()); @@ -91,9 +93,9 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) // 7.If throw is undefined, then if (throwResult->IsUndefined()) { JSHandle iterResult = JSIterator::CreateIterResultObject(thread, value, true); - JSHandle resolve(thread, pcap->GetResolve()); + JSHandle reject(thread, pcap->GetReject()); EcmaRuntimeCallInfo *info = - EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, undefinedValue, undefinedValue, 1); + EcmaInterpreter::NewRuntimeCallInfo(thread, reject, undefinedValue, undefinedValue, 1); info->SetCallArg(iterResult.GetTaggedValue()); return pcap->GetPromise(); } @@ -102,10 +104,12 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Throw(EcmaRuntimeCallInfo *argv) if (value->IsNull()) { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, throwResult, syncIterator, undefinedValue, 0); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, throwResult, pcap); ret = JSFunction::Call(callInfo); } else { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, throwResult, syncIterator, undefinedValue, 1); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, throwResult, pcap); callInfo->SetCallArg(value.GetTaggedValue()); ret = JSFunction::Call(callInfo); } @@ -148,6 +152,7 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Return(EcmaRuntimeCallInfo *argv) JSHandle env = vm->GetGlobalEnv(); JSHandle pcap = JSPromise::NewPromiseCapability(thread, JSHandle::Cast(env->GetPromiseFunction())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 4.Let syncIterator be O.[[SyncIteratorRecord]].[[Iterator]]. 
JSHandle asyncIterator(thisValue); JSHandle syncIteratorRecord(thread, asyncIterator->GetSyncIteratorRecord()); @@ -174,10 +179,12 @@ JSTaggedValue BuiltinsAsyncFromSyncIterator::Return(EcmaRuntimeCallInfo *argv) if (value->IsNull()) { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, returnResult, syncIterator, undefinedValue, 0); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, returnResult, pcap); ret = JSFunction::Call(callInfo); } else { EcmaRuntimeCallInfo *callInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, returnResult, syncIterator, undefinedValue, 1); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, returnResult, pcap); callInfo->SetCallArg(value.GetTaggedValue()); ret = JSFunction::Call(callInfo); } diff --git a/ecmascript/builtins/builtins_async_iterator.cpp b/ecmascript/builtins/builtins_async_iterator.cpp index a71b187534ef30494a7fedd02de5c743e7cef679..f4f31c4e1728779113489392464fceee83402f74 100644 --- a/ecmascript/builtins/builtins_async_iterator.cpp +++ b/ecmascript/builtins/builtins_async_iterator.cpp @@ -48,13 +48,16 @@ JSTaggedValue BuiltinsAsyncIterator::Return(EcmaRuntimeCallInfo *argv) JSHandle promiseFunc = env->GetPromiseFunction(); JSHandle value = GetCallArg(argv, 0); JSHandle pcap = JSPromise::NewPromiseCapability(thread, promiseFunc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle iterResult = JSIterator::CreateIterResultObject(thread, value, true); JSHandle iterResultVal(iterResult); JSHandle resolve(thread, pcap->GetResolve()); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo* info = EcmaInterpreter::NewRuntimeCallInfo(thread, resolve, undefined, undefined, 1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(iterResultVal.GetTaggedValue()); JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return pcap->GetPromise(); } diff --git a/ecmascript/builtins/builtins_atomics.cpp b/ecmascript/builtins/builtins_atomics.cpp index b0e43f65f806cc0d27a3b62fd2650c2e826b8559..6d0e39240b12e40e5551eb6dcebacca95dcb65cf 100644 --- a/ecmascript/builtins/builtins_atomics.cpp +++ b/ecmascript/builtins/builtins_atomics.cpp @@ -183,6 +183,9 @@ JSTaggedValue BuiltinsAtomics::Wait(EcmaRuntimeCallInfo *argv) // 5. Otherwise, let v be ? ToInt32(value). 
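+    // Example (plain ECMAScript): on a BigInt64Array view, Atomics.wait(i64a, 0, true)
+    // now coerces the boolean via ToBigInt64 (true -> 1n) before the int64 read.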
int64_t v = 0; if (array->IsJSBigInt64Array()) { + if (value->IsBoolean()) { + value = JSHandle(thread, JSTaggedValue::ToBigInt64(thread, value)); + } v = JSHandle::Cast(value)->ToInt64(); } else { v = static_cast(JSTaggedValue::ToInt32(thread, value)); @@ -469,6 +472,7 @@ JSTaggedValue BuiltinsAtomics::HandleWithBigInt64(JSThread *thread, uint32_t siz int64_t val = 0; bool lossless = true; BigInt::BigIntToInt64(thread, value, &val, &lossless); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (size == 3) { // the number of parameters is 3 auto result = op(reinterpret_cast(block + indexedPosition), &val); return BigInt::Int64ToBigInt(thread, result).GetTaggedValue(); @@ -476,6 +480,7 @@ JSTaggedValue BuiltinsAtomics::HandleWithBigInt64(JSThread *thread, uint32_t siz JSHandle newValue = BuiltinsBase::GetCallArg(argv, BuiltinsBase::ArgsPosition::FOURTH); int64_t newVal = 0; BigInt::BigIntToInt64(thread, newValue, &newVal, &lossless); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int64_t arg[ARGS_NUMBER] = {0}; arg[0] = val; arg[1] = newVal; diff --git a/ecmascript/builtins/builtins_atomics.h b/ecmascript/builtins/builtins_atomics.h index 3645a7b4d6444a1f0745e4b25e83807122d0fb87..943990edcfd058eea1c96061d17fee62823bcbaf 100644 --- a/ecmascript/builtins/builtins_atomics.h +++ b/ecmascript/builtins/builtins_atomics.h @@ -20,6 +20,38 @@ #include "ecmascript/js_dataview.h" #include "ecmascript/waiter_list.h" +// List of functions in Atomics. +// V(name, func, length, stubIndex) +// where BuiltinsAtomics::func refers to the native implementation of Atomics[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +// The following functions are not implemented yet: +// - Atomics.waitAsync ( typedArray, index, value, timeout ) +#define BUILTIN_ATOMICS_FUNCTIONS(V) \ + /* Atomics.add ( typedArray, index, value ) */ \ + V("add", Add, 3, INVALID) \ + /* Atomics.and ( typedArray, index, value ) */ \ + V("and", And, 3, INVALID) \ + /* Atomics.compareExchange ( typedArray, index, expectedValue, replacementValue ) */ \ + V("compareExchange", CompareExchange, 4, INVALID) \ + /* Atomics.exchange ( typedArray, index, value ) */ \ + V("exchange", Exchange, 3, INVALID) \ + /* Atomics.isLockFree ( size ) */ \ + V("isLockFree", IsLockFree, 1, INVALID) \ + /* Atomics.load ( typedArray, index ) */ \ + V("load", Load, 2, INVALID) \ + /* Atomics.notify ( typedArray, index, count ) */ \ + V("notify", Notify, 3, INVALID) \ + /* Atomics.or ( typedArray, index, value ) */ \ + V("or", Or, 3, INVALID) \ + /* Atomics.store ( typedArray, index, value ) */ \ + V("store", Store, 3, INVALID) \ + /* Atomics.sub ( typedArray, index, value ) */ \ + V("sub", Sub, 3, INVALID) \ + /* Atomics.wait ( typedArray, index, value, timeout ) */ \ + V("wait", Wait, 4, INVALID) \ + /* Atomics.xor ( typedArray, index, value ) */ \ + V("xor", Xor, 3, INVALID) + namespace panda::ecmascript::builtins { enum class WaitResult: uint8_t {OK = 0, NOT_EQ, TIME_OUT}; @@ -50,7 +82,20 @@ public: // 25.4.13 Atomics.xor ( typedArray, index, value ) static JSTaggedValue Xor(EcmaRuntimeCallInfo *argv); + static Span GetAtomicsFunctions() + { + return Span(ATOMICS_FUNCTIONS); + } + private: +#define BUILTINS_ATOMICS_FUNCTION_ENTRY(name, method, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsAtomics::method, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array ATOMICS_FUNCTIONS = { + BUILTIN_ATOMICS_FUNCTIONS(BUILTINS_ATOMICS_FUNCTION_ENTRY) + }; +#undef 
BUILTINS_ATOMICS_FUNCTION_ENTRY + static uint32_t Signal(JSHandle &arrayBuffer, const size_t &index, double wakeCount); template static WaitResult DoWait(JSThread *thread, JSHandle &arrayBuffer, @@ -91,4 +136,4 @@ private: static constexpr int ARGS_NUMBER = 2; }; } // namespace panda::ecmascript::builtins -#endif // ECMASCRIPT_BUILTINS_BUILTINS_MATH_H \ No newline at end of file +#endif // ECMASCRIPT_BUILTINS_BUILTINS_MATH_H diff --git a/ecmascript/builtins/builtins_cjs_module.cpp b/ecmascript/builtins/builtins_cjs_module.cpp index 0cb9085ebb87a8203af45b6bbe7c98970178aad5..043bbb00a0c5e18214204dd345bdfee33cb43fde 100644 --- a/ecmascript/builtins/builtins_cjs_module.cpp +++ b/ecmascript/builtins/builtins_cjs_module.cpp @@ -16,14 +16,14 @@ #include "ecmascript/builtins/builtins_cjs_module.h" #include "ecmascript/base/builtins_base.h" -#include "ecmascript/base/path_helper.h" #include "ecmascript/interpreter/interpreter-inl.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/platform/file.h" #include "ecmascript/require/js_cjs_module.h" #include "ecmascript/require/js_require_manager.h" namespace panda::ecmascript::builtins { -using PathHelper = base::PathHelper; + JSTaggedValue BuiltinsCjsModule::CjsModuleConstructor(EcmaRuntimeCallInfo *argv) { JSThread *thread = argv->GetThread(); @@ -63,7 +63,7 @@ JSTaggedValue BuiltinsCjsModule::ResolveFilename(EcmaRuntimeCallInfo *argv) JSMutableHandle parent(thread, JSTaggedValue::Undefined()); JSMutableHandle dirname(thread, JSTaggedValue::Undefined()); const JSPandaFile *jsPandaFile = EcmaInterpreter::GetNativeCallPandafile(thread); - PathHelper::ResolveCurrentPath(thread, parent, dirname, jsPandaFile); + ModulePathHelper::ResolveCurrentPath(thread, parent, dirname, jsPandaFile); if (length != 1) { // strange arg's number LOG_ECMA(FATAL) << "BuiltinsCjsModule::Load : can only accept one argument"; diff --git a/ecmascript/builtins/builtins_cjs_require.cpp b/ecmascript/builtins/builtins_cjs_require.cpp index 9e109db9df3955e2f768063256b1537af2ea72bc..c9e002e982e338b27a7d4b9ff994b5a83fa9a703 100644 --- a/ecmascript/builtins/builtins_cjs_require.cpp +++ b/ecmascript/builtins/builtins_cjs_require.cpp @@ -43,6 +43,7 @@ JSTaggedValue BuiltinsCjsRequire::CjsRequireConstructor(EcmaRuntimeCallInfo *arg } JSHandle requestName = JSHandle::Cast(GetCallArg(argv, 0)); result = CjsModule::Load(thread, requestName); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return result.GetTaggedValue(); } diff --git a/ecmascript/builtins/builtins_dataview.cpp b/ecmascript/builtins/builtins_dataview.cpp index a99ba165982e12cb963c620d4e34aa2ef5735b23..877e465c49ea33395ed66fdff9973e4688022642 100644 --- a/ecmascript/builtins/builtins_dataview.cpp +++ b/ecmascript/builtins/builtins_dataview.cpp @@ -347,7 +347,8 @@ JSTaggedValue BuiltinsDataView::SetBigUint64(EcmaRuntimeCallInfo *argv) // 24.2.1.1 JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type) { BUILTINS_API_TRACE(thread, DataView, GetViewValue); @@ -359,11 +360,18 @@ JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandleIsDataView()) { THROW_TYPE_ERROR_AND_RETURN(thread, "view is not dataview", JSTaggedValue::Exception()); } - // 3. Let numberIndex be ToNumber(requestIndex). - JSTaggedNumber numberIndex = JSTaggedValue::ToNumber(thread, requestIndex); - // 5. ReturnIfAbrupt(getIndex). 
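The BUILTIN_ATOMICS_FUNCTIONS table above (and the analogous DataView, Date, global and Map tables later in this patch) is a classic X-macro: the list is written once and expanded twice, first as documentation, then into a constexpr entry array that initialization code walks via a Span. A self-contained sketch of the mechanism with a toy entry type (base::BuiltinFunctionEntry is the real one):

    #include <array>
    #include <cstdio>

    struct Entry {
        const char *name;
        int length;  // stands in for the function pointer and stub-id fields
    };

    // The list is written once...
    #define DEMO_FUNCTIONS(V) \
        V("add", 3)           \
        V("wait", 4)

    // ...and expanded into a compile-time array, exactly as
    // BUILTIN_ATOMICS_FUNCTIONS(BUILTINS_ATOMICS_FUNCTION_ENTRY) does.
    #define DEMO_ENTRY(name, length) Entry{name, length},
    constexpr std::array<Entry, 2> ENTRIES = {DEMO_FUNCTIONS(DEMO_ENTRY)};
    #undef DEMO_ENTRY

    int main() {
        for (const Entry &e : ENTRIES) {
            std::printf("Atomics.%s expects %d args\n", e.name, e.length);
        }
        return 0;
    }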
- RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - int32_t indexInt = base::NumberHelper::DoubleInRangeInt32(numberIndex.GetNumber()); + + int32_t indexInt = 0; + if (requestIndex->IsInt()) { + // fast get index if requestIndex is int + indexInt = requestIndex->GetInt(); + } else { + // 3. Let numberIndex be ToNumber(requestIndex). + JSTaggedNumber numberIndex = JSTaggedValue::ToNumber(thread, requestIndex); + // 5. ReturnIfAbrupt(getIndex). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + indexInt = base::NumberHelper::DoubleInRangeInt32(numberIndex.GetNumber()); + } // 6. If numberIndex ≠ getIndex or getIndex < 0, throw a RangeError exception. if (indexInt < 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "getIndex < 0", JSTaggedValue::Exception()); @@ -371,10 +379,10 @@ JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle(indexInt); // 7. Let isLittleEndian be ToBoolean(isLittleEndian). bool isLittleEndian = false; - if (littleEndian.IsUndefined()) { + if (littleEndian->IsUndefined()) { isLittleEndian = false; } else { - isLittleEndian = littleEndian.ToBoolean(); + isLittleEndian = littleEndian->ToBoolean(); } // 8. Let buffer be the value of view’s [[ViewedArrayBuffer]] internal slot. JSHandle dataView(view); @@ -401,7 +409,8 @@ JSTaggedValue BuiltinsDataView::GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type, const JSHandle &value) { // 1. If Type(view) is not Object, throw a TypeError exception. @@ -413,23 +422,32 @@ JSTaggedValue BuiltinsDataView::SetViewValue(JSThread *thread, const JSHandleIsDataView()) { THROW_TYPE_ERROR_AND_RETURN(thread, "view is not dataview", JSTaggedValue::Exception()); } - // 3. Let numberIndex be ToNumber(requestIndex). - JSTaggedNumber numberIndex = JSTaggedValue::ToIndex(thread, requestIndex); - // 5. ReturnIfAbrupt(getIndex). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - int64_t index = base::NumberHelper::DoubleInRangeInt32(numberIndex.GetNumber()); + int64_t index = 0; + if (requestIndex->IsInt()) { + // fast get index if requestIndex is int + index = requestIndex->GetInt(); + } else { + // 3. Let numberIndex be ToNumber(requestIndex). + JSTaggedNumber numberIndex = JSTaggedValue::ToIndex(thread, requestIndex); + // 5. ReturnIfAbrupt(getIndex). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + index = base::NumberHelper::DoubleInRangeInt32(numberIndex.GetNumber()); + } // 6. If numberIndex ≠ getIndex or getIndex < 0, throw a RangeError exception. if (index < 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "getIndex < 0", JSTaggedValue::Exception()); } - JSHandle numValueHandle = JSTaggedValue::ToNumeric(thread, value); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSMutableHandle numValueHandle = JSMutableHandle(thread, value); + if (!value->IsNumber()) { + numValueHandle.Update(JSTaggedValue::ToNumeric(thread, value)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } // 7. Let isLittleEndian be ToBoolean(isLittleEndian). bool isLittleEndian = false; - if (littleEndian.IsUndefined()) { + if (littleEndian->IsUndefined()) { isLittleEndian = false; } else { - isLittleEndian = littleEndian.ToBoolean(); + isLittleEndian = littleEndian->ToBoolean(); } // 8. Let buffer be the value of view’s [[ViewedArrayBuffer]] internal slot. 
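The rewritten GetViewValue and SetViewValue above take a fast path when the byteOffset argument is already a tagged Int: ToNumber/ToIndex is only observable (valueOf calls) and only able to throw for non-trivial inputs, so a plain Int can skip it entirely. The shape of that branch, reduced to a standalone sketch with a toy tagged value:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <variant>

    // Toy stand-in for JSTaggedValue: either already an Int, or something
    // that still needs the (observable, potentially throwing) conversion.
    using Tagged = std::variant<int32_t, double>;

    int32_t ToIndexSlow(double d) {  // stands in for ToNumber plus range handling
        return static_cast<int32_t>(std::trunc(d));
    }

    int32_t GetIndex(const Tagged &requestIndex) {
        if (const int32_t *i = std::get_if<int32_t>(&requestIndex)) {
            return *i;  // fast path: no conversion, cannot throw
        }
        return ToIndexSlow(std::get<double>(requestIndex));
    }

    int main() {
        assert(GetIndex(Tagged{int32_t{8}}) == 8);
        assert(GetIndex(Tagged{3.7}) == 3);
        return 0;
    }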
JSHandle dataView(view); @@ -461,11 +479,12 @@ JSTaggedValue BuiltinsDataView::GetTypedValue(EcmaRuntimeCallInfo *argv, DataVie [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisHandle = GetThis(argv); JSHandle offsetHandle = GetCallArg(argv, 0); + JSHandle trueHandle(thread, JSTaggedValue::True()); if (type == DataViewType::UINT8 || type == DataViewType::INT8) { - return GetViewValue(thread, thisHandle, offsetHandle, JSTaggedValue::True(), type); + return GetViewValue(thread, thisHandle, offsetHandle, trueHandle, type); } JSHandle littleEndianHandle = GetCallArg(argv, 1); - return GetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle.GetTaggedValue(), type); + return GetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle, type); } JSTaggedValue BuiltinsDataView::SetTypedValue(EcmaRuntimeCallInfo *argv, DataViewType type) @@ -476,10 +495,11 @@ JSTaggedValue BuiltinsDataView::SetTypedValue(EcmaRuntimeCallInfo *argv, DataVie JSHandle thisHandle = GetThis(argv); JSHandle offsetHandle = GetCallArg(argv, 0); JSHandle value = GetCallArg(argv, 1); + JSHandle trueHandle(thread, JSTaggedValue::True()); if (type == DataViewType::UINT8 || type == DataViewType::INT8) { - return SetViewValue(thread, thisHandle, offsetHandle, JSTaggedValue::True(), type, value); + return SetViewValue(thread, thisHandle, offsetHandle, trueHandle, type, value); } JSHandle littleEndianHandle = GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); - return SetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle.GetTaggedValue(), type, value); + return SetViewValue(thread, thisHandle, offsetHandle, littleEndianHandle, type, value); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_dataview.h b/ecmascript/builtins/builtins_dataview.h index 4337c2b0bf3bb21805b4f8377da6044a324041b8..f57d8609c2c989e4212890ff5bd47bca6c431d0d 100644 --- a/ecmascript/builtins/builtins_dataview.h +++ b/ecmascript/builtins/builtins_dataview.h @@ -19,6 +19,40 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/js_dataview.h" +// List of functions in DataView, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsDataView::func refers to the native implementation of DataView.prototype[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. 
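GetTypedValue and SetTypedValue above pin littleEndian to true for the one-byte views: byte order only exists once a value spans multiple bytes. What GetViewValue ultimately does with the flag, in plain self-contained C++ (illustrative only, not the engine's code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Read a 32-bit value out of a backing buffer, honoring the byte-order flag.
    int32_t ReadInt32(const uint8_t *buf, size_t offset, bool littleEndian) {
        uint32_t b0 = buf[offset + 0], b1 = buf[offset + 1];
        uint32_t b2 = buf[offset + 2], b3 = buf[offset + 3];
        uint32_t raw = littleEndian ? (b0 | (b1 << 8U) | (b2 << 16U) | (b3 << 24U))
                                    : (b3 | (b2 << 8U) | (b1 << 16U) | (b0 << 24U));
        return static_cast<int32_t>(raw);
    }

    int main() {
        const uint8_t buf[] = {0x78, 0x56, 0x34, 0x12};
        // A one-byte read would be identical either way, which is why the engine
        // can pass a fixed true for the getInt8/getUint8/setInt8/setUint8 paths.
        std::printf("LE: 0x%08X  BE: 0x%08X\n",
                    static_cast<uint32_t>(ReadInt32(buf, 0, true)),
                    static_cast<uint32_t>(ReadInt32(buf, 0, false)));
        return 0;
    }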
+#define BUILTIN_DATA_VIEW_PROTOTYPE_FUNCTIONS(V) \ + /* For %Type% of 1 byte: */ \ + /* DataView.prototype.get%Type% ( byteOffset ) */ \ + /* For %Type% of 2 or more bytes: */ \ + /* DataView.prototype.get%Type% ( byteOffset [ , littleEndian ] ) */ \ + V("getFloat32", GetFloat32, 1, INVALID) \ + V("getFloat64", GetFloat64, 1, INVALID) \ + V("getInt8", GetInt8, 1, INVALID) \ + V("getInt16", GetInt16, 1, INVALID) \ + V("getInt32", GetInt32, 1, INVALID) \ + V("getBigInt64", GetBigInt64, 1, INVALID) \ + V("getUint16", GetUint16, 1, INVALID) \ + V("getUint32", GetUint32, 1, INVALID) \ + V("getUint8", GetUint8, 1, INVALID) \ + V("getBigUint64", GetBigUint64, 1, INVALID) \ + /* For %Type% of 1 bytes: */ \ + /* DataView.prototype.setInt8 ( byteOffset, value ) */ \ + /* For %Type% of 2 or more bytes: */ \ + /* DataView.prototype.setInt16 ( byteOffset, value [ , littleEndian ] ) */ \ + V("setFloat32", SetFloat32, 2, INVALID) \ + V("setFloat64", SetFloat64, 2, INVALID) \ + V("setInt8", SetInt8, 2, INVALID) \ + V("setInt16", SetInt16, 2, INVALID) \ + V("setInt32", SetInt32, 2, INVALID) \ + V("setBigInt64", SetBigInt64, 2, INVALID) \ + V("setUint8", SetUint8, 2, INVALID) \ + V("setUint16", SetUint16, 2, INVALID) \ + V("setUint32", SetUint32, 2, INVALID) \ + V("setBigUint64", SetBigUint64, 2, INVALID) + namespace panda::ecmascript::builtins { using DataViewType = ecmascript::DataViewType; class BuiltinsDataView : public base::BuiltinsBase { @@ -72,13 +106,41 @@ public: // 25.3.4.16 DataView.prototype.setBigUint64 ( byteOffset, value [ , littleEndian ] ) static JSTaggedValue SetBigUint64(EcmaRuntimeCallInfo *argv); + // Excluding the '@@' internal properties. + static Span GetDataViewPrototypeFunctions() + { + return Span(DATA_VIEW_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 5 : 5 more inline properties in DataView.prototype: + // (1) DataView.prototype.constructor + // (2) DataView.prototype [ @@toStringTag ] + // (3) get buffer + // (4) get byteLength + // (5) get byteOffset + return GetDataViewPrototypeFunctions().Size() + 5; + } + private: +#define BUILTIN_DATA_VIEW_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsDataView::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array DATA_VIEW_PROTOTYPE_FUNCTIONS = { + BUILTIN_DATA_VIEW_PROTOTYPE_FUNCTIONS(BUILTIN_DATA_VIEW_FUNCTION_ENTRY) + }; + +#undef BUILTIN_DATA_VIEW_FUNCTION_ENTRY + // 24.2.1.1 GetViewValue ( view, requestIndex, isLittleEndian, type ) static JSTaggedValue GetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type); static JSTaggedValue SetViewValue(JSThread *thread, const JSHandle &view, - const JSHandle &requestIndex, JSTaggedValue littleEndian, + const JSHandle &requestIndex, + const JSHandle &littleEndian, DataViewType type, const JSHandle &value); static JSTaggedValue GetTypedValue(EcmaRuntimeCallInfo *argv, DataViewType type); diff --git a/ecmascript/builtins/builtins_date.h b/ecmascript/builtins/builtins_date.h index 7d83a3a211d6f59270d662923f0aa74cab1284ba..4a2da82e4032fbe99940cdcaa7257c6e8958f93c 100644 --- a/ecmascript/builtins/builtins_date.h +++ b/ecmascript/builtins/builtins_date.h @@ -19,9 +19,114 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/js_date.h" +// List of functions in Date, excluding the '@@' properties. 
+// V(name, func, length, stubIndex) +// where BuiltinsDate::func refers to the native implementation of Date[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_DATE_FUNCTIONS(V) \ + /* Date.now ( ) */ \ + V("now", Now, 0, INVALID) \ + /* Date.parse ( string ) */ \ + V("parse", Parse, 1, INVALID) \ + /* Date.UTC ( year [ , month [ , date [ , hours [ , minutes [ , seconds [ , ms ] ] ] ] ] ] ) */ \ + V("UTC", UTC, ::panda::ecmascript::builtins::BuiltinsDate::UTC_LENGTH, INVALID) + +// List of functions in Date.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsDate::func refers to the native implementation of Date.prototype[name]. +#define BUILTIN_DATE_PROTOTYPE_FUNCTIONS(V) \ + /* Date.prototype.getDate ( ) */ \ + V("getDate", GetDate, 0, INVALID) \ + /* Date.prototype.getDay ( ) */ \ + V("getDay", GetDay, 0, INVALID) \ + /* Date.prototype.getFullYear ( ) */ \ + V("getFullYear", GetFullYear, 0, INVALID) \ + /* Date.prototype.getHours ( ) */ \ + V("getHours", GetHours, 0, INVALID) \ + /* Date.prototype.getMilliseconds ( ) */ \ + V("getMilliseconds", GetMilliseconds, 0, INVALID) \ + /* Date.prototype.getMinutes ( ) */ \ + V("getMinutes", GetMinutes, 0, INVALID) \ + /* Date.prototype.getMonth ( ) */ \ + V("getMonth", GetMonth, 0, INVALID) \ + /* Date.prototype.getSeconds ( ) */ \ + V("getSeconds", GetSeconds, 0, INVALID) \ + /* Date.prototype.getTime ( ) */ \ + V("getTime", GetTime, 0, INVALID) \ + /* Date.prototype.getTimezoneOffset ( ) */ \ + V("getTimezoneOffset", GetTimezoneOffset, 0, INVALID) \ + /* Date.prototype.getUTCDate ( ) */ \ + V("getUTCDate", GetUTCDate, 0, INVALID) \ + /* Date.prototype.getUTCDay ( ) */ \ + V("getUTCDay", GetUTCDay, 0, INVALID) \ + /* Date.prototype.getUTCFullYear ( ) */ \ + V("getUTCFullYear", GetUTCFullYear, 0, INVALID) \ + /* Date.prototype.getUTCHours ( ) */ \ + V("getUTCHours", GetUTCHours, 0, INVALID) \ + /* Date.prototype.getUTCMilliseconds ( ) */ \ + V("getUTCMilliseconds", GetUTCMilliseconds, 0, INVALID) \ + /* Date.prototype.getUTCMinutes ( ) */ \ + V("getUTCMinutes", GetUTCMinutes, 0, INVALID) \ + /* Date.prototype.getUTCMonth ( ) */ \ + V("getUTCMonth", GetUTCMonth, 0, INVALID) \ + /* Date.prototype.getUTCSeconds ( ) */ \ + V("getUTCSeconds", GetUTCSeconds, 0, INVALID) \ + /* Date.prototype.setDate ( date ) */ \ + V("setDate", SetDate, 1, INVALID) \ + /* Date.prototype.setFullYear ( year [ , month [ , date ] ] ) */ \ + V("setFullYear", SetFullYear, 3, INVALID) \ + /* Date.prototype.setHours ( hour [ , min [ , sec [ , ms ] ] ] ) */ \ + V("setHours", SetHours, 4, INVALID) \ + /* Date.prototype.setMilliseconds ( ms ) */ \ + V("setMilliseconds", SetMilliseconds, 1, INVALID) \ + /* Date.prototype.setMinutes ( min [ , sec [ , ms ] ] ) */ \ + V("setMinutes", SetMinutes, 3, INVALID) \ + /* Date.prototype.setMonth ( month [ , date ] ) */ \ + V("setMonth", SetMonth, 2, INVALID) \ + /* Date.prototype.setSeconds ( sec [ , ms ] ) */ \ + V("setSeconds", SetSeconds, 2, INVALID) \ + /* Date.prototype.setTime ( time ) */ \ + V("setTime", SetTime, 1, INVALID) \ + /* Date.prototype.setUTCDate ( date ) */ \ + V("setUTCDate", SetUTCDate, 1, INVALID) \ + /* Date.prototype.setUTCFullYear ( year [ , month [ , date ] ] ) */ \ + V("setUTCFullYear", SetUTCFullYear, 3, INVALID) \ + /* Date.prototype.setUTCHours ( hour [ , min [ , sec [ , ms ] ] ] ) */ \ + V("setUTCHours", SetUTCHours, 4, INVALID) \ + /* Date.prototype.setUTCMilliseconds 
( ms ) */ \ + V("setUTCMilliseconds", SetUTCMilliseconds, 1, INVALID) \ + /* Date.prototype.setUTCMinutes ( min [ , sec [ , ms ] ] ) */ \ + V("setUTCMinutes", SetUTCMinutes, 3, INVALID) \ + /* Date.prototype.setUTCMonth ( month [ , date ] ) */ \ + V("setUTCMonth", SetUTCMonth, 2, INVALID) \ + /* Date.prototype.setUTCSeconds ( sec [ , ms ] ) */ \ + V("setUTCSeconds", SetUTCSeconds, 2, INVALID) \ + /* Date.prototype.toDateString ( ) */ \ + V("toDateString", ToDateString, 0, INVALID) \ + /* Date.prototype.toISOString ( ) */ \ + V("toISOString", ToISOString, 0, INVALID) \ + /* Date.prototype.toJSON ( key ) */ \ + V("toJSON", ToJSON, 1, INVALID) \ + /* Date.prototype.toLocaleDateString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleDateString", ToLocaleDateString, 0, INVALID) \ + /* Date.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleString", ToLocaleString, 0, INVALID) \ + /* Date.prototype.toLocaleTimeString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleTimeString", ToLocaleTimeString, 0, INVALID) \ + /* Date.prototype.toString ( ) */ \ + V("toString", ToString, 0, INVALID) \ + /* Date.prototype.toTimeString ( ) */ \ + V("toTimeString", ToTimeString, 0, INVALID) \ + /* Date.prototype.toUTCString ( ) */ \ + V("toUTCString", ToUTCString, 0, INVALID) \ + /* Date.prototype.valueOf ( ) */ \ + V("valueOf", ValueOf, 0, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsDate : public base::BuiltinsBase { public: + static constexpr int UTC_LENGTH = 7; + // 20.4.2 The Date Constructor static JSTaggedValue DateConstructor(EcmaRuntimeCallInfo *argv); @@ -165,7 +270,38 @@ public: // 20.4.4.45 Date.prototype [ @@toPrimitive ] static JSTaggedValue ToPrimitive(EcmaRuntimeCallInfo *argv); + // Excluding the '@@' internal properties + static Span GetDateFunctions() + { + return Span(DATE_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties. + static Span GetDatePrototypeFunctions() + { + return Span(DATE_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 2 : 2 more inline properties in Date.prototype: + // (1) Date.prototype.constructor + // (2) Date.prototype [ @@toPrimitive ] + return GetDatePrototypeFunctions().Size() + 2; + } + private: +#define BUILTIN_DATE_FUNCTION_ENTRY(name, func, length, builtinId) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsDate::func, length, kungfu::BuiltinsStubCSigns::builtinId), + + static constexpr std::array DATE_FUNCTIONS = { + BUILTIN_DATE_FUNCTIONS(BUILTIN_DATE_FUNCTION_ENTRY) + }; + static constexpr std::array DATE_PROTOTYPE_FUNCTIONS = { + BUILTIN_DATE_PROTOTYPE_FUNCTIONS(BUILTIN_DATE_FUNCTION_ENTRY) + }; +#undef BUILTIN_DATE_FUNCTION_ENTRY + // definition for set data code. 
static constexpr uint32_t CODE_SET_DATE = 0x32; static constexpr uint32_t CODE_SET_MILLISECONDS = 0x76; diff --git a/ecmascript/builtins/builtins_displaynames.cpp b/ecmascript/builtins/builtins_displaynames.cpp index 3cac9430bcc6520550635cc8f3428cf1447dbda4..02c853165e8814fafc4f5aa95b184d379e652bda 100644 --- a/ecmascript/builtins/builtins_displaynames.cpp +++ b/ecmascript/builtins/builtins_displaynames.cpp @@ -102,6 +102,7 @@ JSTaggedValue BuiltinsDisplayNames::Of(EcmaRuntimeCallInfo *argv) JSHandle displayNames = JSHandle::Cast(thisValue); TypednsOption typeOpt = displayNames->GetType(); JSHandle code = JSDisplayNames::CanonicalCodeForDisplayNames(thread, displayNames, typeOpt, codeTemp); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); std::string codeString = intl::LocaleHelper::ConvertToStdString(code); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (codeString.size()) { diff --git a/ecmascript/builtins/builtins_errors.cpp b/ecmascript/builtins/builtins_errors.cpp index 8af25081c31d7043bb8e9ff72cd39d595e6fd0eb..351eedf8c30c3cc0ba83be364a2980469a88cbb9 100644 --- a/ecmascript/builtins/builtins_errors.cpp +++ b/ecmascript/builtins/builtins_errors.cpp @@ -159,6 +159,22 @@ JSTaggedValue BuiltinsAggregateError::AggregateErrorConstructor(EcmaRuntimeCallI PropertyDescriptor msgDesc(thread, JSHandle::Cast(handleStr), true, false, true); JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, msgKey, msgDesc); } + // InstallErrorCause + JSHandle options = BuiltinsBase::GetCallArg(argv, 2); // 2 : Third parameter + // If options is an Object and ? HasProperty(options, "cause") is true, then + // a. Let cause be ? Get(options, "cause"). + // b. Perform CreateNonEnumerableDataPropertyOrThrow(O, "cause", cause). + if (options->IsECMAObject()) { + JSHandle causeKey = globalConst->GetHandledCauseString(); + bool causePresent = JSTaggedValue::HasProperty(thread, options, causeKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (causePresent) { + JSHandle cause = JSObject::GetProperty(thread, options, causeKey).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + PropertyDescriptor causeDesc(thread, cause, true, false, true); + JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, causeKey, causeDesc); + } + } // 4. Let errorsList be ? IterableToList(errors). JSHandle errorsList = JSObject::IterableToList(thread, errors); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -168,6 +184,7 @@ JSTaggedValue BuiltinsAggregateError::AggregateErrorConstructor(EcmaRuntimeCallI JSHandle errorsValues(JSArray::CreateArrayFromList(thread, errorsArray)); PropertyDescriptor msgDesc(thread, errorsValues, true, false, true); JSTaggedValue::DefinePropertyOrThrow(thread, taggedObj, errorsKey, msgDesc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Return O. 
return taggedObj.GetTaggedValue(); } @@ -190,4 +207,17 @@ JSTaggedValue BuiltinsOOMError::ToString(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(argv->GetThread(), Error, OOMErrorToString); return ErrorHelper::ErrorCommonToString(argv, ErrorType::OOM_ERROR); } + +// TerminationError +JSTaggedValue BuiltinsTerminationError::TerminationErrorConstructor(EcmaRuntimeCallInfo *argv) +{ + BUILTINS_API_TRACE(argv->GetThread(), Error, TerminationErrorConstructor); + return ErrorHelper::ErrorCommonConstructor(argv, ErrorType::TERMINATION_ERROR); +} + +JSTaggedValue BuiltinsTerminationError::ToString(EcmaRuntimeCallInfo *argv) +{ + BUILTINS_API_TRACE(argv->GetThread(), Error, TerminationErrorToString); + return ErrorHelper::ErrorCommonToString(argv, ErrorType::TERMINATION_ERROR); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_errors.h b/ecmascript/builtins/builtins_errors.h index 7c75c771c70571f76c84fe96cb076cfe62fc1d9b..c50ada7dc9c1a83d58219aa78f8abca636ef4279 100644 --- a/ecmascript/builtins/builtins_errors.h +++ b/ecmascript/builtins/builtins_errors.h @@ -92,5 +92,12 @@ public: static JSTaggedValue ToString(EcmaRuntimeCallInfo *argv); }; + +class BuiltinsTerminationError : public base::BuiltinsBase { +public: + static JSTaggedValue TerminationErrorConstructor(EcmaRuntimeCallInfo *argv); + + static JSTaggedValue ToString(EcmaRuntimeCallInfo *argv); +}; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_ERRORS_H diff --git a/ecmascript/builtins/builtins_finalization_registry.cpp b/ecmascript/builtins/builtins_finalization_registry.cpp index 60c8cd9acd5af1d7bacbb42f5dcef8109e58a114..ed87c399477a9e2e2a307563b16f0d612424ac46 100644 --- a/ecmascript/builtins/builtins_finalization_registry.cpp +++ b/ecmascript/builtins/builtins_finalization_registry.cpp @@ -75,19 +75,19 @@ JSTaggedValue BuiltinsFinalizationRegistry::Register(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "thisValue is not object or does not have an internalSlot internal slot", JSTaggedValue::Exception()); } - // 3. If Type(target) is not Object, throw a TypeError exception. - if (!target->IsECMAObject()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "target is not object", JSTaggedValue::Exception()); + // 3. If CanBeHeldWeakly(target) is false, throw a TypeError exception. + if (!JSTaggedValue::CanBeHeldWeakly(thread, target)) { + THROW_TYPE_ERROR_AND_RETURN(thread, "target invalid", JSTaggedValue::Exception()); } // 4. If SameValue(target, heldValue) is true, throw a TypeError exception. if (JSTaggedValue::SameValue(target, heldValue)) { THROW_TYPE_ERROR_AND_RETURN(thread, "target and heldValue should not be equal", JSTaggedValue::Exception()); } - // 5. If Type(unregisterToken) is not Object, then + // 5. If CanBeHeldWeakly(unregisterToken) is false, then // a. If unregisterToken is not undefined, throw a TypeError exception. // b. Set unregisterToken to empty. - if (!unregisterToken->IsECMAObject() && !unregisterToken->IsUndefined()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken should be object", JSTaggedValue::Exception()); + if (!JSTaggedValue::CanBeHeldWeakly(thread, unregisterToken) && !unregisterToken->IsUndefined()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken invalid", JSTaggedValue::Exception()); } // 6. Let cell be the Record { [[WeakRefTarget]]: target, // [[HeldValue]]: heldValue, [[UnregisterToken]]: unregisterToken }. 
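The Register hunk above (and the Unregister hunk that follows) replaces the plain is-it-an-object test with the spec's CanBeHeldWeakly predicate, which is what admits symbols as weak targets and unregister tokens. A toy rendering of the predicate under that assumption (field names invented for illustration; the engine's actual check lives in JSTaggedValue::CanBeHeldWeakly):

    #include <cassert>

    // Invented model: just enough state to express the predicate.
    struct Value {
        bool isObject = false;
        bool isSymbol = false;
        bool isRegisteredSymbol = false;  // produced by Symbol.for(...)
    };

    // Objects can always be held weakly; symbols can too, unless they live in
    // the global symbol registry (those are always reachable again via
    // Symbol.for, so weakness would never be observable).
    bool CanBeHeldWeakly(const Value &v) {
        return v.isObject || (v.isSymbol && !v.isRegisteredSymbol);
    }

    int main() {
        Value obj;
        obj.isObject = true;
        Value sym;
        sym.isSymbol = true;
        Value registered = sym;
        registered.isRegisteredSymbol = true;
        assert(CanBeHeldWeakly(obj));
        assert(CanBeHeldWeakly(sym));
        assert(!CanBeHeldWeakly(registered));
        return 0;
    }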
@@ -112,9 +112,9 @@ JSTaggedValue BuiltinsFinalizationRegistry::Unregister(EcmaRuntimeCallInfo *argv THROW_TYPE_ERROR_AND_RETURN(thread, "thisValue is not object or does not have an internalSlot internal slot", JSTaggedValue::Exception()); } - // 3. If Type(unregisterToken) is not Object, throw a TypeError exception. - if (!unregisterToken->IsECMAObject()) { - THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken should be object", JSTaggedValue::Exception()); + // 3. If CanBeHeldWeakly(unregisterToken) is false, throw a TypeError exception. + if (!JSTaggedValue::CanBeHeldWeakly(thread, unregisterToken)) { + THROW_TYPE_ERROR_AND_RETURN(thread, "unregisterToken invalid", JSTaggedValue::Exception()); } // 4. Let removed be false. // 5. For each Record { [[WeakRefTarget]], [[HeldValue]], [[UnregisterToken]] } cell of diff --git a/ecmascript/builtins/builtins_function.cpp b/ecmascript/builtins/builtins_function.cpp index becda3bf9182733078023a2e202d5ab3c8d74555..1a9e6c7815d5b09083260238caf71bc1adc0c529 100644 --- a/ecmascript/builtins/builtins_function.cpp +++ b/ecmascript/builtins/builtins_function.cpp @@ -121,18 +121,19 @@ JSTaggedValue BuiltinsFunction::FunctionPrototypeApply(EcmaRuntimeCallInfo *argv JSHandle arrayObj = GetCallArg(argv, 1); std::pair argumentsList = BuildArgumentsListFast(thread, arrayObj); if (!argumentsList.first) { - JSHandle argList = JSHandle::Cast( - JSObject::CreateListFromArrayLike(thread, arrayObj)); + JSHandle num = JSObject::CreateListFromArrayLike(thread, arrayObj); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle argList = JSHandle::Cast(num); // 4. ReturnIfAbrupt(argList). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - const int32_t argsLength = static_cast(argList->GetLength()); + const uint32_t argsLength = argList->GetLength(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, thisArg, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, argList); return JSFunction::Call(info); } // 6. Return Call(func, thisArg, argList). - const int32_t argsLength = static_cast(argumentsList.second); + const uint32_t argsLength = static_cast(argumentsList.second); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, func, thisArg, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, argumentsList.first); @@ -206,6 +207,7 @@ JSTaggedValue BuiltinsFunction::FunctionPrototypeBind(EcmaRuntimeCallInfo *argv) PropertyDescriptor desc(thread, JSHandle(thread, JSTaggedValue(lengthValue)), false, false, true); [[maybe_unused]] bool status = JSTaggedValue::DefinePropertyOrThrow(thread, JSHandle(boundFunction), lengthKey, desc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Assert: status is not an abrupt completion. 
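FunctionPrototypeApply above also widens argsLength from int32_t to uint32_t. Array-like lengths in JS are unsigned 32-bit quantities (arrays run up to 2^32 - 1 elements), and pushing one through a signed 32-bit cast flips large values negative. The hazard in two lines (real argument counts are clamped far below this, but the signedness point stands):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t jsArrayLength = 4294967295u;  // 2^32 - 1, the max JS array length
        // Wraps to -1 under two's complement (implementation-defined before C++20):
        std::printf("%d\n", static_cast<int32_t>(jsArrayLength));
        return 0;
    }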
ASSERT_PRINT(status, "DefinePropertyOrThrow failed"); @@ -269,8 +271,16 @@ JSTaggedValue BuiltinsFunction::FunctionPrototypeToString(EcmaRuntimeCallInfo *a [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisValue = GetThis(argv); if (thisValue->IsJSObject() && thisValue->IsCallable()) { - JSHandle func = JSHandle::Cast(thisValue); - JSHandle method(thread, func->GetMethod()); + JSHandle method; + if (thisValue->IsBoundFunction()) { + JSHandle func = JSHandle::Cast(thisValue); + JSHandle methodHandle(thread, func->GetMethod()); + method = JSHandle::Cast(methodHandle); + } else { + JSHandle func = JSHandle::Cast(thisValue); + JSHandle methodHandle(thread, func->GetMethod()); + method = JSHandle::Cast(methodHandle); + } if (method->IsNativeWithCallField()) { JSHandle nameKey = thread->GlobalConstants()->GetHandledNameString(); JSHandle methodName(JSObject::GetProperty(thread, thisValue, nameKey).GetValue()); diff --git a/ecmascript/builtins/builtins_generator.cpp b/ecmascript/builtins/builtins_generator.cpp index 30850bb61e9b04529e74b09924bd40b20985c8f6..4d2e1a152293c65d81e7c9101a65b717f6803450 100644 --- a/ecmascript/builtins/builtins_generator.cpp +++ b/ecmascript/builtins/builtins_generator.cpp @@ -40,6 +40,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeNext(EcmaRuntimeCallInfo *arg THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle value = GetCallArg(argv, 0); // 2.Return ? GeneratorResume(g, value). @@ -60,7 +61,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeReturn(EcmaRuntimeCallInfo *a THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 2.Let C be Completion { [[Type]]: return, [[Value]]: value, [[Target]]: empty }. JSHandle value = GetCallArg(argv, 0); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); @@ -85,7 +86,7 @@ JSTaggedValue BuiltinsGenerator::GeneratorPrototypeThrow(EcmaRuntimeCallInfo *ar THROW_TYPE_ERROR_AND_RETURN(thread, "Not a generator object.", JSTaggedValue::Exception()); } JSHandle generator(thread, JSGeneratorObject::Cast(*JSTaggedValue::ToObject(thread, msg))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 2.Let C be ThrowCompletion(exception). 
 JSHandle<JSTaggedValue> exception = GetCallArg(argv, 0);
 ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
diff --git a/ecmascript/builtins/builtins_global.cpp b/ecmascript/builtins/builtins_global.cpp
index 2534ef4c4d0fb4e91bb234dcf4a79c8bb6abf817..918631474f6af660f3b2baf893b4b0520c3ed3f2 100644
--- a/ecmascript/builtins/builtins_global.cpp
+++ b/ecmascript/builtins/builtins_global.cpp
@@ -25,12 +25,18 @@
 #include "ecmascript/ecma_macros.h"
 #include "ecmascript/js_function.h"
 #include "ecmascript/mem/c_containers.h"
+#include "ecmascript/module/js_module_deregister.h"
 #include "ecmascript/stubs/runtime_stubs.h"
 #include "ecmascript/tagged_array-inl.h"
 
 namespace panda::ecmascript::builtins {
 using NumberHelper = base::NumberHelper;
 using StringHelper = base::StringHelper;
+std::u16string g_asciiWordChars(u"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_");
+std::u16string g_escapeWordChars(u"@*+-./");
+constexpr std::uint16_t CHAR16_PERCENT_SIGN = 0x0025; // u'%'
+constexpr std::uint16_t CHAR16_LATIN_SMALL_LETTER_U = 0x0075; // u'u'
+constexpr std::uint16_t CHAR16_LETTER_NULL = u'\0';
 
 // 18.2.1
 JSTaggedValue BuiltinsGlobal::NotSupportEval(EcmaRuntimeCallInfo *msg)
@@ -289,6 +295,126 @@ uint8_t BuiltinsGlobal::GetValueFromTwoHex(uint16_t front, uint16_t behind)
     return res;
 }
 
+uint16_t BuiltinsGlobal::GetValueFromHexString(const JSHandle<EcmaString> &string)
+{
+    uint32_t size = EcmaStringAccessor(string).GetLength();
+    ASSERT(size > 0 && size <= 4); // NOLINT 4: means 4 hex digits
+    std::u16string hexString(u"0123456789ABCDEF");
+
+    uint16_t ret = 0;
+    for (uint32_t i = 0; i < size; ++i) {
+        uint16_t ch = EcmaStringAccessor(string).Get(i);
+        size_t idx = StringHelper::FindFromU16ToUpper(hexString, &ch);
+        ret = ((ret << 4U) | idx) & BIT_MASK_4F; // NOLINT 4: means shift left by 4
+    }
+    return ret;
+}
+
+// 22.1.3.17.2 StringPad ( S, maxLength, fillString, placement )
+EcmaString *BuiltinsGlobal::StringPad(JSThread *thread, const JSHandle<EcmaString> &source,
+                                      uint32_t maxLength, const JSHandle<EcmaString> &fillString,
+                                      Placement placement)
+{
+    // 1. Let stringLength be the length of S.
+    uint32_t stringLength = EcmaStringAccessor(source).GetLength();
+    // 2. If maxLength ≤ stringLength, return S.
+    if (maxLength <= stringLength) {
+        return *source;
+    }
+    // 3. If fillString is the empty String, return S.
+    uint32_t targetStrLen = EcmaStringAccessor(fillString).GetLength();
+    if (targetStrLen == 0) {
+        return *source;
+    }
+    // 4. Let fillLen be maxLength - stringLength.
+    uint32_t fillLen = maxLength - stringLength;
+    EcmaVM *vm = thread->GetEcmaVM();
+    // 5. Let truncatedStringFiller be the String value consisting of repeated concatenations
+    // of fillString truncated to length fillLen.
+    uint32_t repeatTimes = std::ceil(fillLen / static_cast<double>(targetStrLen));
+    EcmaString *p = nullptr;
+    JSHandle<EcmaString> stringFiller = vm->GetFactory()->NewFromStdString(std::string("\0"));
+    for (uint32_t k = 0; k < repeatTimes; ++k) {
+        p = EcmaStringAccessor::Concat(vm, stringFiller, fillString);
+        stringFiller = JSHandle<EcmaString>(thread, p);
+    }
+    JSHandle<EcmaString> truncatedStringFiller(thread,
+        EcmaStringAccessor::FastSubString(vm, stringFiller, 0, fillLen));
+    // 6. If placement is start, return the string-concatenation of truncatedStringFiller and S.
+    // 7. Else, return the string-concatenation of S and truncatedStringFiller.
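StringPad above needs ceil(fillLen / targetStrLen) repetitions of fillString before truncating; note that plain integer division truncates, which is why the count is computed in floating point (std::ceil applied to an already-truncated integer quotient would be a no-op and leave the filler short). The same steps 5 to 7 on std::string, self-contained:

    #include <cassert>
    #include <string>

    // Repeat `fill` until it covers fillLen characters, then truncate and
    // prepend (the Placement::START case used by escape()).
    std::string Pad(const std::string &s, size_t maxLength, const std::string &fill) {
        if (maxLength <= s.size() || fill.empty()) {
            return s;
        }
        const size_t fillLen = maxLength - s.size();
        std::string filler;
        while (filler.size() < fillLen) {  // runs ceil(fillLen / fill.size()) times
            filler += fill;
        }
        return filler.substr(0, fillLen) + s;
    }

    int main() {
        assert(Pad("7F", 4, "0") == "007F");    // the "%XX" case in escape()
        assert(Pad("3B", 5, "ab") == "aba3B");  // truncation mid-fill
        return 0;
    }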
+ if (placement == Placement::START) { + return EcmaStringAccessor::Concat(vm, truncatedStringFiller, source); + } else { + return EcmaStringAccessor::Concat(vm, source, truncatedStringFiller); + } +} + +// Static Semantics: UTF16SurrogatePairToCodePoint ( lead, trail ) +uint16_t BuiltinsGlobal::UTF16SurrogatePairToCodePoint(uint16_t lead, uint16_t trail) +{ + // 1. Assert: lead is a leading surrogate and trail is a trailing surrogate. + ASSERT(IsUTF16HighSurrogate(lead) && IsUTF16LowSurrogate(trail)); + // 2. Let cp be (lead - 0xD800) × 0x400 + (trail - 0xDC00) + 0x10000. + uint16_t cp = ((lead - 0xD800) << 10UL) + (trail - 0xDC00) + 0x10000; + // 3. Return the code point cp. + return cp; +} + +// 11.1.5 Static Semantics: StringToCodePoints ( string ) +EcmaString *BuiltinsGlobal::StringToCodePoints(JSThread *thread, const JSHandle &string) +{ + // 1. Let codePoints be a new empty List. + std::u16string codePoints; + // 2. Let size be the length of string. + uint32_t size = EcmaStringAccessor(string).GetLength(); + // 3. Let position be 0. + uint32_t position = 0; + // 4. Repeat, while position < size, + // a. Let cp be CodePointAt(string, position). + // b. Append cp.[[CodePoint]] to codePoints. + // c. Set position to position + cp.[[CodeUnitCount]]. + while (position < size) { + // i.Let first be the code unit at index position within string. + uint16_t first = EcmaStringAccessor(string).Get(position); + uint16_t cp = first - CHAR16_LETTER_NULL; + uint8_t codeUnitCount = 0; + bool isUnpairedSurrogate = false; + // ii. If first is neither a leading surrogate nor a trailing surrogate, then + // a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: false }. + if (!IsUTF16HighSurrogate(first) && !IsUTF16LowSurrogate(first)) { + codeUnitCount = 1; // 1 means: code unit count + isUnpairedSurrogate = false; + } else if (IsUTF16HighSurrogate(first) || position + 1 == size) { + // iii. If first is a trailing surrogate or position + 1 = size, then + // a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: true }. + codeUnitCount = 1; + isUnpairedSurrogate = true; + } else { + // iv. Let second be the code unit at index position + 1 within string. + uint16_t second = EcmaStringAccessor(string).Get(position + 1); + // v. If second is not a trailing surrogate, then + // a. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 1, [[IsUnpairedSurrogate]]: true }. + if (!IsUTF16LowSurrogate(second)) { + codeUnitCount = 1; // 1 means: code unit count + isUnpairedSurrogate = true; + } else { + // vi. Set cp to UTF16SurrogatePairToCodePoint(first, second). + // vii. Return the Record { [[CodePoint]]: cp, [[CodeUnitCount]]: 2, [[IsUnpairedSurrogate]]: false }. + cp = UTF16SurrogatePairToCodePoint(first, second); + codeUnitCount = 2; // 2 means: code unit count + isUnpairedSurrogate = false; + } + } + codePoints.push_back(cp); + position = position + codeUnitCount; + } + // 5. Return codePoints. 
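A note on UTF16SurrogatePairToCodePoint above: the formula is the textbook cp = (lead - 0xD800) * 0x400 + (trail - 0xDC00) + 0x10000, whose result needs 21 bits (code points reach 0x10FFFF). The sketch below therefore uses uint32_t; the patch keeps uint16_t, which is lossless for its only current caller (hex-digit strings never contain surrogates) but truncates for real pairs:

    #include <cassert>
    #include <cstdint>

    // cp = (lead - 0xD800) * 0x400 + (trail - 0xDC00) + 0x10000
    uint32_t SurrogatePairToCodePoint(uint16_t lead, uint16_t trail) {
        assert(0xD800 <= lead && lead <= 0xDBFF);    // leading surrogate
        assert(0xDC00 <= trail && trail <= 0xDFFF);  // trailing surrogate
        return (static_cast<uint32_t>(lead - 0xD800) << 10U) + (trail - 0xDC00) + 0x10000;
    }

    int main() {
        // U+1F600 (grinning face) is the UTF-16 pair 0xD83D 0xDE00.
        assert(SurrogatePairToCodePoint(0xD83D, 0xDE00) == 0x1F600);
        return 0;
    }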
+ ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + uint16_t *ptr = reinterpret_cast(codePoints.data()); + JSHandle codePointsString = factory->NewFromUtf16Literal(ptr, codePoints.size()); + return *codePointsString; +} + // Runtime Semantics JSTaggedValue BuiltinsGlobal::Decode(JSThread *thread, const JSHandle &str, judgURIFunc IsInURISet) { @@ -501,6 +627,26 @@ JSTaggedValue BuiltinsGlobal::PrintEntrypoint(EcmaRuntimeCallInfo *msg) return JSTaggedValue::Undefined(); } +JSTaggedValue BuiltinsGlobal::MarkModuleCollectable(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + uint32_t numArgs = msg->GetArgsNumber(); + if (numArgs != 1) { + LOG_FULL(ERROR) << "The number of parameters received by markModuleCollectable is incorrect."; + return JSTaggedValue::False(); + } + JSHandle module = GetCallArg(msg, 0); + if (!module->IsModuleNamespace()) { + return JSTaggedValue::False(); + } + + ModuleDeregister::ProcessModuleReference(thread, module); + return JSTaggedValue::True(); +} + JSTaggedValue BuiltinsGlobal::CallJsBoundFunction(EcmaRuntimeCallInfo *msg) { JSThread *thread = msg->GetThread(); @@ -574,4 +720,150 @@ JSTaggedValue BuiltinsGlobal::PrintFunctionCallStat(EcmaRuntimeCallInfo *msg) return JSTaggedValue::Undefined(); } #endif + +// B.2.1.1 escape ( string ) +JSTaggedValue BuiltinsGlobal::Escape(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + BUILTINS_API_TRACE(thread, Global, Escape); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + // 1. Set string to ? ToString(string). + JSHandle string = JSTaggedValue::ToString(thread, GetCallArg(msg, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + EcmaVM *vm = thread->GetEcmaVM(); + // 2. Let len be the length of string. + uint32_t len = EcmaStringAccessor(string).GetLength(); + // 3. Let R be the empty String. + std::u16string r; + // 4. Let unescapedSet be the string-concatenation of the ASCII word characters and "@*+-./". + std::u16string unescapedSet = g_asciiWordChars + g_escapeWordChars; + // 5. Let k be 0. + uint32_t k = 0; + // 6. Repeat, while k < len, + // a. Let C be the code unit at index k within string. + // b. If unescapedSet contains C, then + // i. Let S be C. + // c. Else, + // i. Let n be the numeric value of C. + // ii. If n < 256, then + // 1. Let hex be the String representation of n, formatted as an uppercase hexadecimal number. + // 2. Let S be the string-concatenation of "%" and StringPad(hex, 2, "0", start). + // iii. Else, + // 1. Let hex be the String representation of n, formatted as an uppercase hexadecimal number. + // 2. Let S be the string-concatenation of "%u" and StringPad(hex, 4, "0", start). + // d. Set R to the string-concatenation of R and S. + // e. Set k to k + 1. 
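Steps 6.b and 6.c above boil down to: characters in the unescaped set pass through; anything else becomes "%XX" when its value fits in a byte and "%uXXXX" otherwise, zero-padded via StringPad. The same mapping for a single code unit in compact standalone form (the engine loop that follows builds these strings through the factory instead):

    #include <cstdio>
    #include <string>

    // escape() for one UTF-16 code unit; assumes the caller already checked
    // that c is not in the unescaped set.
    std::string EscapeUnit(char16_t c) {
        char buf[8];
        if (c < 256) {
            std::snprintf(buf, sizeof buf, "%%%02X", static_cast<unsigned>(c));   // "%XX"
        } else {
            std::snprintf(buf, sizeof buf, "%%u%04X", static_cast<unsigned>(c));  // "%uXXXX"
        }
        return buf;
    }

    int main() {
        // prints: %20 %u732B
        std::printf("%s %s\n", EscapeUnit(u' ').c_str(), EscapeUnit(u'\u732B').c_str());
        return 0;
    }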
+ while (k < len) { + uint16_t c = EcmaStringAccessor(string).Get(k); + if (unescapedSet.find(c) != std::u16string::npos) { + r.push_back(c); + } else { + uint16_t n = c - CHAR16_LETTER_NULL; + std::ostringstream oss; + oss << std::uppercase << std::hex << n; + JSHandle hex = factory->NewFromStdString(oss.str()); + JSHandle fillString = factory->NewFromStdString(std::string("0")); + EcmaString *temp = nullptr; + JSHandle hexStringHandle = factory->NewFromStdString(std::string("\0")); + if (n <= std::numeric_limits::max()) { + EcmaString *hexEcmaString = + StringPad(thread, hex, 2, fillString, Placement::START); // NOLINT 2: means max string length + hexStringHandle = JSHandle(thread, hexEcmaString); + temp = EcmaStringAccessor::Concat(vm, factory->NewFromStdString("%"), hexStringHandle); + } else { + EcmaString *hexEcmaString = + StringPad(thread, hex, 4, fillString, Placement::START); // NOLINT 4: means max string length + hexStringHandle = JSHandle(thread, hexEcmaString); + temp = EcmaStringAccessor::Concat(vm, factory->NewFromStdString("%u"), hexStringHandle); + } + JSHandle s = JSHandle(thread, temp); + r = r + EcmaStringAccessor(s).ToU16String(); + } + ++k; + } + // 7. Return R. + auto *returnData = reinterpret_cast(r.data()); + uint32_t retSize = r.size(); + return factory->NewFromUtf16Literal(returnData, retSize).GetTaggedValue(); +} + +// B.2.1.2 unescape ( string ) +JSTaggedValue BuiltinsGlobal::Unescape(EcmaRuntimeCallInfo *msg) +{ + ASSERT(msg); + JSThread *thread = msg->GetThread(); + BUILTINS_API_TRACE(thread, Global, Unescape); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + // 1. Set string to ? ToString(string). + JSHandle string = JSTaggedValue::ToString(thread, GetCallArg(msg, 0)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 2. Let len be the length of string. + uint32_t len = EcmaStringAccessor(string).GetLength(); + // 3. Let R be the empty String. + EcmaVM *vm = thread->GetEcmaVM(); + ObjectFactory *factory = vm->GetFactory(); + std::u16string r; + // 4. Let k be 0. + uint32_t k = 0; + // 5. Repeat, while k < len, + // a. Let C be the code unit at index k within string. + // b. If C is the code unit 0x0025 (PERCENT SIGN), then + // i. Let hexDigits be the empty String. + // ii. Let optionalAdvance be 0. + // iii. If k + 5 < len and the code unit at index k + 1 within string is the code unit + // 0x0075 (LATIN SMALL LETTER U), then + // 1. Set hexDigits to the substring of string from k + 2 to k + 6. + // 2. Set optionalAdvance to 5. + // iv. Else if k + 3 ≤ len, then + // 1. Set hexDigits to the substring of string from k + 1 to k + 3. + // 2. Set optionalAdvance to 2. + // v. Let parseResult be ParseText(StringToCodePoints(hexDigits), HexDigits[~Sep]). + // vi. If parseResult is a Parse Node, then + // 1. Let n be the MV of parseResult. + // 2. Set C to the code unit whose numeric value is n. + // 3. Set k to k + optionalAdvance. + // c. Set R to the string-concatenation of R and C. + // d. Set k to k + 1. 
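And the inverse for unescape: after a '%', try the six-character %uXXXX form first, then the three-character %XX form, and fall back to a literal '%' when neither parses. A compact sketch of the loop below, folding in the (ret << 4) | digit accumulation that GetValueFromHexString performs (ASCII-only here; the engine works in UTF-16 code units):

    #include <cassert>
    #include <cctype>
    #include <string>

    // Parse exactly n hex digits at s[pos]; -1 on failure.
    int ParseHex(const std::string &s, size_t pos, size_t n) {
        if (pos + n > s.size()) {
            return -1;
        }
        int ret = 0;
        for (size_t i = 0; i < n; ++i) {
            if (!std::isxdigit(static_cast<unsigned char>(s[pos + i]))) {
                return -1;
            }
            int c = std::toupper(static_cast<unsigned char>(s[pos + i]));
            ret = (ret << 4) | (c <= '9' ? c - '0' : c - 'A' + 10);
        }
        return ret;
    }

    std::string Unescape(const std::string &s) {
        std::string r;
        for (size_t k = 0; k < s.size(); ++k) {
            int n = -1;
            size_t advance = 0;
            if (s[k] == '%') {
                if ((n = ParseHex(s, k + 2, 4)) >= 0 && s[k + 1] == 'u') {
                    advance = 5;  // "%uXXXX"
                } else if ((n = ParseHex(s, k + 1, 2)) >= 0) {
                    advance = 2;  // "%XX"
                }
            }
            bool decoded = (advance != 0 && n < 128);  // ASCII-only simplification
            r += decoded ? static_cast<char>(n) : s[k];
            k += decoded ? advance : 0;
        }
        return r;
    }

    int main() {
        assert(Unescape("a%20b%25") == "a b%");
        assert(Unescape("100%") == "100%");  // unparseable '%' passes through
        return 0;
    }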
+ while (k < len) { + uint16_t c = EcmaStringAccessor(string).Get(k); + JSHandle hexDigitsString; + if (c == CHAR16_PERCENT_SIGN) { + EcmaString *hexDigits = nullptr; + uint16_t optionalAdvance = 0; + if (k + 5 < len && // NOLINT 5: means offset by 5 + EcmaStringAccessor(string).Get(k + 1) == CHAR16_LATIN_SMALL_LETTER_U) { // NOLINT 1: means offset by 1 + hexDigits = EcmaStringAccessor(string).FastSubString(vm, string, + k + 2, 4); // NOLINT 2: means offset 4: means len + optionalAdvance = optionalAdvance + 5; // NOLINT 5: means plus 5 + } else if (k + 3 <= len) { // NOLINT 3: means offset + hexDigits = EcmaStringAccessor(string).FastSubString(vm, string, k + 1, 2); // NOLINT 2:means len + optionalAdvance = optionalAdvance + 2; // NOLINT 2: means plus 2 + } + if (hexDigits != nullptr) { + hexDigitsString = JSHandle(thread, hexDigits); + EcmaString *codePoints = StringToCodePoints(thread, hexDigitsString); + JSHandle codePointString = JSHandle(thread, codePoints); + bool isHex = true; + for (uint32_t i = 0; i < EcmaStringAccessor(codePointString).GetLength(); ++i) { + if (!IsHexDigits(EcmaStringAccessor(codePointString).Get(i))) { + isHex = false; + } + } + if (isHex) { + uint16_t n = GetValueFromHexString(codePointString); + c = n; + k = k + optionalAdvance; + } + } + } + r.push_back(c); + ++k; + } + // 7. Return R. + auto *returnData = reinterpret_cast(r.data()); + uint32_t retSize = r.size(); + return factory->NewFromUtf16Literal(returnData, retSize).GetTaggedValue(); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_global.h b/ecmascript/builtins/builtins_global.h index 433219a931ed4c9deaf27e2adbd8600225cd55f1..39a8a0474d4181ce1df9b139738cc6c88dafb657 100644 --- a/ecmascript/builtins/builtins_global.h +++ b/ecmascript/builtins/builtins_global.h @@ -19,16 +19,93 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/js_thread.h" +#define BUILTIN_GLOBAL_CONSTANTS(V) \ + V("Infinity", INFINITY_VALUE) \ + V("NaN", NAN_VALUE) \ + V("undefined", UNDEFINED_VALUE) + +// List of functions in the global object. +// V(name, func, length, stubIndex) +// where BuiltinsGlobal::func refers to the native implementation of globalThis[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +// The following global object properties are not implemented yet: +// - Encode ( string, extraUnescaped ) +// - Decode ( string, preserveEscapeSet ) +// - ParseHexOctet ( string, position ) +// The following global object properties are not listed here: +// - parseFloat ( string ), listed in builtins_number.h instead. +// - parseInt ( string ), listed in builtins_number.h instead. 
+#define BUILTIN_GLOBAL_FUNCTIONS_COMMON(V) \ + /* decodeURI ( encodedURI ) */ \ + V("decodeURI", DecodeURI, 1, INVALID) \ + /* decodeURIComponent ( encodedURIComponent ) */ \ + V("decodeURIComponent", DecodeURIComponent, 1, INVALID) \ + /* encodeURI ( uri ) */ \ + V("encodeURI", EncodeURI, 1, INVALID) \ + /* encodeURIComponent ( uriComponent ) */ \ + V("encodeURIComponent", EncodeURIComponent, 1, INVALID) \ + /* escape ( string ), defined in B.2.1 */ \ + V("escape", Escape, 1, INVALID) \ + /* eval ( x ), which is NOT supported in ArkTS engine */ \ + V("eval", NotSupportEval, 1, INVALID) \ + /* isFinite ( number ) */ \ + V("isFinite", IsFinite, 1, INVALID) \ + /* isNaN ( number ) */ \ + V("isNaN", IsNaN, 1, INVALID) \ + /* unescape ( string )*/ \ + V("unescape", Unescape, 1, INVALID) \ + /* The following are ArkTS extensions */ \ + V("markModuleCollectable", MarkModuleCollectable, 0, INVALID) \ + V("print", PrintEntrypoint, 0, INVALID) + +#if ECMASCRIPT_ENABLE_RUNTIME_STAT +#define BUILTIN_GLOBAL_FUNCTIONS_RUNTIME_STAT(V) \ + V("startRuntimeStat", StartRuntimeStat, 0, INVALID) \ + V("stopRuntimeStat", StopRuntimeStat, 0, INVALID) +#else +#define BUILTIN_GLOBAL_FUNCTIONS_RUNTIME_STAT(V) // Nothing +#endif + +#if ECMASCRIPT_ENABLE_OPT_CODE_PROFILER +#define BUILTIN_GLOBAL_FUNCTIONS_OPT_CODE_PROFILER(V) \ + V("printOptStat", PrintOptStat, 0, INVALID) +#else +#define BUILTIN_GLOBAL_FUNCTIONS_OPT_CODE_PROFILER(V) // Nothing +#endif + +#if ECMASCRIPT_ENABLE_FUNCTION_CALL_TIMER +#define BUILTIN_GLOBAL_FUNCTIONS_FUNCTION_CALL_TIMER(V) \ + V("printFunctionCallStat", PrintFunctionCallStat, 0, INVALID) +#else +#define BUILTIN_GLOBAL_FUNCTIONS_FUNCTION_CALL_TIMER(V) // Nothing +#endif + +#define BUILTIN_GLOBAL_FUNCTIONS(V) \ + BUILTIN_GLOBAL_FUNCTIONS_COMMON(V) \ + BUILTIN_GLOBAL_FUNCTIONS_RUNTIME_STAT(V) \ + BUILTIN_GLOBAL_FUNCTIONS_OPT_CODE_PROFILER(V) \ + BUILTIN_GLOBAL_FUNCTIONS_FUNCTION_CALL_TIMER(V) + namespace panda::ecmascript::builtins { static constexpr uint8_t BIT_MASK = 0x0F; static constexpr uint8_t BIT_MASK_FF = 0xFF; +static constexpr uint16_t BIT_MASK_4F = 0xFFFF; static constexpr uint16_t BIT16_MASK = 0x3FF; static constexpr uint8_t BIT_MASK_ONE = 0x80; static constexpr uint8_t BIT_MASK_TWO = 0xC0; using judgURIFunc = bool (*)(uint16_t); +enum class Placement { + START = 0, + END, +}; + class BuiltinsGlobal : public base::BuiltinsBase { public: + static const inline JSTaggedValue INFINITY_VALUE = JSTaggedValue(base::POSITIVE_INFINITY); + static const inline JSTaggedValue NAN_VALUE = JSTaggedValue(base::NAN_VALUE); + static const inline JSTaggedValue UNDEFINED_VALUE = JSTaggedValue::Undefined(); + // 18.2.1 static JSTaggedValue NotSupportEval(EcmaRuntimeCallInfo *msg); // 18.2.2 @@ -42,6 +119,7 @@ public: static JSTaggedValue EncodeURIComponent(EcmaRuntimeCallInfo *msg); static JSTaggedValue PrintEntrypoint(EcmaRuntimeCallInfo *msg); + static JSTaggedValue MarkModuleCollectable(EcmaRuntimeCallInfo *msg); static JSTaggedValue CallJsBoundFunction(EcmaRuntimeCallInfo *msg); static JSTaggedValue CallJsProxy(EcmaRuntimeCallInfo *msg); #if ECMASCRIPT_ENABLE_RUNTIME_STAT @@ -56,8 +134,36 @@ public: #if ECMASCRIPT_ENABLE_FUNCTION_CALL_TIMER static JSTaggedValue PrintFunctionCallStat(EcmaRuntimeCallInfo *msg); #endif + // B.2.1.1 escape ( string ) + static JSTaggedValue Escape(EcmaRuntimeCallInfo *msg); + // B.2.1.2 unescape ( string ) + static JSTaggedValue Unescape(EcmaRuntimeCallInfo *msg); + + static Span GetGlobalConstants() + { + return Span(GLOBAL_CONSTANTS); + } + + static Span 
GetGlobalFunctions() + { + return Span(GLOBAL_FUNCTIONS); + } private: +#define BUILTIN_GLOBAL_CONSTANT_ENTRY(name, var) \ + base::BuiltinConstantEntry::Create(name, BuiltinsGlobal::var), +#define BUILTIN_GLOBAL_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsGlobal::func, length, kungfu::BuiltinsStubCSigns::id), + + static inline std::array GLOBAL_CONSTANTS = { + BUILTIN_GLOBAL_CONSTANTS(BUILTIN_GLOBAL_CONSTANT_ENTRY) + }; + static constexpr std::array GLOBAL_FUNCTIONS = { + BUILTIN_GLOBAL_FUNCTIONS(BUILTIN_GLOBAL_FUNCTION_ENTRY) + }; +#undef BUILTIN_GLOBAL_CONSTANT_ENTRY +#undef BUILTIN_GLOBAL_FUNCTION_ENTRY + static void PrintString(JSThread *thread, EcmaString *string); static void PrintValue(int64_t value, int64_t tag); static JSTaggedValue Encode(JSThread *thread, const JSHandle &str, judgURIFunc IsInURISet); @@ -69,6 +175,27 @@ private: static bool IsInMarkURISet(uint16_t ch); static bool IsHexDigits(uint16_t ch); static uint8_t GetValueFromTwoHex(uint16_t front, uint16_t behind); + static uint16_t GetValueFromHexString(const JSHandle &string); + // 22.1.3.17.2 StringPad ( S, maxLength, fillString, placement ) + static EcmaString *StringPad(JSThread *thread, + const JSHandle &string, + uint32_t maxLength, + const JSHandle &fillString, + Placement placement = Placement::START); + static bool IsUTF16HighSurrogate(uint16_t ch) + { + return base::utf_helper::DECODE_LEAD_LOW <= ch && ch <= base::utf_helper::DECODE_LEAD_HIGH; + } + + static bool IsUTF16LowSurrogate(uint16_t ch) + { + return base::utf_helper::DECODE_TRAIL_LOW <= ch && ch <= base::utf_helper::DECODE_TRAIL_HIGH; + } + + // 11.1.3 Static Semantics: UTF16SurrogatePairToCodePoint ( lead, trail ) + static uint16_t UTF16SurrogatePairToCodePoint(uint16_t lead, uint16_t trail); + // 11.1.5 Static Semantics: StringToCodePoints ( string ) + static EcmaString *StringToCodePoints(JSThread *thread, const JSHandle &string); }; } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_json.cpp b/ecmascript/builtins/builtins_json.cpp index 9f28884818f1b957e35899bdf1420bbb63e99a71..22989faeec41eb847fe6aa393957378d893bcb74 100644 --- a/ecmascript/builtins/builtins_json.cpp +++ b/ecmascript/builtins/builtins_json.cpp @@ -15,6 +15,7 @@ #include "ecmascript/builtins/builtins_json.h" +#include "ecmascript/base/fast_json_stringifier.h" #include "ecmascript/base/json_parser.h" #include "ecmascript/base/json_stringifier.h" #include "ecmascript/base/number_helper.h" @@ -47,11 +48,11 @@ JSTaggedValue BuiltinsJson::Parse(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle result; if (EcmaStringAccessor(parseString).IsUtf8()) { - panda::ecmascript::base::JsonParser parser(thread); - result = parser.ParseUtf8(*parseString); + panda::ecmascript::base::Utf8JsonParser parser(thread); + result = parser.Parse(*parseString); } else { - panda::ecmascript::base::JsonParser parser(thread); - result = parser.ParseUtf16(*parseString); + panda::ecmascript::base::Utf16JsonParser parser(thread); + result = parser.Parse(*parseString); } RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -86,6 +87,12 @@ JSTaggedValue BuiltinsJson::Stringify(EcmaRuntimeCallInfo *argv) uint32_t argc = argv->GetArgsNumber(); JSTaggedValue value = GetCallArg(argv, 0).GetTaggedValue(); + if (argc == 1 && thread->GetCurrentEcmaContext()->IsAotEntry()) { + JSHandle handleValue(thread, value); + panda::ecmascript::base::FastJsonStringifier stringifier(thread); + JSHandle result = 
stringifier.Stringify(handleValue); + return result.GetTaggedValue(); + } JSTaggedValue replacer = JSTaggedValue::Undefined(); JSTaggedValue gap = JSTaggedValue::Undefined(); diff --git a/ecmascript/builtins/builtins_lazy_callback.cpp b/ecmascript/builtins/builtins_lazy_callback.cpp index e6a543b628a287d7258dae7a3a50cff00641e4e5..3b89bcb8f3d45ffab96dbf9ef211cb73ae0926be 100644 --- a/ecmascript/builtins/builtins_lazy_callback.cpp +++ b/ecmascript/builtins/builtins_lazy_callback.cpp @@ -29,9 +29,10 @@ JSTaggedValue BuiltinsLazyCallback::Date(JSThread *thread, const JSHandleGetFactory(); auto env = vm->GetGlobalEnv(); ResetLazyInternalAttr(thread, obj, "Date"); - JSHandle objFuncClass(env->GetObjectFunctionClass()); + + JSHandle objFuncPrototypeVal = env->GetObjectFunctionPrototype(); Builtins builtin(thread, factory, vm); - builtin.InitializeDate(env, objFuncClass); + builtin.InitializeDate(env, objFuncPrototypeVal); return env->GetDateFunction().GetTaggedValue(); } @@ -43,9 +44,10 @@ JSTaggedValue BuiltinsLazyCallback::Set(JSThread *thread, const JSHandleGetFactory(); auto env = vm->GetGlobalEnv(); ResetLazyInternalAttr(thread, obj, "Set"); + Builtins builtin(thread, factory, vm); - JSHandle objFuncClass(env->GetObjectFunctionClass()); - builtin.InitializeSet(env, objFuncClass); + JSHandle objFuncPrototypeVal = env->GetObjectFunctionPrototype(); + builtin.InitializeSet(env, objFuncPrototypeVal); return env->GetBuiltinsSetFunction().GetTaggedValue(); } @@ -57,9 +59,10 @@ JSTaggedValue BuiltinsLazyCallback::Map(JSThread *thread, const JSHandleGetFactory(); auto env = vm->GetGlobalEnv(); ResetLazyInternalAttr(thread, obj, "Map"); + Builtins builtin(thread, factory, vm); - JSHandle objFuncClass(env->GetObjectFunctionClass()); - builtin.InitializeMap(env, objFuncClass); + JSHandle objFuncPrototypeVal = env->GetObjectFunctionPrototype(); + builtin.InitializeMap(env, objFuncPrototypeVal); return env->GetBuiltinsMapFunction().GetTaggedValue(); } @@ -133,9 +136,10 @@ JSTaggedValue BuiltinsLazyCallback::TypedArray(JSThread *thread, const JSHandle< ITERATE_TYPED_ARRAY(RESET_TYPED_ARRAY_INTERNAL_ATTR) #undef RESET_TYPED_ARRAY_INTERNAL_ATTR + Builtins builtin(thread, factory, vm); - JSHandle objFuncClass(env->GetObjectFunctionClass()); - builtin.InitializeTypedArray(env, objFuncClass); + JSHandle objFuncPrototypeVal = env->GetObjectFunctionPrototype(); + builtin.InitializeTypedArray(env, objFuncPrototypeVal); return env->GetTypedArrayFunction().GetTaggedValue(); } @@ -175,9 +179,10 @@ JSTaggedValue BuiltinsLazyCallback::DataView(JSThread *thread, const JSHandleGetFactory(); auto env = vm->GetGlobalEnv(); ResetLazyInternalAttr(thread, obj, "DataView"); + Builtins builtin(thread, factory, vm); - JSHandle objFuncClass(env->GetObjectFunctionClass()); - builtin.InitializeDataView(env, objFuncClass); + JSHandle objFuncPrototypeVal = env->GetObjectFunctionPrototype(); + builtin.InitializeDataView(env, objFuncPrototypeVal); return env->GetDataViewFunction().GetTaggedValue(); } diff --git a/ecmascript/builtins/builtins_list_format.cpp b/ecmascript/builtins/builtins_list_format.cpp index 781aacfaf8fbcb660fb089090e15cad33816e820..b9608aae523db04b4c6b7d8d5dd5b13b27a6e620 100644 --- a/ecmascript/builtins/builtins_list_format.cpp +++ b/ecmascript/builtins/builtins_list_format.cpp @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - + #include "ecmascript/builtins/builtins_list_format.h" #include "ecmascript/intl/locale_helper.h" @@ -130,6 +130,7 @@ JSTaggedValue BuiltinsListFormat::FormatToParts(EcmaRuntimeCallInfo *argv) JSHandle array = JSHandle::Cast(listArray); JSHandle result = JSListFormat::FormatListToParts(thread, listFormat, array); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return result.GetTaggedValue(); } diff --git a/ecmascript/builtins/builtins_locale.cpp b/ecmascript/builtins/builtins_locale.cpp index 7ef01c85bb87fe266ba951d35d828b9a30c9b9b8..ff0d044feaedcf12f5aa43276abc7171fe66e588 100644 --- a/ecmascript/builtins/builtins_locale.cpp +++ b/ecmascript/builtins/builtins_locale.cpp @@ -19,7 +19,7 @@ #include "ecmascript/ecma_vm.h" #include "ecmascript/global_env.h" #include "ecmascript/js_locale.h" -#include "ecmascript/object_factory.h" +#include "ecmascript/object_factory-inl.h" namespace panda::ecmascript::builtins { // 10.1.3 Intl.Locale( tag [, options] ) @@ -56,9 +56,11 @@ JSTaggedValue BuiltinsLocale::LocaleConstructor(EcmaRuntimeCallInfo *argv) JSHandle localeString = factory->GetEmptyString(); if (!tag->IsJSLocale()) { localeString = JSTaggedValue::ToString(thread, tag); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } else { icu::Locale *icuLocale = (JSHandle::Cast(tag))->GetIcuLocale(); localeString = intl::LocaleHelper::ToLanguageTag(thread, *icuLocale); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } // 10. If options is undefined, then // a.Let options be ! ObjectCreate(null). diff --git a/ecmascript/builtins/builtins_map.cpp b/ecmascript/builtins/builtins_map.cpp index 47b1cf9f1007a0c56320a3d933962d3814a49491..7700f093ee3bfb59692724a3d96572fdf9362d80 100644 --- a/ecmascript/builtins/builtins_map.cpp +++ b/ecmascript/builtins/builtins_map.cpp @@ -133,7 +133,7 @@ JSTaggedValue BuiltinsMap::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); bool flag = jsMap->Has(key.GetTaggedValue()); return GetTaggedBoolean(flag); @@ -150,7 +150,7 @@ JSTaggedValue BuiltinsMap::Get(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); JSHandle key = GetCallArg(argv, 0); JSTaggedValue value = jsMap->Get(key.GetTaggedValue()); return value; @@ -178,7 +178,7 @@ JSTaggedValue BuiltinsMap::ForEach(EcmaRuntimeCallInfo *argv) JSHandle thisArg = GetCallArg(argv, 1); JSMutableHandle hashMap(thread, map->GetLinkedMap()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; int index = 0; int totalElements = hashMap->NumberOfElements() + hashMap->NumberOfDeletedElements(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); @@ -227,8 +227,8 @@ JSTaggedValue BuiltinsMap::GetSize(EcmaRuntimeCallInfo *argv) if (!self->IsJSMap()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSMap", JSTaggedValue::Exception()); } - JSMap *jsMap = JSMap::Cast(*JSTaggedValue::ToObject(thread, self)); - int count = jsMap->GetSize(); + JSMap *jsMap = JSMap::Cast(self.GetTaggedValue().GetTaggedObject()); + uint32_t count = jsMap->GetSize(); return JSTaggedValue(count); } @@ -239,6 +239,7 @@ JSTaggedValue 
BuiltinsMap::Entries(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::KEY_AND_VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -249,6 +250,7 @@ JSTaggedValue BuiltinsMap::Keys(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::KEY); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -259,6 +261,7 @@ JSTaggedValue BuiltinsMap::Values(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle self = GetThis(argv); JSHandle iter = JSMapIterator::CreateMapIterator(thread, self, IterationKind::VALUE); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return iter.GetTaggedValue(); } @@ -308,7 +311,7 @@ JSTaggedValue BuiltinsMap::AddEntriesFromIterable(JSThread *thread, const JSHand if (thread->HasPendingException()) { return JSIterator::IteratorCloseAndReturn(thread, iter); } - const int32_t argsLength = 2; // 2: key and value pair + const uint32_t argsLength = 2; // 2: key and value pair JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, adder, JSHandle(target), undefined, argsLength); diff --git a/ecmascript/builtins/builtins_map.h b/ecmascript/builtins/builtins_map.h index 60346f2ec5eb2eaf859c0b4e5fd0e51aad940e29..86bfcb8b94b41d411d7afb3a04a746bc6a048abb 100644 --- a/ecmascript/builtins/builtins_map.h +++ b/ecmascript/builtins/builtins_map.h @@ -19,6 +19,30 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/ecma_runtime_call_info.h" +// List of functions in Map.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsMap::func refers to the native implementation of Map.prototype[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_MAP_PROTOTYPE_FUNCTIONS(V) \ + /* Map.prototype.clear ( ) */ \ + V("clear", Clear, 0, MapClear) \ + /* Map.prototype.delete ( key ) */ \ + V("delete", Delete, 1, MapDelete) \ + /* Map.prototype.entries ( ) */ \ + V("entries", Entries, 0, MapEntries) \ + /* Map.prototype.forEach ( callbackfn [ , thisArg ] ) */ \ + V("forEach", ForEach, 1, MapForEach) \ + /* Map.prototype.get ( key ) */ \ + V("get", Get, 1, INVALID) \ + /* Map.prototype.has ( key ) */ \ + V("has", Has, 1, MapHas) \ + /* Map.prototype.keys ( ) */ \ + V("keys", Keys, 0, MapKeys) \ + /* Map.prototype.set ( key, value ) */ \ + V("set", Set, 2, MapSet) \ + /* Map.prototype.values ( ) */ \ + V("values", Values, 0, MapValues) + namespace panda::ecmascript::builtins { class BuiltinsMap : public base::BuiltinsBase { public: @@ -51,6 +75,32 @@ public: static JSTaggedValue AddEntriesFromIterable(JSThread *thread, const JSHandle &target, const JSHandle &iterable, const JSHandle &adder, ObjectFactory *factory); + + // Excluding the constructor and '@@' internal properties. 
+ static Span GetMapPrototypeFunctions() + { + return Span(MAP_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 4 : 4 more inline properties in Map.prototype + // (1) Map.prototype.constructor + // (2) Map.prototype [ @@toStringTag ] + // (3) Map.prototype [ @@iterator ] ( ) + // (4) get Map.prototype.size + return GetMapPrototypeFunctions().Size() + 4; + } + +private: +#define BUILTIN_MAP_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsMap::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array MAP_PROTOTYPE_FUNCTIONS = { + BUILTIN_MAP_PROTOTYPE_FUNCTIONS(BUILTIN_MAP_FUNCTION_ENTRY) + }; + +#undef BUILTIN_MAP_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_MAP_H diff --git a/ecmascript/builtins/builtins_math.cpp b/ecmascript/builtins/builtins_math.cpp index 2b16ef09429aebfb17429588cd39278cc9711113..5aef97cd63d3be5d12e6f99654a802f3ff3e4328 100644 --- a/ecmascript/builtins/builtins_math.cpp +++ b/ecmascript/builtins/builtins_math.cpp @@ -34,6 +34,7 @@ JSTaggedValue BuiltinsMath::Abs(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (numberValue.IsDouble()) { // if number_value is double,NaN,Undefine, deal in this case // if number_value is a String ,which can change to double. e.g."100",deal in this case @@ -52,6 +53,7 @@ JSTaggedValue BuiltinsMath::Acos(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN , <-1 or > 1,result is NaN @@ -70,6 +72,7 @@ JSTaggedValue BuiltinsMath::Acosh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (value >= 1) { @@ -87,6 +90,7 @@ JSTaggedValue BuiltinsMath::Asin(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (value >= -1 && value <= 1) { @@ -104,6 +108,7 @@ JSTaggedValue BuiltinsMath::Asinh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN, NaN, result is NaN @@ -122,6 +127,7 @@ JSTaggedValue BuiltinsMath::Atan(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // value == -NaN, NaN, result is NaN @@ -140,6 +146,7 @@ 
JSTaggedValue BuiltinsMath::Atanh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (value >= -1 && value <= 1) { @@ -159,7 +166,9 @@ JSTaggedValue BuiltinsMath::Atan2(EcmaRuntimeCallInfo *argv) JSHandle msgX = GetCallArg(argv, 1); double result = base::NAN_VALUE; JSTaggedNumber numberValueY = JSTaggedValue::ToNumber(thread, msgY); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedNumber numberValueX = JSTaggedValue::ToNumber(thread, msgX); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double valueY = numberValueY.GetNumber(); double valueX = numberValueX.GetNumber(); // y = +0 and x > +0, return +0 @@ -186,6 +195,7 @@ JSTaggedValue BuiltinsMath::Cbrt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value == -NaN, NaN, result is NaN @@ -204,6 +214,7 @@ JSTaggedValue BuiltinsMath::Ceil(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite,return value @@ -228,6 +239,7 @@ JSTaggedValue BuiltinsMath::Clz32(EcmaRuntimeCallInfo *argv) constexpr int defaultValue = 32; JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); auto tmpValue = std::abs(value); auto result = numberValue.ToUint32(); @@ -247,6 +259,7 @@ JSTaggedValue BuiltinsMath::Cos(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, result is NaN @@ -265,6 +278,7 @@ JSTaggedValue BuiltinsMath::Cosh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -283,6 +297,7 @@ JSTaggedValue BuiltinsMath::Exp(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -301,6 +316,7 @@ JSTaggedValue BuiltinsMath::Expm1(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + 
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // if value is NaN or -NaN, result is NaN @@ -319,6 +335,7 @@ JSTaggedValue BuiltinsMath::Floor(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, +0, -0, return value @@ -345,6 +362,7 @@ JSTaggedValue BuiltinsMath::Fround(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result; if (std::isnan(std::abs(value))) { @@ -370,6 +388,7 @@ JSTaggedValue BuiltinsMath::Hypot(EcmaRuntimeCallInfo *argv) for (uint32_t i = 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); result = std::hypot(result, value); } @@ -386,7 +405,9 @@ JSTaggedValue BuiltinsMath::Imul(EcmaRuntimeCallInfo *argv) JSHandle msg1 = GetCallArg(argv, 0); JSHandle msg2 = GetCallArg(argv, 1); JSTaggedNumber numberValue1 = JSTaggedValue::ToNumber(thread, msg1); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSTaggedNumber numberValue2 = JSTaggedValue::ToNumber(thread, msg2); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); auto value1 = numberValue1.GetNumber(); auto value2 = numberValue2.GetNumber(); if (!std::isfinite(value1) || !std::isfinite(value2)) { @@ -409,6 +430,7 @@ JSTaggedValue BuiltinsMath::Log(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -427,6 +449,7 @@ JSTaggedValue BuiltinsMath::Log1p(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < -1,result is NaN @@ -445,6 +468,7 @@ JSTaggedValue BuiltinsMath::Log10(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -463,6 +487,7 @@ JSTaggedValue BuiltinsMath::Log2(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN , -NaN , or < 0,result is NaN @@ -493,6 +518,7 @@ JSTaggedValue BuiltinsMath::Max(EcmaRuntimeCallInfo *argv) for (uint32_t i 
= 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { // If any value is NaN, or -NaN, the max result is NaN @@ -527,6 +553,7 @@ JSTaggedValue BuiltinsMath::Min(EcmaRuntimeCallInfo *argv) for (uint32_t i = 0; i < argLen; i++) { JSHandle msg = GetCallArg(argv, i); numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { // If any value is NaN or -NaN, the min result is NaN @@ -600,6 +627,7 @@ JSTaggedValue BuiltinsMath::Round(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); auto result = base::NAN_VALUE; const double diff = 0.5; @@ -637,6 +665,7 @@ JSTaggedValue BuiltinsMath::Sign(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); if (std::isnan(std::abs(value))) { return GetTaggedDouble(std::abs(value)); @@ -659,6 +688,7 @@ JSTaggedValue BuiltinsMath::Sin(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, the result is NaN @@ -677,6 +707,7 @@ JSTaggedValue BuiltinsMath::Sinh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, the result is NaN @@ -695,6 +726,7 @@ JSTaggedValue BuiltinsMath::Sqrt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is negative, include -NaN and -Infinity but not -0.0, the result is NaN @@ -717,6 +749,7 @@ JSTaggedValue BuiltinsMath::Tan(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; // If value is NaN or -NaN, +infinite, -infinite, result is NaN @@ -735,6 +768,7 @@ JSTaggedValue BuiltinsMath::Tanh(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (!std::isnan(std::abs(value))) { @@ -752,6 +786,7 @@ JSTaggedValue 
BuiltinsMath::Trunc(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle msg = GetCallArg(argv, 0); JSTaggedNumber numberValue = JSTaggedValue::ToNumber(thread, msg); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); double value = numberValue.GetNumber(); double result = base::NAN_VALUE; if (!std::isfinite(value)) { diff --git a/ecmascript/builtins/builtins_math.h b/ecmascript/builtins/builtins_math.h index 293d6761cfc5d1e3363c4ef83460fda1f2f14b43..c80d1c47dba577662506a9ed3bffadc778a5401a 100644 --- a/ecmascript/builtins/builtins_math.h +++ b/ecmascript/builtins/builtins_math.h @@ -18,6 +18,58 @@ #include "ecmascript/base/builtins_base.h" +// List of constants in Math, excluding '@@' internal properties. +#define BUILTIN_MATH_CONSTANTS(V) \ + V(E) \ + V(LN10) \ + V(LN2) \ + V(LOG10E) \ + V(LOG2E) \ + V(PI) \ + V(SQRT1_2) \ + V(SQRT2) + +// List of functions in Math. +// V(name, func, length, stubIndex) +// where BuiltinsMath::func refers to the native implementation of Math[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_MATH_FUNCTIONS(V) \ + V("abs", Abs, 1, ABS) /* Math.abs ( x ) */ \ + V("acos", Acos, 1, ACOS) /* Math.acos ( x ) */ \ + V("acosh", Acosh, 1, INVALID) /* Math.acosh ( x ) */ \ + V("asin", Asin, 1, INVALID) /* Math.asin ( x ) */ \ + V("asinh", Asinh, 1, INVALID) /* Math.asinh ( x ) */ \ + V("atan", Atan, 1, ATAN) /* Math.atan ( x ) */ \ + V("atan2", Atan2, 2, INVALID) /* Math.atan2 ( y, x ) */ \ + V("atanh", Atanh, 1, INVALID) /* Math.atanh ( x ) */ \ + V("cbrt", Cbrt, 1, INVALID) /* Math.cbrt ( x ) */ \ + V("ceil", Ceil, 1, INVALID) /* Math.ceil ( x ) */ \ + V("clz32", Clz32, 1, INVALID) /* Math.clz32 ( x ) */ \ + V("cos", Cos, 1, COS) /* Math.cos ( x ) */ \ + V("cosh", Cosh, 1, INVALID) /* Math.cosh ( x ) */ \ + V("exp", Exp, 1, INVALID) /* Math.exp ( x ) */ \ + V("expm1", Expm1, 1, INVALID) /* Math.expm1 ( x ) */ \ + V("floor", Floor, 1, FLOOR) /* Math.floor ( x ) */ \ + V("fround", Fround, 1, INVALID) /* Math.fround ( x ) */ \ + V("hypot", Hypot, 2, INVALID) /* Math.hypot ( ...args ) */ \ + V("imul", Imul, 2, INVALID) /* Math.imul ( x, y ) */ \ + V("log", Log, 1, INVALID) /* Math.log ( x ) */ \ + V("log10", Log10, 1, INVALID) /* Math.log10 ( x ) */ \ + V("log1p", Log1p, 1, INVALID) /* Math.log1p ( x ) */ \ + V("log2", Log2, 1, INVALID) /* Math.log2 ( x ) */ \ + V("max", Max, 2, INVALID) /* Math.max ( ...args ) */ \ + V("min", Min, 2, INVALID) /* Math.min ( ...args ) */ \ + V("pow", Pow, 2, INVALID) /* Math.pow ( base, exponent ) */ \ + V("random", Random, 0, INVALID) /* Math.random ( ) */ \ + V("round", Round, 1, INVALID) /* Math.round ( x ) */ \ + V("sign", Sign, 1, INVALID) /* Math.sign ( x ) */ \ + V("sin", Sin, 1, SIN) /* Math.sin ( x ) */ \ + V("sinh", Sinh, 1, INVALID) /* Math.sinh ( x ) */ \ + V("sqrt", Sqrt, 1, SQRT) /* Math.sqrt ( x ) */ \ + V("tan", Tan, 1, INVALID) /* Math.tan ( x ) */ \ + V("tanh", Tanh, 1, INVALID) /* Math.tanh ( x ) */ \ + V("trunc", Trunc, 1, INVALID) /* Math.trunc ( x ) */ + namespace panda::ecmascript::builtins { class BuiltinsMath : public base::BuiltinsBase { public: @@ -107,6 +159,34 @@ public: static JSTaggedValue Tanh(EcmaRuntimeCallInfo *argv); // 20.2.2.35 static JSTaggedValue Trunc(EcmaRuntimeCallInfo *argv); + + // Excluding the '@@' internal properties. 
+ static Span GetMathConstants() + { + return Span(MATH_CONSTANTS); + } + + static Span GetMathFunctions() + { + return Span(MATH_FUNCTIONS); + } + +private: +#define BUILTIN_MATH_CONSTANT_ENTRY(name) \ + base::BuiltinConstantEntry::Create(#name, JSTaggedValue(BuiltinsMath::name)), + + static inline std::array MATH_CONSTANTS = { + BUILTIN_MATH_CONSTANTS(BUILTIN_MATH_CONSTANT_ENTRY) + }; +#undef BUILTIN_MATH_CONSTANT_ENTRY + +#define BUILTIN_MATH_FUNCTION_ENTRY(name, func, length, builtinId) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsMath::func, length, kungfu::BuiltinsStubCSigns::builtinId), + + static constexpr std::array MATH_FUNCTIONS = { + BUILTIN_MATH_FUNCTIONS(BUILTIN_MATH_FUNCTION_ENTRY) + }; +#undef BUILTIN_MATH_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_MATH_H diff --git a/ecmascript/builtins/builtins_number.cpp b/ecmascript/builtins/builtins_number.cpp index bbd99fcfc828922061c92d07ea982bde7bef60cf..e876022964690ceb825fd418b4751d42b9b32336 100644 --- a/ecmascript/builtins/builtins_number.cpp +++ b/ecmascript/builtins/builtins_number.cpp @@ -44,21 +44,26 @@ JSTaggedValue BuiltinsNumber::NumberConstructor(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle newTarget = GetNewTarget(argv); + // 1. If value is present, then a , b , c. // 2. Else Let n be +0𝔽. JSTaggedNumber numberValue(0); if (argv->GetArgsNumber() > 0) { JSHandle value = GetCallArg(argv, 0); // a. Let prim be ? ToNumeric(value). - JSHandle numericVal = JSTaggedValue::ToNumeric(thread, value); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // b. If Type(prim) is BigInt, let n be 𝔽(ℝ(prim)). - if (numericVal->IsBigInt()) { - JSHandle bigNumericVal(numericVal); - numberValue = BigInt::BigIntToNumber(bigNumericVal); + if (!value->IsNumber()) { + JSHandle numericVal = JSTaggedValue::ToNumeric(thread, value); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // b. If Type(prim) is BigInt, let n be 𝔽(ℝ(prim)). + if (numericVal->IsBigInt()) { + JSHandle bigNumericVal(numericVal); + numberValue = BigInt::BigIntToNumber(bigNumericVal); + } else { + // c. Otherwise, let n be prim. + numberValue = JSTaggedNumber(numericVal.GetTaggedValue()); + } } else { - // c. Otherwise, let n be prim. - numberValue = JSTaggedNumber(numericVal.GetTaggedValue()); + numberValue = JSTaggedNumber(value.GetTaggedValue()); } } // 3. If NewTarget is undefined, return n. @@ -196,6 +201,12 @@ JSTaggedValue BuiltinsNumber::ParseInt(EcmaRuntimeCallInfo *argv) // 1. Let inputString be ToString(string). JSHandle numberString = JSTaggedValue::ToString(thread, msg); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if ((radix == base::DECIMAL || radix == 0)) { + int32_t elementIndex = 0; + if (EcmaStringAccessor(numberString).ToInt(&elementIndex)) { + return GetTaggedInt(elementIndex); + } + } CVector buf; Span str = EcmaStringAccessor(numberString).ToUtf8Span(buf); @@ -303,7 +314,7 @@ JSTaggedValue BuiltinsNumber::ToLocaleString(EcmaRuntimeCallInfo *argv) // 1. Let x be ? thisNumberValue(this value). [[maybe_unused]] JSHandle x(thread, ThisNumberValue(thread, argv)); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - + JSHandle locales = GetCallArg(argv, 0); JSHandle options = GetCallArg(argv, 1); [[maybe_unused]] bool cacheable = (locales->IsUndefined() || locales->IsString()) && options->IsUndefined(); @@ -431,7 +442,14 @@ JSTaggedValue BuiltinsNumber::ToString(EcmaRuntimeCallInfo *argv) } // 8. 
If radixNumber = 10, return ToString(x). if (radix == base::DECIMAL) { - return value.ToString(thread).GetTaggedValue(); + JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetNumberToStringResultCache()); + JSTaggedValue cacheResult = cacheTable->FindCachedResult(value); + if (cacheResult != JSTaggedValue::Undefined()) { + return cacheResult; + } + JSHandle resultJSHandle = value.ToString(thread); + cacheTable->SetCachedResult(thread, value, resultJSHandle); + return resultJSHandle.GetTaggedValue(); } double valueNumber = value.GetNumber(); @@ -479,4 +497,32 @@ JSTaggedNumber BuiltinsNumber::ThisNumberValue(JSThread *thread, EcmaRuntimeCall [[maybe_unused]] EcmaHandleScope handleScope(thread); THROW_TYPE_ERROR_AND_RETURN(thread, "not number type", JSTaggedNumber::Exception()); } + +JSTaggedValue NumberToStringResultCache::CreateCacheTable(const JSThread *thread) +{ + int length = INITIAL_CACHE_NUMBER * ENTRY_SIZE; + auto table = static_cast( + *thread->GetEcmaVM()->GetFactory()->NewTaggedArray(length, JSTaggedValue::Undefined())); + return JSTaggedValue(table); +} + +JSTaggedValue NumberToStringResultCache::FindCachedResult(JSTaggedValue &number) +{ + int entry = NumberToStringResultCache::GetNumberHash(number); + uint32_t index = entry * ENTRY_SIZE; + JSTaggedValue entryNumber = Get(index + NUMBER_INDEX); + if (entryNumber == number) { + return Get(index + RESULT_INDEX); + } + return JSTaggedValue::Undefined(); +} + +void NumberToStringResultCache::SetCachedResult(const JSThread *thread, JSTaggedValue &number, + JSHandle &result) +{ + int entry = NumberToStringResultCache::GetNumberHash(number); + uint32_t index = static_cast(entry * ENTRY_SIZE); + Set(thread, index + NUMBER_INDEX, number); + Set(thread, index + RESULT_INDEX, result.GetTaggedValue()); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_number.h b/ecmascript/builtins/builtins_number.h index fc770f84ed24a71306ebae7e64421655ab070fa0..b3bde0ed31193e28ed03604b3b606c75e44f1931 100644 --- a/ecmascript/builtins/builtins_number.h +++ b/ecmascript/builtins/builtins_number.h @@ -19,9 +19,75 @@ #include "ecmascript/base/builtins_base.h" #include "ecmascript/js_tagged_value.h" +// List of constants in Number, excluding '@@' internal properties. +#define BUILTIN_NUMBER_CONSTANTS(V) \ + V(EPSILON) /* Number.EPSILON */ \ + V(MAX_SAFE_INTEGER) /* Number.MAX_SAFE_INTEGER */ \ + V(MAX_VALUE) /* Number.MAX_VALUE */ \ + V(MIN_SAFE_INTEGER) /* Number.MIN_SAFE_INTEGER */ \ + V(MIN_VALUE) /* Number.MIN_VALUE */ \ + V(NEGATIVE_INFINITY) /* Number.NEGATIVE_INFINITY */ \ + V(NaN) /* Number.NaN */ \ + V(POSITIVE_INFINITY) /* Number.POSITIVE_INFINITY */ + +// List of functions in Number. +// V(name, func, length, stubIndex) +// where BuiltinsNumber::func refers to the native implementation of Number[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_NUMBER_NON_GLOBAL_FUNCTIONS(V) \ + V("isFinite", IsFinite, 1, INVALID) /* Number.isFinite ( number ) */ \ + V("isInteger", IsInteger, 1, INVALID) /* Number.isInteger ( number ) */ \ + V("isNaN", IsNaN, 1, INVALID) /* Number.isNaN ( number ) */ \ + V("isSafeInteger", IsSafeInteger, 1, INVALID) /* Number.isSafeInteger ( number ) */ + +// List of functions in Number that can be accessed via globalThis. +// V(name, func, length, stubIndex) +// where BuiltinsNumber::func refers to the native implementation of Number[name]. 
+#define BUILTIN_NUMBER_GLOBAL_FUNCTIONS(V) \ + V("parseFloat", ParseFloat, 1, NumberParseFloat) /* Number.parseFloat ( string ) */ \ + V("parseInt", ParseInt, 2, INVALID) /* Number.parseInt ( string, radix ) */ + +#define BUILTIN_NUMBER_FUNCTIONS(V) \ + BUILTIN_NUMBER_NON_GLOBAL_FUNCTIONS(V) \ + BUILTIN_NUMBER_GLOBAL_FUNCTIONS(V) + +// List of functions in Number.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsNumber::func refers to the native implementation of Number.prototype[name]. +#define BUILTIN_NUMBER_PROTOTYPE_FUNCTIONS(V) \ + /* Number.prototype.toExponential ( fractionDigits ) */ \ + V("toExponential", ToExponential, 1, INVALID) \ + /* Number.prototype.toFixed ( fractionDigits ) */ \ + V("toFixed", ToFixed, 1, INVALID) \ + /* Number.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleString", ToLocaleString, 0, INVALID) \ + /* Number.prototype.toPrecision ( precision ) */ \ + V("toPrecision", ToPrecision, 1, INVALID) \ + /* Number.prototype.toString ( [ radix ] ) */ \ + V("toString", ToString, 1, INVALID) \ + /* Number.prototype.valueOf ( ) */ \ + V("valueOf", ValueOf, 0, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsNumber : public base::BuiltinsBase { public: + // 21.1.2.1 Number.EPSILON + static constexpr double EPSILON = std::numeric_limits::epsilon(); + // 21.1.2.6 Number.MAX_SAFE_INTEGER (which is 2**53 - 1 = 9007199254740991) + static constexpr int64_t MAX_SAFE_INTEGER = (1LL << 53) - 1; + // 21.1.2.8 Number.MIN_SAFE_INTEGER (which is -(2**53 - 1) = -9007199254740991) + static constexpr int64_t MIN_SAFE_INTEGER = -((1LL << 53) - 1); + // 21.1.2.7 Number.MAX_VALUE + static constexpr double MAX_VALUE = std::numeric_limits::max(); + // 21.1.2.9 Number.MIN_VALUE + static constexpr double MIN_VALUE = std::numeric_limits::denorm_min(); + // 21.1.2.14 Number.POSITIVE_INFINITY + static constexpr double POSITIVE_INFINITY = std::numeric_limits::infinity(); + // 21.1.2.11 Number.NEGATIVE_INFINITY + static constexpr double NEGATIVE_INFINITY = -POSITIVE_INFINITY; + // 21.1.2.10 Number.NaN + static constexpr double NaN = NAN; + // 20.1.1.1 static JSTaggedValue NumberConstructor(EcmaRuntimeCallInfo *argv); @@ -52,8 +118,83 @@ public: // 20.1.3.7 static JSTaggedValue ValueOf(EcmaRuntimeCallInfo *argv); + // Excluding the '@@' internal properties. + static Span GetNumberConstants() + { + return Span(NUMBER_CONSTANTS); + } + + // Excluding the '@@' internal properties. + static Span GetNumberNonGlobalFunctions() + { + return Span(NUMBER_NON_GLOBAL_FUNCTIONS); + } + + // Excluding the '@@' internal properties. + static Span GetNumberGlobalFunctions() + { + return Span(NUMBER_GLOBAL_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties. 
+ static Span<const base::BuiltinFunctionEntry> GetNumberPrototypeFunctions() + { + return Span<const base::BuiltinFunctionEntry>(NUMBER_PROTOTYPE_FUNCTIONS); + } + private: +#define BUILTIN_NUMBER_CONSTANT_ENTRY(name) \ + base::BuiltinConstantEntry::Create(#name, JSTaggedValue(BuiltinsNumber::name)), + + static inline std::array NUMBER_CONSTANTS = { + BUILTIN_NUMBER_CONSTANTS(BUILTIN_NUMBER_CONSTANT_ENTRY) + }; +#undef BUILTIN_NUMBER_CONSTANT_ENTRY + +#define BUILTIN_NUMBER_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsNumber::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array NUMBER_NON_GLOBAL_FUNCTIONS = { + BUILTIN_NUMBER_NON_GLOBAL_FUNCTIONS(BUILTIN_NUMBER_FUNCTION_ENTRY) + }; + static constexpr std::array NUMBER_GLOBAL_FUNCTIONS = { + BUILTIN_NUMBER_GLOBAL_FUNCTIONS(BUILTIN_NUMBER_FUNCTION_ENTRY) + }; + static constexpr std::array NUMBER_PROTOTYPE_FUNCTIONS = { + BUILTIN_NUMBER_PROTOTYPE_FUNCTIONS(BUILTIN_NUMBER_FUNCTION_ENTRY) + }; +#undef BUILTIN_NUMBER_FUNCTION_ENTRY + static JSTaggedNumber ThisNumberValue(JSThread *thread, EcmaRuntimeCallInfo *argv); }; + +class NumberToStringResultCache : public TaggedArray { +public: + static NumberToStringResultCache *Cast(TaggedObject *object) + { + return reinterpret_cast<NumberToStringResultCache *>(object); + } + static JSTaggedValue CreateCacheTable(const JSThread *thread); + JSTaggedValue FindCachedResult(JSTaggedValue &number); + void SetCachedResult(const JSThread *thread, JSTaggedValue &number, JSHandle<EcmaString> &result); + int GetNumberHash(JSTaggedValue &number) + { + int mask = INITIAL_CACHE_NUMBER - 1; + int value = 0; + if (number.IsInt()) { + value = number.GetInt(); + } else { + int64_t bits = base::bit_cast<int64_t>(number.GetDouble()); + value = static_cast<int>(bits) ^ static_cast<int>(bits >> 32); // 32: high 32 bits + } + return value & mask; + } + +private: + static constexpr int INITIAL_CACHE_NUMBER = 256; + static constexpr int NUMBER_INDEX = 0; + static constexpr int RESULT_INDEX = 1; + static constexpr int ENTRY_SIZE = 2; +}; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_NUBMER_H diff --git a/ecmascript/builtins/builtins_object.cpp b/ecmascript/builtins/builtins_object.cpp index 8cb27595c3b58d02220693dd5a7bdf12016b1e26..7b4d6dfa0f12bf5f78475911f29ab7efa1d143da 100644 --- a/ecmascript/builtins/builtins_object.cpp +++ b/ecmascript/builtins/builtins_object.cpp @@ -60,6 +60,37 @@ JSTaggedValue BuiltinsObject::ObjectConstructor(EcmaRuntimeCallInfo *argv) return JSTaggedValue::ToObject(thread, value).GetTaggedValue(); } +JSTaggedValue BuiltinsObject::AssignTaggedValue(JSThread *thread, const JSHandle<JSTaggedValue> &source, + const JSHandle<JSObject> &toAssign) +{ + JSHandle<JSObject> from = JSTaggedValue::ToObject(thread, source); + JSHandle<TaggedArray> keys = JSTaggedValue::GetOwnPropertyKeys(thread, JSHandle<JSTaggedValue>::Cast(from)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSMutableHandle<JSTaggedValue> key(thread, JSTaggedValue::Undefined()); + uint32_t keysLen = keys->GetLength(); + for (uint32_t j = 0; j < keysLen; j++) { + PropertyDescriptor desc(thread); + key.Update(keys->Get(j)); + bool success = JSTaggedValue::GetOwnProperty(thread, JSHandle<JSTaggedValue>::Cast(from), key, desc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + if (success && desc.IsEnumerable()) { + JSTaggedValue value = desc.GetValue().GetTaggedValue(); + if (value.IsUndefined() || JSHandle<JSTaggedValue>::Cast(from)->IsJSProxy()) { + value = ObjectFastOperator::FastGetPropertyByValue(thread, from.GetTaggedValue(), + key.GetTaggedValue()); + } + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + 
ObjectFastOperator::FastSetPropertyByValue(thread, toAssign.GetTaggedValue(), key.GetTaggedValue(), + value); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + } + return JSTaggedValue::Undefined(); +} + // 19.1.2.1 Object.assign ( target, ...sources ) JSTaggedValue BuiltinsObject::Assign(EcmaRuntimeCallInfo *argv) { @@ -686,14 +717,24 @@ JSTaggedValue BuiltinsObject::HasOwnProperty(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(thread, Object, HasOwnProperty); [[maybe_unused]] EcmaHandleScope handleScope(thread); // 1. Let P be ToPropertyKey(V). + JSHandle thisValue = GetThis(argv); JSHandle prop = GetCallArg(argv, 0); + + std::pair result = ObjectFastOperator::HasOwnProperty(thread, thisValue.GetTaggedValue(), + prop.GetTaggedValue()); + if (!result.first.IsHole()) { + return GetTaggedBoolean(true); + } else if (result.second) { + return GetTaggedBoolean(false); + } + JSHandle property = JSTaggedValue::ToPropertyKey(thread, prop); // 2. ReturnIfAbrupt(P). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 3. Let O be ToObject(this value). - JSHandle object = JSTaggedValue::ToObject(thread, GetThis(argv)); + JSHandle object = JSTaggedValue::ToObject(thread, thisValue); // 4. ReturnIfAbrupt(O). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -730,6 +771,7 @@ JSTaggedValue BuiltinsObject::IsPrototypeOf(EcmaRuntimeCallInfo *argv) return GetTaggedBoolean(true); } msgValueHandle.Update(JSTaggedValue::GetPrototype(thread, msgValueHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } return GetTaggedBoolean(false); } @@ -790,44 +832,47 @@ JSTaggedValue BuiltinsObject::ToLocaleString(EcmaRuntimeCallInfo *argv) return JSFunction::Invoke(info, calleeKey); } -JSTaggedValue BuiltinsObject::GetBuiltinTag(JSThread *thread, const JSHandle &object) +JSTaggedValue BuiltinsObject::GetBuiltinObjectToString(JSThread *thread, const JSHandle &object) { - BUILTINS_API_TRACE(thread, Object, GetBuiltinTag); + BUILTINS_API_TRACE(thread, Object, GetBuiltinObjectToString); // 4. Let isArray be IsArray(O). bool isArray = object->IsJSArray(); // 5. ReturnIfAbrupt(isArray). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JSHandle builtinTag = factory->NewFromASCII("Object"); - // 6. If isArray is true, let builtinTag be "Array". if (isArray) { - builtinTag = factory->NewFromASCII("Array"); + // 6. If isArray is true, return "[object Array]". + return thread->GlobalConstants()->GetArrayToString(); } else if (object->IsJSPrimitiveRef()) { - // 7. Else, if O is an exotic String object, let builtinTag be "String". + // 7. Else, if O is an exotic String object, return "[object String]". JSPrimitiveRef *primitiveRef = JSPrimitiveRef::Cast(*object); if (primitiveRef->IsString()) { - builtinTag = factory->NewFromASCII("String"); + return thread->GlobalConstants()->GetStringToString(); } else if (primitiveRef->IsBoolean()) { - // 11. Else, if O has a [[BooleanData]] internal slot, let builtinTag be "Boolean". - builtinTag = factory->NewFromASCII("Boolean"); + // 11. Else, if O has a [[BooleanData]] internal slot, return "[object Boolean]". + return thread->GlobalConstants()->GetBooleanToString(); } else if (primitiveRef->IsNumber()) { - // 12. Else, if O has a [[NumberData]] internal slot, let builtinTag be "Number". - builtinTag = factory->NewFromASCII("Number"); + // 12. Else, if O has a [[NumberData]] internal slot, return "[object Number]". 
+ return thread->GlobalConstants()->GetNumberToString(); } } else if (object->IsArguments()) { - builtinTag = factory->NewFromASCII("Arguments"); + // if O has a [[ArgumentsData]] internal slot, return "[object Arguments]". + return thread->GlobalConstants()->GetArgumentsToString(); } else if (object->IsCallable()) { - builtinTag = factory->NewFromASCII("Function"); + // if O has a [[CallableData]] internal slot, return "[object Function]". + return thread->GlobalConstants()->GetFunctionToString(); } else if (object->IsJSError()) { - builtinTag = JSHandle<EcmaString>::Cast(thread->GlobalConstants()->GetHandledErrorString()); + // if O has a [[ErrorData]] internal slot, return "[object Error]". + return thread->GlobalConstants()->GetErrorToString(); } else if (object->IsDate()) { - builtinTag = factory->NewFromASCII("Date"); + // if O has a [[DateData]] internal slot, return "[object Date]". + return thread->GlobalConstants()->GetDateToString(); } else if (object->IsJSRegExp()) { - builtinTag = factory->NewFromASCII("RegExp"); + // if O has a [[RegExpData]] internal slot, return "[object RegExp]". + return thread->GlobalConstants()->GetRegExpToString(); } - // 15. Else, let builtinTag be "Object". - return builtinTag.GetTaggedValue(); + // 15. Else, return "[object Object]". + return thread->GlobalConstants()->GetObjectToString(); } // 19.1.3.6 Object.prototype.toString() @@ -841,17 +886,16 @@ JSTaggedValue BuiltinsObject::ToString(EcmaRuntimeCallInfo *argv) JSHandle<JSTaggedValue> msg = GetThis(argv); if (msg->IsUndefined()) { - return GetTaggedString(thread, "[object Undefined]"); + return thread->GlobalConstants()->GetUndefinedToString(); } // 2. If the this value is null, return "[object Null]". if (msg->IsNull()) { - return GetTaggedString(thread, "[object Null]"); + return thread->GlobalConstants()->GetNullToString(); } // 3. Let O be ToObject(this value). JSHandle<JSObject> object = JSTaggedValue::ToObject(thread, GetThis(argv)); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSHandle<JSTaggedValue> builtinTag(thread, GetBuiltinTag(thread, object)); // 16. Let tag be Get (O, @@toStringTag). auto ecmaVm = thread->GetEcmaVM(); @@ -863,9 +907,9 @@ JSTaggedValue BuiltinsObject::ToString(EcmaRuntimeCallInfo *argv) // 17. ReturnIfAbrupt(tag). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 18. If Type(tag) is not String, let tag be builtinTag. + // 18. If Type(tag) is not String, return builtin object to string. if (!tag->IsString()) { - tag = builtinTag; + return GetBuiltinObjectToString(thread, object); } // 19. Return the String that is the result of concatenating "[object ", tag, and "]". @@ -874,6 +918,7 @@ JSTaggedValue BuiltinsObject::ToString(EcmaRuntimeCallInfo *argv) JSHandle<EcmaString> newLeftStringHandle = factory->ConcatFromString(leftString, JSTaggedValue::ToString(thread, tag)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); auto result = factory->ConcatFromString(newLeftStringHandle, rightString); return result.GetTaggedValue(); } @@ -1035,6 +1080,7 @@ JSTaggedValue BuiltinsObject::CreateDataPropertyOnObjectFunctions(EcmaRuntimeCal // 5. Perform ! CreateDataPropertyOrThrow(O, propertyKey, value). JSObject::CreateDataPropertyOrThrow(thread, thisObjHandle, propertyKey, value); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Return undefined. 
return JSTaggedValue::Undefined(); diff --git a/ecmascript/builtins/builtins_object.h b/ecmascript/builtins/builtins_object.h index 0e7043f3a96085740f3dce4bdf3b14a57b1d139f..16516203e8cd5bb45095841bec983c17453d5c29 100644 --- a/ecmascript/builtins/builtins_object.h +++ b/ecmascript/builtins/builtins_object.h @@ -21,6 +21,74 @@ #include "ecmascript/js_handle.h" #include "ecmascript/js_hclass.h" +// List of functions in Object, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsObject::func refers to the native implementation of Object[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +// The following functions are not implemented yet: +// - Object.getOwnPropertyDescriptors ( O ) +#define BUILTIN_OBJECT_FUNCTIONS(V) \ + /* Object.assign ( target, ...sources ) */ \ + V("assign", Assign, 2, ObjectAssign) \ + /* Object.create ( O, Properties ) */ \ + V("create", Create, 2, ObjectCreate) \ + /* Object.defineProperties ( O, Properties ) */ \ + V("defineProperties", DefineProperties, 2, INVALID) \ + /* Object.defineProperty ( O, P, Attributes ) */ \ + V("defineProperty", DefineProperty, 3, INVALID) \ + /* Object.entries ( O ) */ \ + V("entries", Entries, 1, INVALID) \ + /* Object.freeze ( O ) */ \ + V("freeze", Freeze, 1, INVALID) \ + /* Object.fromEntries ( iterable ) */ \ + V("fromEntries", FromEntries, 1, INVALID) \ + /* Object.getOwnPropertyDescriptor ( O, P ) */ \ + V("getOwnPropertyDescriptor", GetOwnPropertyDescriptor, 2, INVALID) \ + /* Object.getOwnPropertyNames ( O ) */ \ + V("getOwnPropertyNames", GetOwnPropertyNames, 1, INVALID) \ + /* Object.getOwnPropertySymbols ( O ) */ \ + V("getOwnPropertySymbols", GetOwnPropertySymbols, 1, INVALID) \ + /* Object.getPrototypeOf ( O ) */ \ + V("getPrototypeOf", GetPrototypeOf, 1, INVALID) \ + /* Object.hasOwn ( O, P ) */ \ + V("hasOwn", HasOwn, 2, INVALID) \ + /* Object.is ( value1, value2 ) */ \ + V("is", Is, 2, INVALID) \ + /* Object.isExtensible ( O ) */ \ + V("isExtensible", IsExtensible, 1, INVALID) \ + /* Object.isFrozen ( O ) */ \ + V("isFrozen", IsFrozen, 1, INVALID) \ + /* Object.isSealed ( O ) */ \ + V("isSealed", IsSealed, 1, INVALID) \ + /* Object.keys ( O ) */ \ + V("keys", Keys, 1, INVALID) \ + /* Object.preventExtensions ( O ) */ \ + V("preventExtensions", PreventExtensions, 1, INVALID) \ + /* Object.seal ( O ) */ \ + V("seal", Seal, 1, INVALID) \ + /* Object.setPrototypeOf ( O, proto ) */ \ + V("setPrototypeOf", SetPrototypeOf, 2, INVALID) \ + /* Object.values ( O ) */ \ + V("values", Values, 1, INVALID) + +// List of functions in Object.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsObject::func refers to the native implementation of Object.prototype[name]. 
+#define BUILTIN_OBJECT_PROTOTYPE_FUNCTIONS(V) \ + V("createRealm", CreateRealm, 0, INVALID) \ + /* Object.prototype.hasOwnProperty ( V ) */ \ + V("hasOwnProperty", HasOwnProperty, 1, INVALID) \ + /* Object.prototype.isPrototypeOf ( V ) */ \ + V("isPrototypeOf", IsPrototypeOf, 1, INVALID) \ + /* Object.prototype.propertyIsEnumerable ( V ) */ \ + V("propertyIsEnumerable", PropertyIsEnumerable, 1, INVALID) \ + /* Object.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleString", ToLocaleString, 0, INVALID) \ + /* Object.prototype.toString ( ) */ \ + V("toString", ToString, 0, ObjectToString) \ + /* Object.prototype.valueOf ( ) */ \ + V("valueOf", ValueOf, 0, INVALID) + namespace panda::ecmascript::builtins { enum class KeyType : uint8_t { STRING_TYPE = 0, @@ -96,11 +164,35 @@ public: // 20.1.2.13 Object.hasOwn ( O, P ) static JSTaggedValue HasOwn(EcmaRuntimeCallInfo *argv); + static Span GetObjectFunctions() + { + return Span(OBJECT_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties + static Span GetObjectPrototypeFunctions() + { + return Span(OBJECT_PROTOTYPE_FUNCTIONS); + } + + static JSTaggedValue AssignTaggedValue(JSThread *thread, const JSHandle &source, + const JSHandle &toAssign); private: +#define BUILTIN_OBJECT_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsObject::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array OBJECT_FUNCTIONS = { + BUILTIN_OBJECT_FUNCTIONS(BUILTIN_OBJECT_FUNCTION_ENTRY) + }; + static constexpr std::array OBJECT_PROTOTYPE_FUNCTIONS = { + BUILTIN_OBJECT_PROTOTYPE_FUNCTIONS(BUILTIN_OBJECT_FUNCTION_ENTRY) + }; +#undef BUILTIN_OBJECT_FUNCTION_ENTRY + static JSTaggedValue ObjectDefineProperties(JSThread *thread, const JSHandle &obj, const JSHandle &prop); static JSTaggedValue GetOwnPropertyKeys(JSThread *thread, const JSHandle &obj, const KeyType &type); - static JSTaggedValue GetBuiltinTag(JSThread *thread, const JSHandle &object); + static JSTaggedValue GetBuiltinObjectToString(JSThread *thread, const JSHandle &object); }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_OBJECT_H diff --git a/ecmascript/builtins/builtins_promise.cpp b/ecmascript/builtins/builtins_promise.cpp index b145ef32d8ebb0a788ae3fbbb76c0a45acce3517..58c97ca44d7f8addb5785e07a4cc69f15e2c022a 100644 --- a/ecmascript/builtins/builtins_promise.cpp +++ b/ecmascript/builtins/builtins_promise.cpp @@ -73,7 +73,7 @@ JSTaggedValue BuiltinsPromise::PromiseConstructor(EcmaRuntimeCallInfo *argv) auto resolveFunc = resolvingFunction->GetResolveFunction(); auto rejectFunc = resolvingFunction->GetRejectFunction(); JSHandle undefined = globalConst->GetHandledUndefined(); - const int32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» + const uint32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, executor, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(resolveFunc, rejectFunc); @@ -152,6 +152,7 @@ JSTaggedValue BuiltinsPromise::All(EcmaRuntimeCallInfo *argv) if (!itRecord->GetDone()) { JSHandle closeVal = JSIterator::IteratorClose(thread, itor, JSHandle::Cast(result)); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, capa); if (closeVal.GetTaggedValue().IsRecord()) { result = JSHandle::Cast(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, capa); 
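Every builtins_promise.cpp hunk above applies the same discipline the Math and Map changes did earlier: a call that can raise a JS exception (here IteratorClose) is followed immediately by an abrupt-completion check, before its return value is inspected, because once an exception is pending the callee's result is meaningless. A minimal, self-contained sketch of that pattern, using hypothetical stand-ins (VMThread, CloseIterator, RETURN_ERROR_IF_ABRUPT) rather than the runtime's real types:

#include <iostream>
#include <optional>
#include <string>

struct VMThread {
    // Models the pending-exception slot that JSThread keeps.
    std::optional<std::string> pendingException;
    bool HasPendingException() const { return pendingException.has_value(); }
};

// Models RETURN_REJECT_PROMISE_IF_ABRUPT: bail out as soon as a callee threw,
// before the caller touches the (now meaningless) return value.
#define RETURN_ERROR_IF_ABRUPT(thread)        \
    if ((thread).HasPendingException()) {     \
        return -1;                            \
    }

int CloseIterator(VMThread &thread, bool shouldThrow)
{
    if (shouldThrow) {
        thread.pendingException = "TypeError: iterator.return is not callable";
        return 0;  // garbage once an exception is pending
    }
    return 42;
}

int Combinator(VMThread &thread, bool shouldThrow)
{
    int closeResult = CloseIterator(thread, shouldThrow);
    RETURN_ERROR_IF_ABRUPT(thread);  // the guard this patch inserts after each IteratorClose
    return closeResult;              // only reached when no exception is pending
}

int main()
{
    VMThread thread;
    std::cout << Combinator(thread, false) << std::endl;  // 42
    std::cout << Combinator(thread, true) << std::endl;   // -1
}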
@@ -220,6 +221,7 @@ JSTaggedValue BuiltinsPromise::Race(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle<JSTaggedValue> value = JSIterator::IteratorClose(thread, iterator, JSHandle<JSTaggedValue>::Cast(result)); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (value.GetTaggedValue().IsCompletionRecord()) { result = JSHandle<CompletionRecord>(value); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); @@ -694,6 +696,7 @@ JSTaggedValue BuiltinsPromise::Any(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle<JSTaggedValue> resultHandle = JSHandle<JSTaggedValue>::Cast(result); JSHandle<JSTaggedValue> closeVal = JSIterator::IteratorClose(thread, iterator, resultHandle); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (closeVal.GetTaggedValue().IsCompletionRecord()) { result = JSHandle<CompletionRecord>(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); @@ -759,6 +762,7 @@ JSHandle<CompletionRecord> BuiltinsPromise::PerformPromiseAny(JSThread *thread, PropertyDescriptor msgDesc(thread, errorsValue, true, false, true); JSHandle<JSTaggedValue> errorTagged = JSHandle<JSTaggedValue>::Cast(error); JSTaggedValue::DefinePropertyOrThrow(thread, errorTagged, errorsKey, msgDesc); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(CompletionRecord, thread); // 3. Return ThrowCompletion(error). JSHandle<CompletionRecord> errorCompletion( factory->NewCompletionRecord(CompletionRecordType::THROW, errorTagged)); @@ -876,6 +880,7 @@ JSTaggedValue BuiltinsPromise::AllSettled(EcmaRuntimeCallInfo *argv) if (!iteratorRecord->GetDone()) { JSHandle<JSTaggedValue> resultHandle = JSHandle<JSTaggedValue>::Cast(result); JSHandle<JSTaggedValue> closeVal = JSIterator::IteratorClose(thread, iterator, resultHandle); + RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); if (closeVal.GetTaggedValue().IsCompletionRecord()) { result = JSHandle<CompletionRecord>(closeVal); RETURN_REJECT_PROMISE_IF_ABRUPT(thread, result, promiseCapability); diff --git a/ecmascript/builtins/builtins_promise.h b/ecmascript/builtins/builtins_promise.h index aa44e32dbfeca01016b9c3a95889d3ebccde8bae..415bf6f361362291e69e54642119efb6521a9a23 100644 --- a/ecmascript/builtins/builtins_promise.h +++ b/ecmascript/builtins/builtins_promise.h @@ -23,6 +23,35 @@ #include "ecmascript/js_tagged_value.h" #include "ecmascript/object_factory.h" +// List of functions in Promise, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsPromise::func refers to the native implementation of Promise[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_PROMISE_FUNCTIONS(V) \ + /* Promise.all ( iterable ) */ \ + V("all", All, 1, INVALID) \ + /* Promise.allSettled ( iterable ) */ \ + V("allSettled", AllSettled, 1, INVALID) \ + /* Promise.any ( iterable ) */ \ + V("any", Any, 1, INVALID) \ + /* Promise.race ( iterable ) */ \ + V("race", Race, 1, INVALID) \ + /* Promise.reject ( r ) */ \ + V("reject", Reject, 1, INVALID) \ + /* Promise.resolve ( x ) */ \ + V("resolve", Resolve, 1, INVALID) + +// List of functions in Promise.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsPromise::func refers to the native implementation of Promise.prototype[name]. 
+#define BUILTIN_PROMISE_PROTOTYPE_FUNCTIONS(V) \ + /* Promise.prototype.catch ( onRejected ) */ \ + V("catch", Catch, 1, INVALID) \ + /* Promise.prototype.finally ( onFinally ) */ \ + V("finally", Finally, 1, INVALID) \ + /* Promise.prototype.then ( onFulfilled, onRejected ) */ \ + V("then", Then, 2, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsPromise : public base::BuiltinsBase { public: @@ -56,7 +85,30 @@ public: static JSTaggedValue GetPromiseResolve(JSThread *thread, JSHandle promiseConstructor); + // Excluding the '@@' internal properties + static Span GetPromiseFunctions() + { + return Span(PROMISE_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties. + static Span GetPromisePrototypeFunctions() + { + return Span(PROMISE_PROTOTYPE_FUNCTIONS); + } + private: +#define BUILTIN_PROMISE_FUNCTION_ENTRY(name, method, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsPromise::method, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array PROMISE_FUNCTIONS = { + BUILTIN_PROMISE_FUNCTIONS(BUILTIN_PROMISE_FUNCTION_ENTRY) + }; + static constexpr std::array PROMISE_PROTOTYPE_FUNCTIONS = { + BUILTIN_PROMISE_PROTOTYPE_FUNCTIONS(BUILTIN_PROMISE_FUNCTION_ENTRY) + }; +#undef BUILTIN_PROMISE_FUNCTION_ENTRY + static JSTaggedValue PerformPromiseAll(JSThread *thread, const JSHandle &itRecord, const JSHandle &ctor, diff --git a/ecmascript/builtins/builtins_promise_handler.cpp b/ecmascript/builtins/builtins_promise_handler.cpp index d9fcc4e0b5b4544bbdfc03e13cdc99c5ce055bbe..74a7c561390f566175ef9343cf692633a2c21b36 100644 --- a/ecmascript/builtins/builtins_promise_handler.cpp +++ b/ecmascript/builtins/builtins_promise_handler.cpp @@ -274,8 +274,8 @@ JSTaggedValue BuiltinsPromiseHandler::ThenFinally(EcmaRuntimeCallInfo *argv) EcmaRuntimeCallInfo *taggedInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, onFinally, undefined, undefined, 0); JSTaggedValue result = JSFunction::Call(taggedInfo); - JSHandle resultHandle(thread, result); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle resultHandle(thread, result); // 5. Let C be F.[[Constructor]]. // 6. Assert: IsConstructor(C) is true. JSHandle thenFinallyConstructor(thread, thenFinally->GetConstructor()); @@ -315,8 +315,8 @@ JSTaggedValue BuiltinsPromiseHandler::CatchFinally(EcmaRuntimeCallInfo *argv) EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, onFinally, undefined, undefined, 0); JSTaggedValue result = JSFunction::Call(info); - JSHandle resultHandle(thread, result); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle resultHandle(thread, result); // 5. Let C be F.[[Constructor]]. // 6. Assert: IsConstructor(C) is true. JSHandle catchFinallyConstructor(thread, catchFinally->GetConstructor()); @@ -361,6 +361,7 @@ JSHandle BuiltinsPromiseHandler::PromiseResolve(JSThread *thread, // 3. Let promiseCapability be ? NewPromiseCapability(C). // 4. ReturnIfAbrupt(promiseCapability) JSHandle promiseCapability = JSPromise::NewPromiseCapability(thread, constructor); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); JSHandle promiseCapaHandle = JSHandle::Cast(promiseCapability); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, promiseCapaHandle); // 6. Let resolveResult be Call(promiseCapability.[[Resolve]], undefined, «x»). 
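The ThenFinally/CatchFinally hunks above are a pure reordering: the raw call result is boxed into a JSHandle only after the abrupt-completion check, so a handle is never rooted for an exception sentinel. A hedged sketch of why the order matters, with illustrative types (Thread, Value, Handle) that only model the real ones, not the VM's actual API:

#include <cassert>
#include <cstdint>

struct Thread {
    bool pending = false;  // models the pending-exception flag
};

struct Value {
    int64_t raw = 0;
    static Value Exception() { return Value{-1}; }  // sentinel for a thrown completion
};

// Stand-in for JSHandle: in the real VM constructing one registers `raw` with
// the GC, which is wasted work (and a stale root) when `raw` is the sentinel.
struct Handle {
    explicit Handle(Thread &, Value v) : value(v) {}
    Value value;
};

Value CallOnFinally(Thread &thread, bool throws)
{
    if (throws) {
        thread.pending = true;
        return Value::Exception();
    }
    return Value{7};
}

Value ThenFinally(Thread &thread, bool throws)
{
    Value result = CallOnFinally(thread, throws);
    if (thread.pending) {                 // check first...
        return Value::Exception();
    }
    Handle resultHandle(thread, result);  // ...then box the result for later use
    return resultHandle.value;
}

int main()
{
    Thread t;
    assert(ThenFinally(t, false).raw == 7);
    assert(ThenFinally(t, true).raw == -1);
    return 0;
}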
@@ -415,10 +416,12 @@ JSTaggedValue BuiltinsPromiseHandler::AllSettledResolveElementFunction(EcmaRunti JSHandle statusKey = globalConst->GetHandledPromiseStatusString(); JSHandle fulfilledKey = globalConst->GetHandledPromiseFulfilledString(); JSObject::CreateDataPropertyOrThrow(thread, obj, statusKey, fulfilledKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Perform ! CreateDataPropertyOrThrow(obj, "value", x). JSHandle valueKey = globalConst->GetHandledValueString(); JSHandle xValue = GetCallArg(argv, 0); JSObject::CreateDataPropertyOrThrow(thread, obj, valueKey, xValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 12. Set values[index] to obj. JSHandle arrayValues = JSHandle::Cast(JSHandle(thread, values->GetValue())); @@ -478,10 +481,12 @@ JSTaggedValue BuiltinsPromiseHandler::AllSettledRejectElementFunction(EcmaRuntim JSHandle statusKey = globalConst->GetHandledPromiseStatusString(); JSHandle rejectedKey = globalConst->GetHandledPromiseRejectedString(); JSObject::CreateDataPropertyOrThrow(thread, obj, statusKey, rejectedKey); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 11. Perform ! CreateDataPropertyOrThrow(obj, "reason", x). JSHandle xReason = GetCallArg(argv, 0); JSHandle reasonKey = globalConst->GetHandledPromiseReasonString(); JSObject::CreateDataPropertyOrThrow(thread, obj, reasonKey, xReason); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 12. Set values[index] to obj. JSHandle arrayValues = JSHandle::Cast(JSHandle(thread, values->GetValue())); @@ -550,6 +555,7 @@ JSTaggedValue BuiltinsPromiseHandler::AnyRejectElementFunction(EcmaRuntimeCallIn PropertyDescriptor msgDesc(thread, errorsValue, true, false, true); JSHandle errorTagged = JSHandle::Cast(error); JSTaggedValue::DefinePropertyOrThrow(thread, errorTagged, errorsKey, msgDesc); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »). JSHandle capaReject(thread, capa->GetReject()); JSHandle undefined(globalConst->GetHandledUndefined()); diff --git a/ecmascript/builtins/builtins_promise_job.cpp b/ecmascript/builtins/builtins_promise_job.cpp index 51ed2a87a4e8aa8fe797a80eee779d9ad4def9df..d4ddde6a04550dde7a1ea8d11bcb06c3810b6dca 100644 --- a/ecmascript/builtins/builtins_promise_job.cpp +++ b/ecmascript/builtins/builtins_promise_job.cpp @@ -15,7 +15,6 @@ #include "ecmascript/builtins/builtins_promise_job.h" -#include "ecmascript/base/path_helper.h" #include "ecmascript/ecma_macros.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/interpreter.h" @@ -26,13 +25,16 @@ #include "ecmascript/js_promise.h" #include "ecmascript/js_tagged_value.h" #include "ecmascript/module/js_dynamic_import.h" +#include "ecmascript/module/js_module_deregister.h" #include "ecmascript/module/js_module_manager.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/platform/file.h" #include "ecmascript/require/js_cjs_module.h" #include "libpandabase/macros.h" namespace panda::ecmascript::builtins { -using PathHelper = base::PathHelper; +using JSRecordInfo = ecmascript::JSPandaFile::JSRecordInfo; + JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) { ASSERT(argv); @@ -51,10 +53,11 @@ JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) // 3. Let handler be reaction.[[Handler]]. 
JSHandle handler(thread, reaction->GetHandler()); JSHandle call(thread, capability->GetResolve()); - const int32_t argsLength = 1; + const uint32_t argsLength = 1; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *runtimeInfo = EcmaInterpreter::NewRuntimeCallInfo(thread, call, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (handler->IsString()) { // 4. If handler is "Identity", let handlerResult be NormalCompletion(argument). // 5. Else if handler is "Thrower", let handlerResult be Completion{[[type]]: throw, [[value]]: argument, @@ -68,6 +71,7 @@ JSTaggedValue BuiltinsPromiseJob::PromiseReactionJob(EcmaRuntimeCallInfo *argv) // 6. Else, let handlerResult be Call(handler, undefined, «argument»). EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, handler, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argument.GetTaggedValue()); JSTaggedValue taggedValue = JSFunction::Call(info); // 7. If handlerResult is an abrupt completion, then @@ -101,7 +105,7 @@ JSTaggedValue BuiltinsPromiseJob::PromiseResolveThenableJob(EcmaRuntimeCallInfo JSHandle then = GetCallArg(argv, BuiltinsBase::ArgsPosition::THIRD); // 2. Let thenCallResult be Call(then, thenable, «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]»). - const int32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» + const uint32_t argsLength = 2; // 2: «resolvingFunctions.[[Resolve]], resolvingFunctions.[[Reject]]» JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, then, thenable, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -135,46 +139,46 @@ JSTaggedValue BuiltinsPromiseJob::DynamicImportJob(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle resolve(GetCallArg(argv, 0)); - JSHandle reject(GetCallArg(argv, 1)); // 1 : first argument - JSHandle dirPath(GetCallArg(argv, 2)); // 2 : second argument - JSHandle specifier(GetCallArg(argv, 3)); // 3 : third argument - JSHandle recordName(GetCallArg(argv, 4)); // 4 : fourth recordName + JSHandle reject(GetCallArg(argv, 1)); // 1 : reject method + JSHandle dirPath(GetCallArg(argv, 2)); // 2 : current file path (containing file name) + JSHandle specifier(GetCallArg(argv, 3)); // 3 : request module's path + JSHandle recordName(GetCallArg(argv, 4)); // 4 : js recordName or undefined // Let specifierString be Completion(ToString(specifier)) JSHandle specifierString = JSTaggedValue::ToString(thread, specifier); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - JSHandle moduleName; + // Resolve request module's ohmurl + JSMutableHandle moduleName(thread, thread->GlobalConstants()->GetUndefined()); CString entryPoint = JSPandaFile::ENTRY_MAIN_FUNCTION; - CString baseFilename = ConvertToString(dirPath.GetTaggedValue()); - CString fileNameStr = ""; + CString fileNameStr = ConvertToString(dirPath.GetTaggedValue()); CString requestPath = ConvertToString(specifierString.GetTaggedValue()); + LOG_ECMA(DEBUG) << "Start importing dynamic module : " << requestPath; // resolve native module auto [isNative, moduleType] = SourceTextModule::CheckNativeModule(requestPath); + ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); if (isNative) { return DynamicImport::ExecuteNativeModule(thread, specifierString, moduleType,
resolve, reject); } + if (recordName->IsUndefined()) { - moduleName = ResolveFilenameFromNative(thread, dirPath.GetTaggedValue(), - specifierString.GetTaggedValue()); + moduleName.Update(ResolveFilenameFromNative(thread, dirPath.GetTaggedValue(), + specifierString.GetTaggedValue()).GetTaggedValue()); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); fileNameStr = ConvertToString(moduleName.GetTaggedValue()); } else { CString recordNameStr = ConvertToString(recordName.GetTaggedValue()); std::shared_ptr jsPandaFile = - JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, baseFilename, recordNameStr.c_str()); + JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, fileNameStr, recordNameStr.c_str()); if (jsPandaFile == nullptr) { - CString msg = "Load file with filename '" + baseFilename + "' failed, recordName '" + recordNameStr + "'"; - JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); - THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); + LOG_FULL(FATAL) << "Load current file's panda file failed. Current file is " << recordNameStr; } - entryPoint = - PathHelper::ConcatFileNameWithMerge(thread, jsPandaFile.get(), baseFilename, recordNameStr, requestPath); + ModulePathHelper::ConcatFileNameWithMerge(thread, jsPandaFile.get(), + fileNameStr, recordNameStr, requestPath); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - fileNameStr = baseFilename; - moduleName = vm->GetFactory()->NewFromUtf8(entryPoint); + moduleName.Update(factory->NewFromUtf8(entryPoint).GetTaggedValue()); } std::shared_ptr jsPandaFile = JSPandaFileManager::GetInstance()->LoadJSPandaFile(thread, fileNameStr, entryPoint); @@ -183,34 +187,45 @@ JSTaggedValue BuiltinsPromiseJob::DynamicImportJob(EcmaRuntimeCallInfo *argv) JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); } - bool isModule = jsPandaFile->IsModule(thread, entryPoint); - RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); - JSMutableHandle moduleNamespace(thread, JSTaggedValue::Undefined()); + + // Loading request module. if (!moduleManager->IsImportedModuleLoaded(moduleName.GetTaggedValue())) { if (!JSPandaFileExecutor::ExecuteFromFile(thread, fileNameStr.c_str(), entryPoint.c_str(), false, true)) { CString msg = "Cannot execute request dynamic-imported module : " + entryPoint; JSTaggedValue error = factory->GetJSError(ErrorType::REFERENCE_ERROR, msg.c_str()).GetTaggedValue(); THROW_NEW_ERROR_AND_RETURN_VALUE(thread, error, CatchException(thread, reject)); } + } else { + ModuleDeregister::ReviseLoadedModuleCount(thread, moduleName.GetTaggedValue()); } RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); - if (!isModule) { + JSRecordInfo recordInfo; + bool hasRecord = jsPandaFile->CheckAndGetRecordInfo(entryPoint, recordInfo); + if (!hasRecord) { + LOG_FULL(ERROR) << "cannot find record '" << entryPoint <<"' in basefileName " << fileNameStr << "."; + CString msg = "cannot find record '" + entryPoint + "', please check the request path."; + THROW_REFERENCE_ERROR_AND_RETURN(thread, msg.c_str(), CatchException(thread, reject)); + } + JSMutableHandle moduleNamespace(thread, JSTaggedValue::Undefined()); + // only support importing es module, or return a default object. 
+ if (!jsPandaFile->IsModule(recordInfo)) { moduleNamespace.Update(vm->GetGlobalEnv()->GetExportOfScript()); } else { // b. Let moduleRecord be ! HostResolveImportedModule(referencingScriptOrModule, specifier). JSHandle moduleRecord = moduleManager->HostGetImportedModule(moduleName.GetTaggedValue()); - // d. Let namespace be ? GetModuleNamespace(moduleRecord). - moduleNamespace.Update(SourceTextModule::GetModuleNamespace(thread, moduleRecord)); + JSHandle nameSp = SourceTextModule::GetModuleNamespace(thread, moduleRecord); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); + // d. Let namespace be ? GetModuleNamespace(moduleRecord). + moduleNamespace.Update(nameSp); } JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, JSHandle(resolve), undefined, undefined, 1); + RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, CatchException(thread, reject)); info->SetCallArg(moduleNamespace.GetTaggedValue()); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSFunction::Call(info); diff --git a/ecmascript/builtins/builtins_proxy.cpp b/ecmascript/builtins/builtins_proxy.cpp index fa7f52944d1807b2d822b64e11ed23f22e430d36..7753f81e827d458868fabac35faf6257053e6363 100644 --- a/ecmascript/builtins/builtins_proxy.cpp +++ b/ecmascript/builtins/builtins_proxy.cpp @@ -85,12 +85,8 @@ JSTaggedValue BuiltinsProxy::InvalidateProxyFunction(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - JSHandle revokeObj(GetThis(argv)); - JSHandle revokeKey = thread->GlobalConstants()->GetHandledRevokeString(); - - PropertyDescriptor desc(thread); - JSObject::GetOwnProperty(thread, revokeObj, revokeKey, desc); - JSProxyRevocFunction::ProxyRevocFunctions(thread, JSHandle(desc.GetValue())); + JSHandle proxy = GetConstructor(argv); + JSProxyRevocFunction::ProxyRevocFunctions(thread, JSHandle(proxy)); return JSTaggedValue::Undefined(); } } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_reflect.cpp b/ecmascript/builtins/builtins_reflect.cpp index 05f037fac98cceb5537885d3656ceec6c5b25007..0cd4459a717734a5d708fde7e3144b5cdcbc2016 100644 --- a/ecmascript/builtins/builtins_reflect.cpp +++ b/ecmascript/builtins/builtins_reflect.cpp @@ -40,7 +40,7 @@ JSTaggedValue BuiltinsReflect::ReflectApply(EcmaRuntimeCallInfo *argv) // 3. Perform PrepareForTailCall(). // 4. Return ? Call(target, thisArgument, args). - const int32_t argsLength = static_cast(args->GetLength()); + const uint32_t argsLength = args->GetLength(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, target, thisArgument, undefined, argsLength); @@ -75,7 +75,7 @@ JSTaggedValue BuiltinsReflect::ReflectConstruct(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle args = JSHandle::Cast(argOrAbrupt); // 5. Return ? Construct(target, args, newTarget). 
- const int32_t argsLength = static_cast(args->GetLength()); + const uint32_t argsLength = args->GetLength(); JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, target, undefined, newTarget, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); diff --git a/ecmascript/builtins/builtins_reflect.h b/ecmascript/builtins/builtins_reflect.h index 9c2e926c5883a5bbc06946a3782aa5c3ccd326f6..3c8b9b3397d8ff0b1e227aa0bca72cf62acc63e4 100644 --- a/ecmascript/builtins/builtins_reflect.h +++ b/ecmascript/builtins/builtins_reflect.h @@ -20,6 +20,38 @@ #include "ecmascript/js_function.h" #include "ecmascript/js_array.h" +// List of functions in Reflect, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsReflect::func refers to the native implementation of Reflect[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_REFLECT_FUNCTIONS(V) \ + /* Reflect.apply ( target, thisArgument, argumentsList ) */ \ + V("apply", ReflectApply, 3, INVALID) \ + /* Reflect.construct ( target, argumentsList [ , newTarget ] ) */ \ + V("construct", ReflectConstruct, 2, INVALID) \ + /* Reflect.defineProperty ( target, propertyKey, attributes ) */ \ + V("defineProperty", ReflectDefineProperty, 3, INVALID) \ + /* Reflect.deleteProperty ( target, propertyKey ) */ \ + V("deleteProperty", ReflectDeleteProperty, 2, INVALID) \ + /* Reflect.get ( target, propertyKey [ , receiver ] ) */ \ + V("get", ReflectGet, 2, INVALID) \ + /* Reflect.getOwnPropertyDescriptor ( target, propertyKey ) */ \ + V("getOwnPropertyDescriptor", ReflectGetOwnPropertyDescriptor, 2, INVALID) \ + /* Reflect.getPrototypeOf ( target ) */ \ + V("getPrototypeOf", ReflectGetPrototypeOf, 1, INVALID) \ + /* Reflect.has ( target, propertyKey ) */ \ + V("has", ReflectHas, 2, INVALID) \ + /* Reflect.isExtensible ( target ) */ \ + V("isExtensible", ReflectIsExtensible, 1, INVALID) \ + /* Reflect.ownKeys ( target ) */ \ + V("ownKeys", ReflectOwnKeys, 1, INVALID) \ + /* Reflect.preventExtensions ( target ) */ \ + V("preventExtensions", ReflectPreventExtensions, 1, INVALID) \ + /* Reflect.set ( target, propertyKey, V [ , receiver ] ) */ \ + V("set", ReflectSet, 3, INVALID) \ + /* Reflect.setPrototypeOf ( target, proto ) */ \ + V("setPrototypeOf", ReflectSetPrototypeOf, 2, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsReflect : public base::BuiltinsBase { public: @@ -61,6 +93,21 @@ public: // ecma 26.1.13 static JSTaggedValue ReflectSetPrototypeOf(EcmaRuntimeCallInfo *argv); + + // Excluding the '@@' internal properties.
+ static Span GetReflectFunctions() + { + return Span(REFLECT_FUNCTIONS); + } + +private: +#define BUILTINS_REFLECT_FUNCTION_ENTRY(name, method, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsReflect::method, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array REFLECT_FUNCTIONS = { + BUILTIN_REFLECT_FUNCTIONS(BUILTINS_REFLECT_FUNCTION_ENTRY) + }; +#undef BUILTINS_REFLECT_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_REFLECT_H diff --git a/ecmascript/builtins/builtins_regexp.cpp b/ecmascript/builtins/builtins_regexp.cpp index 7df6eec0364ba4a28c6140becffc4e937d5b3d4e..032a9e6976012d933fa20d1f09633454a678822b 100644 --- a/ecmascript/builtins/builtins_regexp.cpp +++ b/ecmascript/builtins/builtins_regexp.cpp @@ -95,6 +95,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) // 5.c Else, let F be flags. flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); } + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // 6. Else if patternIsRegExp is true } else if (patternIsRegExp) { JSHandle sourceString(globalConst->GetHandledSourceString()); @@ -114,6 +115,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) } else { // 6.d Else, let F be flags. flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } } else { // 7.a Let P be pattern. @@ -123,6 +125,7 @@ JSTaggedValue BuiltinsRegExp::RegExpConstructor(EcmaRuntimeCallInfo *argv) flagsTemp = flags; } else { flagsTemp = JSHandle(thread, *JSTaggedValue::ToString(thread, flags)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } } // 8. Let O be RegExpAlloc(newTarget). @@ -182,15 +185,19 @@ JSTaggedValue BuiltinsRegExp::Test(EcmaRuntimeCallInfo *argv) // 1. Let R be the this value. JSHandle thisObj = GetThis(argv); JSHandle inputStr = GetCallArg(argv, 0); + // 2. If Type(R) is not Object, throw a TypeError exception. + if (!thisObj->IsECMAObject()) { + // throw a TypeError exception. + THROW_TYPE_ERROR_AND_RETURN(thread, "this is not Object", JSTaggedValue::Exception()); + } // 3. Let string be ToString(S). // 4. ReturnIfAbrupt(string). JSHandle stringHandle = JSTaggedValue::ToString(thread, inputStr); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle string = JSHandle::Cast(stringHandle); - // 2. If Type(R) is not Object, throw a TypeError exception. - if (!thisObj->IsECMAObject()) { - // throw a TypeError exception. - THROW_TYPE_ERROR_AND_RETURN(thread, "this is not Object", JSTaggedValue::Exception()); + // test fast path + if (IsFastRegExp(thread, thisObj)) { + return RegExpTestFast(thread, thisObj, string, true); } // 5. Let match be RegExpExec(R, string). 
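// ---------------------------------------------------------------------------
// Aside: the Test() fast path added above only fires when IsFastRegExp()
// (defined in the next hunk) proves the receiver is pristine: the instance
// hclass, the RegExp.prototype hclass, and the inlined `exec` slot must all
// still match the values recorded in the global env. A sketch of that guard
// with hypothetical stand-in types (Shape, Pristine, IsFastRegExpModel); the
// real code compares JSHClass pointers and GetTaggedRegExpExecFunction().
// ---------------------------------------------------------------------------
struct Shape {};  // stands in for a JSHClass (hidden class)

struct ProtoObject {
    const Shape *shape;
    const void *execSlot;  // current value of the inlined "exec" property
};

struct RegExpObject {
    const Shape *shape;
    const ProtoObject *proto;
};

struct Pristine {  // captured once when the global env is initialized
    const Shape *instanceShape;
    const Shape *protoShape;
    const void *builtinExec;
};

// Monkey-patching -- adding own properties, swapping the prototype, or
// overriding exec -- perturbs a shape or the exec slot, so all three
// comparisons must hold before the observable Get("exec")/Call sequence
// mandated by the spec may be skipped.
bool IsFastRegExpModel(const RegExpObject &r, const Pristine &p)
{
    return r.shape == p.instanceShape &&
           r.proto->shape == p.protoShape &&
           r.proto->execSlot == p.builtinExec;
}
// ---------------------------------------------------------------------------
// When the guard holds, the later hunks read global/unicode/sticky straight
// from GetOriginalFlags() bit tests instead of observable property lookups,
// and cache exec/test results keyed additionally by the incoming lastIndex
// (the JSTaggedValue(lastIndexInput) argument threaded through
// FindCachedResult / AddResultInCache), since a global or sticky regexp is
// stateful and the same pattern/input pair can yield different matches.
// ---------------------------------------------------------------------------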
@@ -201,6 +208,45 @@ JSTaggedValue BuiltinsRegExp::Test(EcmaRuntimeCallInfo *argv) return GetTaggedBoolean(!matchResult.IsNull()); } +bool BuiltinsRegExp::IsFastRegExp(JSThread *thread, JSHandle ®exp) +{ + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + JSHClass *hclass = JSHandle::Cast(regexp)->GetJSHClass(); + JSHClass *originHClass = JSHClass::Cast(globalConst->GetJSRegExpClass().GetTaggedObject()); + // regexp instance hclass + if (hclass != originHClass) { + return false; + } + // RegExp.prototype hclass + JSTaggedValue proto = hclass->GetPrototype(); + JSHClass *regexpHclass = proto.GetTaggedObject()->GetClass(); + JSHandle originRegexpClassValue = env->GetRegExpPrototypeClass(); + JSHClass *originRegexpHclass = JSHClass::Cast(originRegexpClassValue.GetTaggedValue().GetTaggedObject()); + if (regexpHclass != originRegexpHclass) { + return false; + } + // RegExp.prototype.exec + auto execVal = JSObject::Cast(proto)->GetPropertyInlinedProps(JSRegExp::EXEC_INLINE_PROPERTY_INDEX); + if (execVal != env->GetTaggedRegExpExecFunction()) { + return false; + } + return true; +} + +JSTaggedValue BuiltinsRegExp::RegExpTestFast(JSThread *thread, JSHandle ®exp, + const JSHandle &inputStr, bool useCache) +{ + // 1. Assert: Type(S) is String. + ASSERT(inputStr->IsString()); + // 2. If R does not have a [[RegExpMatcher]] internal slot, throw a TypeError exception. + if (!regexp->IsJSRegExp()) { + // throw a TypeError exception. + THROW_TYPE_ERROR_AND_RETURN(thread, "this does not have a [[RegExpMatcher]]", JSTaggedValue::Exception()); + } + return RegExpExecForTestFast(thread, regexp, inputStr, useCache); +} + // 20.2.5.14 JSTaggedValue BuiltinsRegExp::ToString(EcmaRuntimeCallInfo *argv) { @@ -268,6 +314,18 @@ JSTaggedValue BuiltinsRegExp::GetGlobal(EcmaRuntimeCallInfo *argv) return GetTaggedBoolean(result); } +// 22.2.6.6 +JSTaggedValue BuiltinsRegExp::GetHasIndices(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, RegExp, GetHasIndices); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle thisObj = GetThis(argv); + bool result = GetFlagsInternal(thread, thisObj, RegExpParser::FLAG_HASINDICES); + return GetTaggedBoolean(result); +} + // 20.2.5.5 JSTaggedValue BuiltinsRegExp::GetIgnoreCase(EcmaRuntimeCallInfo *argv) { @@ -377,6 +435,7 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) // 3. Let S be ToString(string) JSHandle inputString = GetCallArg(argv, 0); JSHandle stringHandle = JSTaggedValue::ToString(thread, inputString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool useCache = true; JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); if (cacheTable->GetLargeStrCount() == 0 || cacheTable->GetConflictCount() == 0) { @@ -389,13 +448,6 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) // 2. If Type(rx) is not Object, throw a TypeError exception. THROW_TYPE_ERROR_AND_RETURN(thread, "this is not Object", JSTaggedValue::Exception()); } - // 5. Let global be ToBoolean(Get(rx, "global")). - const GlobalEnvConstants *globalConst = thread->GlobalConstants(); - JSHandle global = globalConst->GetHandledGlobalString(); - JSTaggedValue globalValue = - ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), global.GetTaggedValue()); - // 6. ReturnIfAbrupt(global). 
- RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexpObj(thisObj); JSMutableHandle pattern(thread, JSTaggedValue::Undefined()); @@ -404,13 +456,31 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) pattern.Update(regexpObj->GetOriginalSource()); flags.Update(regexpObj->GetOriginalFlags()); } - bool isGlobal = globalValue.ToBoolean(); + + const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + bool isGlobal = false; + bool fullUnicode = false; + bool unmodified = IsFastRegExp(thread, thisObj); + if (unmodified) { + uint8_t flagsBits = static_cast(flags->GetInt()); + isGlobal = (flagsBits & RegExpParser::FLAG_GLOBAL) != 0; + fullUnicode = (flagsBits & RegExpParser::FLAG_UTF16) != 0; + } else { + // 5. Let global be ToBoolean(Get(rx, "global")). + JSHandle global = globalConst->GetHandledGlobalString(); + JSTaggedValue globalValue = + ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), global.GetTaggedValue()); + // 6. ReturnIfAbrupt(global). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + isGlobal = globalValue.ToBoolean(); + } // 7. If global is false, then if (!isGlobal) { // a. Return RegExpExec(rx, S). if (useCache) { JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, inputString, - RegExpExecResultCache::EXEC_TYPE, thisObj); + RegExpExecResultCache::EXEC_TYPE, thisObj, + JSTaggedValue(0)); if (!cacheResult.IsUndefined()) { return cacheResult; } @@ -421,34 +491,35 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) if (useCache) { JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, inputString, - RegExpExecResultCache::MATCH_TYPE, thisObj); + RegExpExecResultCache::MATCH_TYPE, thisObj, + JSTaggedValue(0)); if (!cacheResult.IsUndefined()) { return cacheResult; } } - // 8. Else global is true - // a. Let fullUnicode be ToBoolean(Get(rx, "unicode")). - JSHandle unicode = globalConst->GetHandledUnicodeString(); - JSTaggedValue uincodeValue = - ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), unicode.GetTaggedValue()); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - bool fullUnicode = uincodeValue.ToBoolean(); - // b. ReturnIfAbrupt(fullUnicode) - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // c. Let setStatus be Set(rx, "lastIndex", 0, true). + if (!unmodified) { + // 8. Else global is true + // a. Let fullUnicode be ToBoolean(Get(rx, "unicode")). + JSHandle unicode = globalConst->GetHandledUnicodeString(); + JSTaggedValue uincodeValue = + ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), unicode.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + fullUnicode = uincodeValue.ToBoolean(); + } + // b. Let setStatus be Set(rx, "lastIndex", 0, true). JSHandle lastIndexString(globalConst->GetHandledLastIndexString()); ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), lastIndexString.GetTaggedValue(), JSTaggedValue(0)); - // d. ReturnIfAbrupt(setStatus). + // c. ReturnIfAbrupt(setStatus). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // e. Let A be ArrayCreate(0). + // d. Let A be ArrayCreate(0). JSHandle array(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // f. Let n be 0. + // e. Let n be 0. int resultNum = 0; JSMutableHandle result(thread, JSTaggedValue(0)); - // g. Repeat, + // f. Repeat, while (true) { // i. Let result be RegExpExec(rx, S). 
result.Update(RegExpExec(thread, thisObj, string, useCache)); @@ -464,7 +535,7 @@ JSTaggedValue BuiltinsRegExp::Match(EcmaRuntimeCallInfo *argv) if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flags, inputString, JSHandle(array), - RegExpExecResultCache::MATCH_TYPE, 0); + RegExpExecResultCache::MATCH_TYPE, 0, 0); } // 2. Else, return A. return array.GetTaggedValue(); @@ -545,8 +616,8 @@ JSTaggedValue BuiltinsRegExp::MatchAll(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); runtimeInfo->SetCallArg(thisObj.GetTaggedValue(), flagsStrHandle.GetTaggedValue()); JSTaggedValue taggedMatcher = JSFunction::Construct(runtimeInfo); - JSHandle matcherHandle(thread, taggedMatcher); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle matcherHandle(thread, taggedMatcher); // 7. Let lastIndex be ? ToLength(? Get(R, "lastIndex")). JSHandle lastIndexString(globalConstants->GetHandledLastIndexString()); @@ -629,9 +700,11 @@ JSTaggedValue BuiltinsRegExp::RegExpReplaceFast(JSThread *thread, JSHandle cacheTable->GetStrLenThreshold()) { useCache = true; } + uint32_t lastIndexInput = lastIndex; if (useCache) { JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flagsBits, tagInputString, RegExpExecResultCache::REPLACE_TYPE, regexp, + JSTaggedValue(lastIndexInput), globalConst->GetEmptyString()); if (!cacheResult.IsUndefined()) { return cacheResult; @@ -640,20 +713,26 @@ JSTaggedValue BuiltinsRegExp::RegExpReplaceFast(JSThread *thread, JSHandle globalTable(thread->GetCurrentEcmaContext()->GetRegExpGlobalResult()); // 12. Let done be false. // 13. Repeat, while done is false for (;;) { if (lastIndex > inputLength) { break; } - bool isUtf16 = EcmaStringAccessor(inputString).IsUtf16(); - auto inputPtr = EcmaStringAccessor(inputString).ToOneByteDataForced(); - const uint8_t *strBuffer = inputPtr.get(); - - RegExpExecutor::MatchResult matchResult = Matcher(thread, regexp, strBuffer, inputLength, lastIndex, isUtf16); - if (!matchResult.isSuccess_) { + FlatStringInfo flatStrInfo = EcmaStringAccessor::FlattenAllString(thread->GetEcmaVM(), inputString); + if (EcmaStringAccessor(inputString).IsTreeString()) { // use flattenedString as srcString + inputString = JSHandle(thread, flatStrInfo.GetString()); + } + const uint8_t *strBuffer; + if (isUtf16) { + strBuffer = reinterpret_cast(flatStrInfo.GetDataUtf16()); + } else { + strBuffer = flatStrInfo.GetDataUtf8(); + } + bool matchResult = Matcher(thread, regexp, strBuffer, inputLength, lastIndex, isUtf16); + if (!matchResult) { if (flags & (RegExpParser::FLAG_STICKY | RegExpParser::FLAG_GLOBAL)) { lastIndex = 0; ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), @@ -662,8 +741,8 @@ JSTaggedValue BuiltinsRegExp::RegExpReplaceFast(JSThread *thread, JSHandle(globalTable->GetStartOfCaptureIndex(0).GetInt()); + uint32_t endIndex = static_cast(globalTable->GetEndIndex().GetInt()); lastIndex = endIndex; if (nextPosition < startIndex) { auto substr = EcmaStringAccessor::FastSubString( @@ -693,7 +772,7 @@ JSTaggedValue BuiltinsRegExp::RegExpReplaceFast(JSThread *thread, JSHandle(resultValue), - RegExpExecResultCache::REPLACE_TYPE, lastIndex, + RegExpExecResultCache::REPLACE_TYPE, lastIndexInput, lastIndex, globalConst->GetEmptyString()); } return resultValue.GetTaggedValue(); @@ -716,6 +795,14 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) // 3. Let S be ToString(string). 
JSHandle string = GetCallArg(argv, 0); JSHandle inputReplaceValue = GetCallArg(argv, 1); + return ReplaceInternal(thread, thisObj, string, inputReplaceValue); +} + +JSTaggedValue BuiltinsRegExp::ReplaceInternal(JSThread *thread, + JSHandle thisObj, + JSHandle string, + JSHandle inputReplaceValue) +{ JSHandle srcString = JSTaggedValue::ToString(thread, string); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); @@ -734,48 +821,64 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) JSHandle lastIndex = globalConst->GetHandledLastIndexString(); // 8. Let global be ToBoolean(Get(rx, "global")). ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JSHandle global = globalConst->GetHandledGlobalString(); - JSTaggedValue globalValue = - ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), global.GetTaggedValue()); - // 9. ReturnIfAbrupt(global). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - bool isGlobal = globalValue.ToBoolean(); - - // 10. If global is true, then + bool isGlobal = false; bool fullUnicode = false; - if (isGlobal) { - // a. Let fullUnicode be ToBoolean(Get(rx, "unicode")). - JSHandle unicode = globalConst->GetHandledUnicodeString(); - JSTaggedValue fullUnicodeTag = - ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), unicode.GetTaggedValue()); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - fullUnicode = fullUnicodeTag.ToBoolean(); - // b. ReturnIfAbrupt(fullUnicode). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // c. Let setStatus be Set(rx, "lastIndex", 0, true). - ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), - lastIndex.GetTaggedValue(), JSTaggedValue(0)); - // d. ReturnIfAbrupt(setStatus). + bool unmodified = IsFastRegExp(thread, thisObj); + if (unmodified) { + JSHandle regexpObj(thisObj); + uint8_t flagsBits = static_cast(regexpObj->GetOriginalFlags().GetInt()); + isGlobal = (flagsBits & RegExpParser::FLAG_GLOBAL) != 0; + fullUnicode = (flagsBits & RegExpParser::FLAG_UTF16) != 0; + if (isGlobal) { + ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), + lastIndex.GetTaggedValue(), JSTaggedValue(0)); + // ReturnIfAbrupt(setStatus). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + } else { + JSHandle global = globalConst->GetHandledGlobalString(); + JSTaggedValue globalValue = + ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), global.GetTaggedValue()); + // 9. ReturnIfAbrupt(global). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + isGlobal = globalValue.ToBoolean(); + // 10. If global is true, then + if (isGlobal) { + // a. Let fullUnicode be ToBoolean(Get(rx, "unicode")). + JSHandle unicode = globalConst->GetHandledUnicodeString(); + JSTaggedValue fullUnicodeTag = + ObjectFastOperator::FastGetPropertyByValue(thread, thisObj.GetTaggedValue(), unicode.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + fullUnicode = fullUnicodeTag.ToBoolean(); + // b. Let setStatus be Set(rx, "lastIndex", 0, true). + ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), + lastIndex.GetTaggedValue(), JSTaggedValue(0)); + // c. ReturnIfAbrupt(setStatus). 
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } } // Add cache for regexp replace bool useCache = false; + // Add cache for the intermediate result of replace + bool useIntermediateCache = false; JSMutableHandle pattern(thread, JSTaggedValue::Undefined()); JSMutableHandle flagsBits(thread, JSTaggedValue::Undefined()); JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); - if (isGlobal && !functionalReplace && thisObj->IsJSRegExp()) { + if (isGlobal && thisObj->IsJSRegExp()) { JSHClass *hclass = JSHandle::Cast(thisObj)->GetJSHClass(); JSHClass *originHClass = JSHClass::Cast(globalConst->GetJSRegExpClass().GetTaggedObject()); if (hclass == originHClass) { - if (EcmaStringAccessor(replaceValueHandle).GetLength() == 0) { + if (!functionalReplace && EcmaStringAccessor(replaceValueHandle).GetLength() == 0) { return RegExpReplaceFast(thread, thisObj, srcString, length); - } else { - JSHandle regexpHandle(thisObj); - if (regexpHandle->IsJSRegExp()) { - pattern.Update(regexpHandle->GetOriginalSource()); - flagsBits.Update(regexpHandle->GetOriginalFlags()); - } + } + JSHandle regexpHandle(thisObj); + if (regexpHandle->IsJSRegExp()) { + useIntermediateCache = true; + pattern.Update(regexpHandle->GetOriginalSource()); + flagsBits.Update(regexpHandle->GetOriginalFlags()); + } + if (!functionalReplace) { uint32_t strLength = EcmaStringAccessor(replaceValueHandle).GetLength(); uint32_t largeStrCount = cacheTable->GetLargeStrCount(); if (largeStrCount != 0) { @@ -789,7 +892,7 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) useCache = true; JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flagsBits, string, RegExpExecResultCache::REPLACE_TYPE, - thisObj, + thisObj, JSTaggedValue(0), inputReplaceValue.GetTaggedValue()); if (!cacheResult.IsUndefined()) { return cacheResult; @@ -801,83 +904,108 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) JSHandle matchedStr = globalConst->GetHandledZeroString(); // 11. Let results be a new empty List. - JSHandle resultsList(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + JSMutableHandle resultsList(thread, JSArray::ArrayCreate(thread, JSTaggedNumber(0))); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int resultsIndex = 0; - // 12. Let done be false. - // 13. Repeat, while done is false JSMutableHandle nextIndexHandle(thread, JSTaggedValue(0)); JSMutableHandle execResult(thread, JSTaggedValue(0)); - for (;;) { - // a. Let result be RegExpExec(rx, S). - execResult.Update(RegExpExec(thread, thisObj, inputStr, useCache)); - // b. ReturnIfAbrupt(result). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // c. If result is null, set done to true. - if (execResult->IsNull()) { - break; - } - // d. Else result is not null, i. Append result to the end of results. - JSObject::CreateDataProperty(thread, resultsList, resultsIndex, execResult); - resultsIndex++; - // ii. If global is false, set done to true. - if (!isGlobal) { - break; - } - // iii. Else, 1. Let matchStr be ToString(Get(result, "0")). - JSTaggedValue getMatchVal = ObjectFastOperator::FastGetPropertyByValue( - thread, execResult.GetTaggedValue(), matchedStr.GetTaggedValue()); - JSHandle getMatch(thread, getMatchVal); - JSHandle matchString = JSTaggedValue::ToString(thread, getMatch); - // 2. ReturnIfAbrupt(matchStr). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // 3. If matchStr is the empty String, then - if (EcmaStringAccessor(matchString).GetLength() == 0) { - // a. Let thisIndex be ToLength(Get(rx, "lastIndex")). 
- JSTaggedValue thisIndexVal = ObjectFastOperator::FastGetPropertyByValue( - thread, thisObj.GetTaggedValue(), lastIndex.GetTaggedValue()); - JSHandle thisIndexHandle(thread, thisIndexVal); - uint32_t thisIndex = 0; - if (thisIndexHandle->IsInt()) { - thisIndex = static_cast(thisIndexHandle->GetInt()); - } else { - thisIndex = JSTaggedValue::ToLength(thread, thisIndexHandle).GetNumber(); - // b. ReturnIfAbrupt(thisIndex). - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Add cache for the intermediate result of replace + JSTaggedValue cachedResultsList(JSTaggedValue::VALUE_UNDEFINED); + if (useIntermediateCache) { + cachedResultsList = cacheTable->FindCachedResult(thread, pattern, flagsBits, string, + RegExpExecResultCache::INTERMEDIATE_REPLACE_TYPE, + thisObj, JSTaggedValue(0), JSTaggedValue::Undefined(), + true); + } + if (!cachedResultsList.IsUndefined()) { + resultsList.Update(cachedResultsList); + resultsIndex = static_cast(JSArray::Cast(resultsList.GetTaggedValue())->GetArrayLength()); + } else { + // 12. Let done be false. + // 13. Repeat, while done is false + for (;;) { + // a. Let result be RegExpExec(rx, S). + execResult.Update(RegExpExec(thread, thisObj, inputStr, useCache)); + // b. ReturnIfAbrupt(result). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // c. If result is null, set done to true. + if (execResult->IsNull()) { + break; } - // c. Let nextIndex be AdvanceStringIndex(S, thisIndex, fullUnicode). - uint32_t nextIndex = AdvanceStringIndex(inputStr, thisIndex, fullUnicode); - nextIndexHandle.Update(JSTaggedValue(nextIndex)); - // d. Let setStatus be Set(rx, "lastIndex", nextIndex, true). - ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), lastIndex.GetTaggedValue(), - nextIndexHandle.GetTaggedValue()); - // e. ReturnIfAbrupt(setStatus). + // d. Else result is not null, i. Append result to the end of results. + JSObject::CreateDataProperty(thread, resultsList, resultsIndex, execResult); + resultsIndex++; + // ii. If global is false, set done to true. + if (!isGlobal) { + break; + } + // iii. Else, 1. Let matchStr be ToString(Get(result, "0")). + JSTaggedValue getMatchVal = ObjectFastOperator::FastGetPropertyByValue( + thread, execResult.GetTaggedValue(), matchedStr.GetTaggedValue()); + JSHandle getMatch(thread, getMatchVal); + JSHandle matchString = JSTaggedValue::ToString(thread, getMatch); + // 2. ReturnIfAbrupt(matchStr). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 3. If matchStr is the empty String, then + if (EcmaStringAccessor(matchString).GetLength() == 0) { + // a. Let thisIndex be ToLength(Get(rx, "lastIndex")). + JSTaggedValue thisIndexVal = ObjectFastOperator::FastGetPropertyByValue( + thread, thisObj.GetTaggedValue(), lastIndex.GetTaggedValue()); + JSHandle thisIndexHandle(thread, thisIndexVal); + uint32_t thisIndex = 0; + if (thisIndexHandle->IsInt()) { + thisIndex = static_cast(thisIndexHandle->GetInt()); + } else { + thisIndex = JSTaggedValue::ToLength(thread, thisIndexHandle).GetNumber(); + // b. ReturnIfAbrupt(thisIndex). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + // c. Let nextIndex be AdvanceStringIndex(S, thisIndex, fullUnicode). + uint32_t nextIndex = AdvanceStringIndex(inputStr, thisIndex, fullUnicode); + nextIndexHandle.Update(JSTaggedValue(nextIndex)); + // d. Let setStatus be Set(rx, "lastIndex", nextIndex, true). + ObjectFastOperator::FastSetPropertyByValue(thread, thisObj.GetTaggedValue(), lastIndex.GetTaggedValue(), + nextIndexHandle.GetTaggedValue()); + // e. ReturnIfAbrupt(setStatus). 
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + } + if (useIntermediateCache) { + RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, string, + JSHandle(resultsList), + RegExpExecResultCache::INTERMEDIATE_REPLACE_TYPE, 0, 0, + JSTaggedValue::Undefined(), true); } } // 14. Let accumulatedResult be the empty String value. std::string accumulatedResult; // 15. Let nextSourcePosition be 0. uint32_t nextSourcePosition = 0; - JSHandle getMatchString; + JSMutableHandle getMatchString(thread, JSTaggedValue::Undefined()); JSMutableHandle resultValues(thread, JSTaggedValue(0)); JSMutableHandle ncapturesHandle(thread, JSTaggedValue(0)); JSMutableHandle capN(thread, JSTaggedValue(0)); // 16. Repeat, for each result in results, for (int i = 0; i < resultsIndex; i++) { resultValues.Update(ObjectFastOperator::FastGetPropertyByIndex(thread, resultsList.GetTaggedValue(), i)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // a. Let nCaptures be ToLength(Get(result, "length")). - JSHandle lengthHandle = globalConst->GetHandledLengthString(); - ncapturesHandle.Update(ObjectFastOperator::FastGetPropertyByValue( - thread, resultValues.GetTaggedValue(), lengthHandle.GetTaggedValue())); - uint32_t ncaptures = JSTaggedValue::ToUint32(thread, ncapturesHandle); + uint32_t ncaptures; + if (unmodified) { + ncaptures = static_cast(JSArray::Cast(resultValues.GetTaggedValue())->GetArrayLength()); + } else { + JSHandle lengthHandle = globalConst->GetHandledLengthString(); + ncapturesHandle.Update(ObjectFastOperator::FastGetPropertyByValue( + thread, resultValues.GetTaggedValue(), lengthHandle.GetTaggedValue())); + ncaptures = JSTaggedValue::ToUint32(thread, ncapturesHandle); + } // b. ReturnIfAbrupt(nCaptures). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // c. Let nCaptures be max(nCaptures − 1, 0). ncaptures = std::max((ncaptures - 1), 0); // d. Let matched be ToString(Get(result, "0")). JSTaggedValue value = ObjectFastOperator::GetPropertyByIndex(thread, resultValues.GetTaggedValue(), 0); - getMatchString = JSHandle(thread, value); + getMatchString.Update(value); JSHandle matchString = JSTaggedValue::ToString(thread, getMatchString); // e. ReturnIfAbrupt(matched). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -956,13 +1084,14 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) replacerArgs->Set(thread, index + 3, namedCaptures.GetTaggedValue()); // 3: position of groups } // iv. Let replValue be Call(replaceValue, undefined, replacerArgs). - const int32_t argsLength = static_cast(replacerArgs->GetLength()); + const uint32_t argsLength = replacerArgs->GetLength(); JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, inputReplaceValue, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(argsLength, replacerArgs); JSTaggedValue replaceResult = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle replValue(thread, replaceResult); // v. Let replacement be ToString(replValue). 
JSHandle replacementString = JSTaggedValue::ToString(thread, replValue); @@ -1002,7 +1131,7 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, string, JSHandle(resultValue), - RegExpExecResultCache::REPLACE_TYPE, nextIndexHandle->GetInt(), + RegExpExecResultCache::REPLACE_TYPE, 0, nextIndexHandle->GetInt(), inputReplaceValue.GetTaggedValue()); } return resultValue.GetTaggedValue(); @@ -1016,7 +1145,7 @@ JSTaggedValue BuiltinsRegExp::Replace(EcmaRuntimeCallInfo *argv) if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, string, JSHandle(resultValue), - RegExpExecResultCache::REPLACE_TYPE, nextIndexHandle->GetInt(), + RegExpExecResultCache::REPLACE_TYPE, 0, nextIndexHandle->GetInt(), inputReplaceValue.GetTaggedValue()); } return resultValue.GetTaggedValue(); @@ -1156,7 +1285,8 @@ JSTaggedValue BuiltinsRegExp::Split(EcmaRuntimeCallInfo *argv) JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); if (useCache) { JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flagsBits, inputString, - RegExpExecResultCache::SPLIT_TYPE, thisObj); + RegExpExecResultCache::SPLIT_TYPE, thisObj, + JSTaggedValue(0)); if (!cacheResult.IsUndefined()) { return cacheResult; } @@ -1251,7 +1381,7 @@ JSTaggedValue BuiltinsRegExp::Split(EcmaRuntimeCallInfo *argv) if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, inputString, JSHandle(array), - RegExpExecResultCache::SPLIT_TYPE, lastIndex); + RegExpExecResultCache::SPLIT_TYPE, 0, lastIndex); } return array.GetTaggedValue(); } @@ -1286,7 +1416,7 @@ JSTaggedValue BuiltinsRegExp::Split(EcmaRuntimeCallInfo *argv) if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, inputString, JSHandle(array), - RegExpExecResultCache::SPLIT_TYPE, lastIndex); + RegExpExecResultCache::SPLIT_TYPE, 0, lastIndex); } return array.GetTaggedValue(); } @@ -1308,16 +1438,16 @@ JSTaggedValue BuiltinsRegExp::Split(EcmaRuntimeCallInfo *argv) if (lim == MAX_SPLIT_LIMIT) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flagsBits, inputString, JSHandle(array), RegExpExecResultCache::SPLIT_TYPE, - endIndex); + 0, endIndex); } // 28. Return A. 
return array.GetTaggedValue(); } // NOLINTNEXTLINE(readability-non-const-parameter) -RegExpExecutor::MatchResult BuiltinsRegExp::Matcher(JSThread *thread, const JSHandle ®exp, - const uint8_t *buffer, size_t length, int32_t lastIndex, - bool isUtf16) +bool BuiltinsRegExp::Matcher(JSThread *thread, const JSHandle ®exp, + const uint8_t *buffer, size_t length, int32_t lastIndex, + bool isUtf16) { BUILTINS_API_TRACE(thread, RegExp, Matcher); // get bytecode @@ -1325,14 +1455,16 @@ RegExpExecutor::MatchResult BuiltinsRegExp::Matcher(JSThread *thread, const JSHa void *dynBuf = JSNativePointer::Cast(bufferData.GetTaggedObject())->GetExternalPointer(); auto bytecodeBuffer = reinterpret_cast(dynBuf); // execute - Chunk chunk(thread->GetNativeAreaAllocator()); + RegExpCachedChunk chunk(thread); RegExpExecutor executor(&chunk); if (lastIndex < 0) { lastIndex = 0; } bool ret = executor.Execute(buffer, lastIndex, static_cast(length), bytecodeBuffer, isUtf16); - RegExpExecutor::MatchResult result = executor.GetResult(thread, ret); - return result; + if (ret) { + executor.GetResult(thread); + } + return ret; } uint32_t BuiltinsRegExp::AdvanceStringIndex(const JSHandle &inputStr, uint32_t index, @@ -1391,6 +1523,69 @@ bool BuiltinsRegExp::GetFlagsInternal(JSThread *thread, const JSHandle(regexpObj->GetOriginalFlags().GetInt()); return flags & mask; } + +// 22.2.7.8 +JSHandle BuiltinsRegExp::MakeMatchIndicesIndexPairArray(JSThread *thread, + const std::vector>& indices, + const std::vector>& groupNames, bool hasGroups) +{ + // 1. Let n be the number of elements in indices. + uint32_t n = indices.size(); + // Assert: groupNames has n - 1 elements. + ASSERT(groupNames.size() == n - 1); + // 5. Let A be ! ArrayCreate(n). + JSHandle results(JSArray::ArrayCreate(thread, JSTaggedNumber(n))); + RETURN_HANDLE_IF_ABRUPT_COMPLETION(JSTaggedValue, thread); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + // 6. If hasGroups is true, then + // a. Let groups be OrdinaryObjectCreate(null). + // 7. Else, + // a. Let groups be undefined. + JSMutableHandle groups(thread, JSTaggedValue::Undefined()); + if (hasGroups) { + JSHandle nullHandle(thread, JSTaggedValue::Null()); + JSHandle nullObj = factory->OrdinaryNewJSObjectCreate(nullHandle); + groups.Update(nullObj.GetTaggedValue()); + } + // 8. Perform ! CreateDataPropertyOrThrow(A, "groups", groups). + const GlobalEnvConstants *globalConst = thread->GlobalConstants(); + JSHandle groupsKey = globalConst->GetHandledGroupsString(); + JSObject::CreateDataProperty(thread, results, groupsKey, groups); + // 9. For each integer i such that 0 ≤ i < n, in ascending order, do + // a. Let matchIndices be indices[i]. + // b. If matchIndices is not undefined, then + // i. Let matchIndexPair be GetMatchIndexPair(S, matchIndices). + // c. Else, + // i. Let matchIndexPair be undefined. + // d. Perform ! CreateDataPropertyOrThrow(A, ! ToString(𝔽(i)), matchIndexPair). + // e. If i > 0 and groupNames[i - 1] is not undefined, then + // i. Assert: groups is not undefined. + // ii. Perform ! CreateDataPropertyOrThrow(groups, groupNames[i - 1], matchIndexPair). 
+ JSMutableHandle matchIndexPair(thread, JSTaggedValue::Undefined()); + for (uint32_t i = 0; i < n; i++) { + std::pair matchIndices = indices[i]; + if (!matchIndices.first.IsUndefined()) { + JSHandle match = factory->NewTaggedArray(2); // 2 means the length of array + match->Set(thread, 0, matchIndices.first); + match->Set(thread, 1, matchIndices.second); + JSHandle pair(JSArray::CreateArrayFromList(thread, JSHandle::Cast(match))); + matchIndexPair.Update(pair.GetTaggedValue()); + } else { + matchIndexPair.Update(JSTaggedValue::Undefined()); + } + JSObject::CreateDataProperty(thread, results, i, matchIndexPair); + if (i > 0) { + JSHandle groupName = groupNames[i - 1]; + if (!groupName->IsUndefined()) { + JSHandle groupObject = JSHandle::Cast(groups); + JSObject::CreateDataProperty(thread, groupObject, groupName, matchIndexPair); + } + } + } + // 10. Return A. + return JSHandle::Cast(results); +} + // 21.2.5.2.2 JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle ®exp, const JSHandle &inputStr, bool useCache) @@ -1412,23 +1607,28 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle lastIndex = lastIndexNumber.GetNumber(); } - JSHandle globalHandle = globalConst->GetHandledGlobalString(); - bool global = ObjectFastOperator::FastGetPropertyByValue( - thread, regexp.GetTaggedValue(), globalHandle.GetTaggedValue()).ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - JSHandle stickyHandle = globalConst->GetHandledStickyString(); - bool sticky = ObjectFastOperator::FastGetPropertyByValue( - thread, regexp.GetTaggedValue(), stickyHandle.GetTaggedValue()).ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (!global && !sticky) { - lastIndex = 0; - } - JSHandle regexpObj(regexp); JSMutableHandle pattern(thread, regexpObj->GetOriginalSource()); JSMutableHandle flags(thread, regexpObj->GetOriginalFlags()); - JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); + + uint8_t flagsBits = static_cast(flags->GetInt()); + bool global = (flagsBits & RegExpParser::FLAG_GLOBAL) != 0; + bool sticky = (flagsBits & RegExpParser::FLAG_STICKY) != 0; + bool hasIndices = (flagsBits & RegExpParser::FLAG_HASINDICES) != 0; + if (!global && !sticky) { + lastIndex = 0; + } + uint32_t lastIndexInput = static_cast(lastIndex); + if (useCache) { + JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, inputStr, + RegExpExecResultCache::EXEC_TYPE, regexp, + JSTaggedValue(lastIndexInput)); + if (!cacheResult.IsUndefined()) { + return cacheResult; + } + } + uint32_t length = EcmaStringAccessor(inputStr->GetTaggedObject()).GetLength(); if (lastIndex > static_cast(length)) { ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), lastIndexHandle.GetTaggedValue(), @@ -1436,13 +1636,21 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return JSTaggedValue::Null(); } - JSHandle inputString = JSTaggedValue::ToString(thread, inputStr); - bool isUtf16 = EcmaStringAccessor(inputString).IsUtf16(); - auto inputPtr = EcmaStringAccessor(inputString).ToOneByteDataForced(); - const uint8_t *strBuffer = inputPtr.get(); + JSHandle inputString = JSHandle::Cast(inputStr); size_t stringLength = EcmaStringAccessor(inputString).GetLength(); - RegExpExecutor::MatchResult matchResult = Matcher(thread, regexp, strBuffer, stringLength, lastIndex, isUtf16); - if (!matchResult.isSuccess_) { + bool isUtf16 = 
EcmaStringAccessor(inputString).IsUtf16(); + FlatStringInfo flatStrInfo = EcmaStringAccessor::FlattenAllString(thread->GetEcmaVM(), inputString); + if (EcmaStringAccessor(inputString).IsTreeString()) { // use flattenedString as srcString + inputString = JSHandle(thread, flatStrInfo.GetString()); + } + const uint8_t *strBuffer; + if (isUtf16) { + strBuffer = reinterpret_cast(flatStrInfo.GetDataUtf16()); + } else { + strBuffer = flatStrInfo.GetDataUtf8(); + } + bool matchResult = Matcher(thread, regexp, strBuffer, stringLength, lastIndex, isUtf16); + if (!matchResult) { if (global || sticky) { JSHandle lastIndexValue(thread, JSTaggedValue(0)); ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), @@ -1452,7 +1660,10 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle } return JSTaggedValue::Null(); } - uint32_t endIndex = matchResult.endIndex_; + JSHandle globalTable(thread->GetCurrentEcmaContext()->GetRegExpGlobalResult()); + globalTable->ResetDollar(thread); + globalTable->SetInputString(thread, inputString.GetTaggedValue()); + uint32_t endIndex = static_cast(globalTable->GetEndIndex().GetInt()); if (global || sticky) { // a. Let setStatus be Set(R, "lastIndex", e, true). ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), lastIndexHandle.GetTaggedValue(), @@ -1460,54 +1671,71 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle // b. ReturnIfAbrupt(setStatus). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - uint32_t capturesSize = matchResult.captures_.size(); + uint32_t capturesSize = static_cast(globalTable->GetTotalCaptureCounts().GetInt()); JSHandle results(JSArray::ArrayCreate(thread, JSTaggedNumber(capturesSize))); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - uint32_t matchIndex = matchResult.index_; // 24. Perform CreateDataProperty(A, "index", matchIndex). JSHandle indexKey = globalConst->GetHandledIndexString(); - JSHandle indexValue(thread, JSTaggedValue(matchIndex)); + JSHandle indexValue(thread, globalTable->GetStartOfCaptureIndex(0)); JSObject::CreateDataProperty(thread, results, indexKey, indexValue); // 25. Perform CreateDataProperty(A, "input", S). JSHandle inputKey = globalConst->GetHandledInputString(); - JSHandle inputValue(thread, static_cast(inputStr->GetTaggedObject())); JSObject::CreateDataProperty(thread, results, inputKey, inputValue); + // 27. Perform CreateDataProperty(A, "0", matched_substr). - JSHandle zeroValue(matchResult.captures_[0].second); + uint32_t startIndex = static_cast(globalTable->GetStartOfCaptureIndex(0).GetInt()); + uint32_t len = static_cast(globalTable->GetEndOfCaptureIndex(0).GetInt()) - startIndex; + JSHandle zeroValue(thread, JSTaggedValue(EcmaStringAccessor::FastSubString( + thread->GetEcmaVM(), inputString, startIndex, len))); JSObject::CreateDataProperty(thread, results, 0, zeroValue); - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); - JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + // Let indices be a new empty List. + // Let groupNames be a new empty List. + // Append match to indices. + std::vector> indices; + std::vector> groupNames; + indices.emplace_back(std::make_pair(globalTable->GetStartOfCaptureIndex(0), JSTaggedValue(endIndex))); + // If R contains any GroupName, then + // a. Let groups be OrdinaryObjectCreate(null). + // b. Let hasGroups be true. + // Else, + // a. Let groups be undefined. + // b. Let hasGroups be false. 
JSHandle groupName(thread, regexpObj->GetGroupName()); JSMutableHandle groups(thread, JSTaggedValue::Undefined()); + bool hasGroups = false; if (!groupName->IsUndefined()) { + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle nullHandle(thread, JSTaggedValue::Null()); JSHandle nullObj = factory->OrdinaryNewJSObjectCreate(nullHandle); groups.Update(nullObj.GetTaggedValue()); + hasGroups = true; } + // Perform ! CreateDataPropertyOrThrow(A, "groups", groups). JSHandle groupsKey = globalConst->GetHandledGroupsString(); JSObject::CreateDataProperty(thread, results, groupsKey, groups); // Create a new RegExp on global - JSHandle globalRegExp = JSHandle(env->GetRegExpFunction()); - JSMutableHandle keyString(thread, JSTaggedValue::Undefined()); uint32_t captureIndex = 1; + JSHandle undefined = globalConst->GetHandledUndefined(); + JSMutableHandle iValue(thread, JSTaggedValue::Undefined()); // 28. For each integer i such that i > 0 and i <= n for (; captureIndex < capturesSize; captureIndex++) { // a. Let capture_i be ith element of r's captures List - JSTaggedValue capturedValue; - if (matchResult.captures_[captureIndex].first) { - capturedValue = JSTaggedValue::Undefined(); + int32_t captureStartIndex = globalTable->GetStartOfCaptureIndex(captureIndex).GetInt(); + int32_t captureEndIndex = globalTable->GetEndOfCaptureIndex(captureIndex).GetInt(); + int32_t subStrLen = captureEndIndex - captureStartIndex; + if (subStrLen < 0) { + iValue.Update(JSTaggedValue::Undefined()); + indices.emplace_back(std::make_pair(JSTaggedValue::Undefined(), JSTaggedValue::Undefined())); } else { - capturedValue = matchResult.captures_[captureIndex].second.GetTaggedValue(); + iValue.Update(JSTaggedValue(EcmaStringAccessor::FastSubString( + thread->GetEcmaVM(), inputString, captureStartIndex, subStrLen))); + indices.emplace_back(std::make_pair(captureStartIndex, captureEndIndex)); } - JSHandle iValue(thread, capturedValue); // add to RegExp.$i and i must <= 9 if (captureIndex <= REGEXP_GLOBAL_ARRAY_SIZE) { - keyString.Update(GetDollarString(thread, static_cast(captureIndex))); - ObjectOperator op(thread, globalRegExp, keyString); - PropertyBox *cell = PropertyBox::Cast(op.GetValue().GetTaggedObject()); - cell->SetValue(thread, iValue); + globalTable->SetCapture(thread, captureIndex, iValue.GetTaggedValue()); } JSObject::CreateDataProperty(thread, results, captureIndex, iValue); @@ -1517,21 +1745,31 @@ JSTaggedValue BuiltinsRegExp::RegExpBuiltinExec(JSThread *thread, const JSHandle if (groupArray->GetLength() > captureIndex - 1) { JSHandle skey(thread, groupArray->Get(captureIndex - 1)); JSObject::CreateDataProperty(thread, groupObject, skey, iValue); + groupNames.emplace_back(skey); + } else { + groupNames.emplace_back(undefined); } + } else { + groupNames.emplace_back(undefined); } } + // If hasIndices is true, then + // a. Let indicesArray be MakeMatchIndicesIndexPairArray(S, indices, groupNames, hasGroups). + // b. Perform ! CreateDataPropertyOrThrow(A, "indices", indicesArray). 
+ if (hasIndices) { + auto indicesArray = MakeMatchIndicesIndexPairArray(thread, indices, groupNames, hasGroups); + JSHandle indicesKey = globalConst->GetHandledIndicesString(); + JSObject::CreateDataProperty(thread, results, indicesKey, indicesArray); + } JSHandle emptyString = thread->GlobalConstants()->GetHandledEmptyString(); while (captureIndex <= REGEXP_GLOBAL_ARRAY_SIZE) { - keyString.Update(GetDollarString(thread, static_cast(captureIndex))); - ObjectOperator op(thread, globalRegExp, keyString); - PropertyBox *cell = PropertyBox::Cast(op.GetValue().GetTaggedObject()); - cell->SetValue(thread, emptyString); + globalTable->SetCapture(thread, captureIndex, emptyString.GetTaggedValue()); ++captureIndex; } - if (lastIndex == 0 && useCache) { + if (useCache) { RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flags, inputStr, JSHandle(results), RegExpExecResultCache::EXEC_TYPE, - endIndex); + lastIndexInput, endIndex); } // 29. Return A. return results.GetTaggedValue(); @@ -1548,11 +1786,23 @@ JSTaggedValue BuiltinsRegExp::RegExpExec(JSThread *thread, const JSHandleIsString()); // 3. Let exec be Get(R, "exec"). JSHandle inputStr = JSTaggedValue::ToString(thread, inputString); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle execHandle = globalConst->GetHandledExecString(); JSTaggedValue execVal = ObjectFastOperator::FastGetPropertyByValue(thread, regexp.GetTaggedValue(), execHandle.GetTaggedValue()); + if (execVal == env->GetTaggedRegExpExecFunction()) { + JSTaggedValue result = RegExpBuiltinExec(thread, regexp, JSHandle(inputStr), useCache); + // b. ReturnIfAbrupt(result). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (!result.IsECMAObject() && !result.IsNull()) { + // throw a TypeError exception. + THROW_TYPE_ERROR_AND_RETURN(thread, "exec result is null or is not Object", JSTaggedValue::Exception()); + } + return result; + } + JSHandle exec(thread, execVal); // 4. ReturnIfAbrupt(exec). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1580,6 +1830,93 @@ JSTaggedValue BuiltinsRegExp::RegExpExec(JSThread *thread, const JSHandle ®exp, + const JSHandle &inputStr, bool useCache) +{ + JSHandle object = JSHandle::Cast(regexp); + JSTaggedValue lastIndexValue = object->GetPropertyInlinedProps(LAST_INDEX_OFFSET); + // ASSERT GetPropertyInlinedProps(LAST_INDEX_OFFSET) is not hole + ASSERT(!JSTaggedValue::SameValue(lastIndexValue, JSTaggedValue::Hole())); + // 1. load lastIndex as length + int32_t lastIndex = 0; + if (lastIndexValue.IsInt()) { + lastIndex = lastIndexValue.GetInt(); + } else { + JSHandle lastIndexResult(thread, lastIndexValue); + JSTaggedNumber lastIndexNumber = JSTaggedValue::ToLength(thread, lastIndexResult); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + lastIndex = lastIndexNumber.GetNumber(); + } + // 2. Check whether the regexp is global or sticky, which determines whether we update last index later on. + JSHandle regexpObj(regexp); + JSMutableHandle pattern(thread, regexpObj->GetOriginalSource()); + JSMutableHandle flags(thread, regexpObj->GetOriginalFlags()); + JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetRegExpCache()); + uint8_t flagsBits = static_cast(flags->GetInt()); + bool global = (flagsBits & RegExpParser::FLAG_GLOBAL) != 0; + bool sticky = (flagsBits & RegExpParser::FLAG_STICKY) != 0; + if (!global && !sticky) { + lastIndex = 0; + } + // 3. 
Search RegExpExecResult cache + uint32_t lastIndexInput = static_cast(lastIndex); + if (useCache) { + JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, inputStr, + RegExpExecResultCache::TEST_TYPE, regexp, + JSTaggedValue(lastIndexInput)); + if (!cacheResult.IsUndefined()) { + return cacheResult; + } + } + + uint32_t length = EcmaStringAccessor(inputStr->GetTaggedObject()).GetLength(); + if (lastIndex > static_cast(length)) { + object->SetPropertyInlinedPropsWithRep(thread, LAST_INDEX_OFFSET, JSTaggedValue(0)); + return JSTaggedValue::False(); + } + JSHandle inputString = JSHandle::Cast(inputStr); + size_t stringLength = EcmaStringAccessor(inputString).GetLength(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + bool isUtf16 = EcmaStringAccessor(inputString).IsUtf16(); + FlatStringInfo flatStrInfo = EcmaStringAccessor::FlattenAllString(thread->GetEcmaVM(), inputString); + if (EcmaStringAccessor(inputString).IsTreeString()) { // use flattenedString as srcString + inputString = JSHandle(thread, flatStrInfo.GetString()); + } + const uint8_t *strBuffer; + if (isUtf16) { + strBuffer = reinterpret_cast(flatStrInfo.GetDataUtf16()); + } else { + strBuffer = flatStrInfo.GetDataUtf8(); + } + bool matchResult = Matcher(thread, regexp, strBuffer, stringLength, lastIndex, isUtf16); + if (!matchResult) { + if (global || sticky) { + object->SetPropertyInlinedPropsWithRep(thread, LAST_INDEX_OFFSET, JSTaggedValue(0)); + } + if (useCache) { + RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flags, inputStr, + JSHandle(thread, JSTaggedValue(matchResult)), + RegExpExecResultCache::TEST_TYPE, + lastIndexInput, 0); // 0: match fail so lastIndex is 0 + } + return JSTaggedValue::False(); + } + JSHandle globalTable(thread->GetCurrentEcmaContext()->GetRegExpGlobalResult()); + globalTable->ResetDollar(thread); + globalTable->SetInputString(thread, inputString.GetTaggedValue()); + JSTaggedValue endIndex = globalTable->GetEndIndex(); + if (global || sticky) { + object->SetPropertyInlinedPropsWithRep(thread, LAST_INDEX_OFFSET, endIndex); + } + if (useCache) { + RegExpExecResultCache::AddResultInCache(thread, cacheTable, pattern, flags, inputStr, + JSHandle(thread, JSTaggedValue(matchResult)), + RegExpExecResultCache::TEST_TYPE, + lastIndexInput, endIndex.GetInt()); + } + return GetTaggedBoolean(matchResult); +} + // 21.2.3.2.1 JSTaggedValue BuiltinsRegExp::RegExpAlloc(JSThread *thread, const JSHandle &newTarget) { @@ -1622,6 +1959,9 @@ uint32_t BuiltinsRegExp::UpdateExpressionFlags(JSThread *thread, const CString & case 'y': flagsBitsTemp = RegExpParser::FLAG_STICKY; break; + case 'd': + flagsBitsTemp = RegExpParser::FLAG_HASINDICES; + break; default: { ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle syntaxError = @@ -1640,39 +1980,16 @@ uint32_t BuiltinsRegExp::UpdateExpressionFlags(JSThread *thread, const CString & return flagsBits; } -JSHandle BuiltinsRegExp::GetDollarString(JSThread *thread, RegExpGlobalArrayIndex index) -{ - BUILTINS_API_TRACE(thread, RegExp, GetDollarString); - switch (index) { - case DOLLAR_ONE: - return thread->GlobalConstants()->GetHandledDollarStringOne(); - case DOLLAR_TWO: - return thread->GlobalConstants()->GetHandledDollarStringTwo(); - case DOLLAR_THREE: - return thread->GlobalConstants()->GetHandledDollarStringThree(); - case DOLLAR_FOUR: - return thread->GlobalConstants()->GetHandledDollarStringFour(); - case DOLLAR_FIVE: - return thread->GlobalConstants()->GetHandledDollarStringFive(); - case DOLLAR_SIX: - 
return thread->GlobalConstants()->GetHandledDollarStringSix(); - case DOLLAR_SEVEN: - return thread->GlobalConstants()->GetHandledDollarStringSeven(); - case DOLLAR_EIGHT: - return thread->GlobalConstants()->GetHandledDollarStringEight(); - case DOLLAR_NINE: - return thread->GlobalConstants()->GetHandledDollarStringNine(); - default: - return thread->GlobalConstants()->GetHandledEmptyString(); - } -} - JSTaggedValue BuiltinsRegExp::FlagsBitsToString(JSThread *thread, uint8_t flags) { - ASSERT((flags & 0xC0) == 0); // 0xC0: first 2 bits of flags must be 0 + ASSERT((flags & 0x80) == 0); // 0x80: first bit of flags must be 0 BUILTINS_API_TRACE(thread, RegExp, FlagsBitsToString); - uint8_t *flagsStr = new uint8_t[7]; // 7: maximum 6 flags + '\0' + uint8_t *flagsStr = new uint8_t[RegExpParser::FLAG_NUM + 1]; // FLAG_NUM flags + '\0' size_t flagsLen = 0; + if (flags & RegExpParser::FLAG_HASINDICES) { + flagsStr[flagsLen] = 'd'; + flagsLen++; + } if (flags & RegExpParser::FLAG_GLOBAL) { flagsStr[flagsLen] = 'g'; flagsLen++; @@ -1733,7 +2050,7 @@ JSTaggedValue BuiltinsRegExp::RegExpInitialize(JSThread *thread, const JSHandle< // 6. ReturnIfAbrupt(F). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); /** - * 7. If F contains any code unit other than "g", "i", "m", "u", or "y" or if it contains the same code + * 7. If F contains any code unit other than "d", "g", "i", "m", "u", or "y" or if it contains the same code * unit more than once, throw a SyntaxError exception. **/ CString checkStr = ConvertToString(*flagsStrHandle, StringConvertedUsage::LOGICOPERATION); @@ -1829,6 +2146,31 @@ EcmaString *BuiltinsRegExp::EscapeRegExpPattern(JSThread *thread, const JSHandle return *factory->NewFromUtf8(srcStdStr); } +// NOLINTNEXTLINE(cppcoreguidelines-macro-usage) +#define SET_GET_CAPTURE_IMPL(index) \ + JSTaggedValue BuiltinsRegExp::GetCapture##index(JSThread *thread, [[maybe_unused]] const JSHandle &obj) \ + { \ + return RegExpGlobalResult::GetCapture(thread); \ + } \ + bool BuiltinsRegExp::SetCapture##index([[maybe_unused]] JSThread *thread, \ + [[maybe_unused]] const JSHandle &obj, \ + [[maybe_unused]] const JSHandle &value, \ + [[maybe_unused]] bool mayThrow) \ + { \ + return true; \ + } + + SET_GET_CAPTURE_IMPL(1) + SET_GET_CAPTURE_IMPL(2) + SET_GET_CAPTURE_IMPL(3) + SET_GET_CAPTURE_IMPL(4) + SET_GET_CAPTURE_IMPL(5) + SET_GET_CAPTURE_IMPL(6) + SET_GET_CAPTURE_IMPL(7) + SET_GET_CAPTURE_IMPL(8) + SET_GET_CAPTURE_IMPL(9) +#undef SET_GET_CAPTURE_IMPL + JSTaggedValue RegExpExecResultCache::CreateCacheTable(JSThread *thread) { int length = CACHE_TABLE_HEADER_SIZE + INITIAL_CACHE_NUMBER * ENTRY_SIZE; @@ -1844,24 +2186,28 @@ JSTaggedValue RegExpExecResultCache::CreateCacheTable(JSThread *thread) return JSTaggedValue(table); } -JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, const JSHandle &pattern, +JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, + const JSHandle &pattern, const JSHandle &flags, const JSHandle &input, CacheType type, - const JSHandle ®exp, JSTaggedValue extend) + const JSHandle ®exp, + JSTaggedValue lastIndexInput, JSTaggedValue extend, + bool isIntermediateResult) { JSTaggedValue patternValue = pattern.GetTaggedValue(); JSTaggedValue flagsValue = flags.GetTaggedValue(); JSTaggedValue inputValue = input.GetTaggedValue(); - if (!pattern->IsString() || !flags->IsInt() || !input->IsString()) { + if (!pattern->IsString() || !flags->IsInt() || !input->IsString() || !lastIndexInput.IsInt()) { return JSTaggedValue::Undefined(); } - uint32_t hash = 
pattern->GetKeyHashCode() + static_cast(flags->GetInt()) + input->GetKeyHashCode(); + uint32_t hash = pattern->GetKeyHashCode() + static_cast(flags->GetInt()) + + input->GetKeyHashCode() + static_cast(lastIndexInput.GetInt()); uint32_t entry = hash & static_cast(GetCacheLength() - 1); - if (!Match(entry, patternValue, flagsValue, inputValue, extend)) { + if (!Match(entry, patternValue, flagsValue, inputValue, lastIndexInput, extend)) { uint32_t entry2 = (entry + 1) & static_cast(GetCacheLength() - 1); - if (!Match(entry2, patternValue, flagsValue, inputValue, extend)) { + if (!Match(entry2, patternValue, flagsValue, inputValue, lastIndexInput, extend)) { return JSTaggedValue::Undefined(); } entry = entry2; @@ -1869,6 +2215,11 @@ JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, const JS ASSERT((static_cast(CACHE_TABLE_HEADER_SIZE) + static_cast(entry) * static_cast(ENTRY_SIZE)) <= static_cast(UINT32_MAX)); uint32_t index = CACHE_TABLE_HEADER_SIZE + entry * ENTRY_SIZE; + // update cached value if input value is changed + JSTaggedValue cachedStr = Get(index + INPUT_STRING_INDEX); + if (!cachedStr.IsUndefined() && cachedStr != inputValue) { + Set(thread, index + INPUT_STRING_INDEX, inputValue); + } JSTaggedValue result; switch (type) { case REPLACE_TYPE: @@ -1883,6 +2234,12 @@ JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, const JS case EXEC_TYPE: result = Get(index + RESULT_EXEC_INDEX); break; + case INTERMEDIATE_REPLACE_TYPE: + result = Get(index + RESULT_INTERMEDIATE_REPLACE_INDEX); + break; + case TEST_TYPE: + result = Get(index + RESULT_TEST_INDEX); + break; default: LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -1892,7 +2249,7 @@ JSTaggedValue RegExpExecResultCache::FindCachedResult(JSThread *thread, const JS JSHandle lastIndexHandle = thread->GlobalConstants()->GetHandledLastIndexString(); ObjectFastOperator::FastSetPropertyByValue(thread, regexp.GetTaggedValue(), lastIndexHandle.GetTaggedValue(), Get(index + LAST_INDEX_INDEX)); - if (result.IsJSArray()) { + if (!isIntermediateResult && result.IsJSArray()) { JSHandle resultHandle(thread, JSArray::Cast(result)); JSHandle copyArray = thread->GetEcmaVM()->GetFactory()->CloneArrayLiteral(resultHandle); return copyArray.GetTaggedValue(); @@ -1904,14 +2261,15 @@ void RegExpExecResultCache::AddResultInCache(JSThread *thread, JSHandle &pattern, const JSHandle &flags, const JSHandle &input, const JSHandle &resultArray, CacheType type, - uint32_t lastIndex, JSTaggedValue extend) + uint32_t lastIndexInput, uint32_t lastIndex, JSTaggedValue extend, + bool isIntermediateResult) { if (!pattern->IsString() || !flags->IsInt() || !input->IsString()) { return; } JSHandle resultArrayCopy; - if (resultArray->IsJSArray()) { + if (!isIntermediateResult && resultArray->IsJSArray()) { JSHandle copyArray = thread->GetEcmaVM()->GetFactory() ->CloneArrayLiteral(JSHandle(resultArray)); resultArrayCopy = JSHandle(copyArray); @@ -1922,19 +2280,21 @@ void RegExpExecResultCache::AddResultInCache(JSThread *thread, JSHandle(flagsValue.GetInt()) + - inputValue.GetKeyHashCode(); + inputValue.GetKeyHashCode() + lastIndexInput; uint32_t entry = hash & static_cast(cache->GetCacheLength() - 1); ASSERT((static_cast(CACHE_TABLE_HEADER_SIZE) + static_cast(entry) * static_cast(ENTRY_SIZE)) <= static_cast(UINT32_MAX)); uint32_t index = CACHE_TABLE_HEADER_SIZE + entry * ENTRY_SIZE; if (cache->Get(index).IsUndefined()) { cache->SetCacheCount(thread, cache->GetCacheCount() + 1); - cache->SetEntry(thread, entry, 
patternValue, flagsValue, inputValue, lastIndexValue, extend); + cache->SetEntry(thread, entry, patternValue, flagsValue, inputValue, + lastIndexInputValue, lastIndexValue, extend); cache->UpdateResultArray(thread, entry, resultArrayCopy.GetTaggedValue(), type); - } else if (cache->Match(entry, patternValue, flagsValue, inputValue, extend)) { + } else if (cache->Match(entry, patternValue, flagsValue, inputValue, lastIndexInputValue, extend)) { cache->UpdateResultArray(thread, entry, resultArrayCopy.GetTaggedValue(), type); } else { uint32_t entry2 = (entry + 1) & static_cast(cache->GetCacheLength() - 1); @@ -1956,15 +2316,17 @@ void RegExpExecResultCache::AddResultInCache(JSThread *thread, JSHandleGet(index2).IsUndefined()) { cache->SetCacheCount(thread, cache->GetCacheCount() + 1); - cache->SetEntry(thread, entry2, patternValue, flagsValue, inputValue, lastIndexValue, extendValue); + cache->SetEntry(thread, entry2, patternValue, flagsValue, inputValue, + lastIndexInputValue, lastIndexValue, extendValue); cache->UpdateResultArray(thread, entry2, resultArrayCopy.GetTaggedValue(), type); - } else if (cache->Match(entry2, patternValue, flagsValue, inputValue, extendValue)) { + } else if (cache->Match(entry2, patternValue, flagsValue, inputValue, lastIndexInputValue, extendValue)) { cache->UpdateResultArray(thread, entry2, resultArrayCopy.GetTaggedValue(), type); } else { cache->SetConflictCount(thread, cache->GetConflictCount() > 1 ? (cache->GetConflictCount() - 1) : 0); cache->SetCacheCount(thread, cache->GetCacheCount() - 1); cache->ClearEntry(thread, entry2); - cache->SetEntry(thread, entry, patternValue, flagsValue, inputValue, lastIndexValue, extendValue); + cache->SetEntry(thread, entry, patternValue, flagsValue, inputValue, + lastIndexInputValue, lastIndexValue, extendValue); cache->UpdateResultArray(thread, entry, resultArrayCopy.GetTaggedValue(), type); } } @@ -1979,7 +2341,8 @@ void RegExpExecResultCache::GrowRegexpCache(JSThread *thread, JSHandle(CACHE_TABLE_HEADER_SIZE) + static_cast(entry) * static_cast(ENTRY_SIZE)) <= static_cast(INT_MAX)); @@ -1987,6 +2350,7 @@ void RegExpExecResultCache::SetEntry(JSThread *thread, int entry, JSTaggedValue Set(thread, index + PATTERN_INDEX, pattern); Set(thread, index + FLAG_INDEX, flags); Set(thread, index + INPUT_STRING_INDEX, input); + Set(thread, index + LAST_INDEX_INPUT_INDEX, lastIndexInputValue); Set(thread, index + LAST_INDEX_INDEX, lastIndexValue); Set(thread, index + EXTEND_INDEX, extendValue); } @@ -2010,6 +2374,12 @@ void RegExpExecResultCache::UpdateResultArray(JSThread *thread, int entry, JSTag case EXEC_TYPE: Set(thread, index + RESULT_EXEC_INDEX, resultArray); break; + case INTERMEDIATE_REPLACE_TYPE: + Set(thread, index + RESULT_INTERMEDIATE_REPLACE_INDEX, resultArray); + break; + case TEST_TYPE: + Set(thread, index + RESULT_TEST_INDEX, resultArray); + break; default: LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -2029,25 +2399,36 @@ void RegExpExecResultCache::ClearEntry(JSThread *thread, int entry) } bool RegExpExecResultCache::Match(int entry, JSTaggedValue &pattern, JSTaggedValue &flags, JSTaggedValue &input, - JSTaggedValue &extend) + JSTaggedValue &lastIndexInputValue, JSTaggedValue &extend) { ASSERT((static_cast(CACHE_TABLE_HEADER_SIZE) + static_cast(entry) * static_cast(ENTRY_SIZE)) <= static_cast(INT_MAX)); int index = CACHE_TABLE_HEADER_SIZE + entry * ENTRY_SIZE; + JSTaggedValue keyPattern = Get(index + PATTERN_INDEX); + if (keyPattern.IsUndefined()) { + return false; + } + + uint8_t flagsBits = 
static_cast<uint8_t>(flags.GetInt());
     JSTaggedValue keyFlags = Get(index + FLAG_INDEX);
-    JSTaggedValue keyInput = Get(index + INPUT_STRING_INDEX);
-    JSTaggedValue keyExtend = Get(index + EXTEND_INDEX);
+    uint8_t keyFlagsBits = static_cast<uint8_t>(keyFlags.GetInt());
+    if (flagsBits != keyFlagsBits) {
+        return false;
+    }
 
-    if (keyPattern.IsUndefined()) {
+    uint32_t lastIndexInputInt = static_cast<uint32_t>(lastIndexInputValue.GetInt());
+    JSTaggedValue keyLastIndexInput = Get(index + LAST_INDEX_INPUT_INDEX);
+    uint32_t keyLastIndexInputInt = static_cast<uint32_t>(keyLastIndexInput.GetInt());
+    if (lastIndexInputInt != keyLastIndexInputInt) {
         return false;
     }
+    JSTaggedValue keyInput = Get(index + INPUT_STRING_INDEX);
+    JSTaggedValue keyExtend = Get(index + EXTEND_INDEX);
     EcmaString *patternStr = EcmaString::Cast(pattern.GetTaggedObject());
-    uint8_t flagsBits = static_cast<uint8_t>(flags.GetInt());
     EcmaString *inputStr = EcmaString::Cast(input.GetTaggedObject());
     EcmaString *keyPatternStr = EcmaString::Cast(keyPattern.GetTaggedObject());
-    uint8_t keyFlagsBits = static_cast<uint8_t>(keyFlags.GetInt());
     EcmaString *keyInputStr = EcmaString::Cast(keyInput.GetTaggedObject());
     bool extendEqual = false;
     if (extend.IsString() && keyExtend.IsString()) {
@@ -2059,7 +2440,39 @@ bool RegExpExecResultCache::Match(int entry, JSTaggedVal
     } else {
         return false;
     }
-    return EcmaStringAccessor::StringsAreEqual(patternStr, keyPatternStr) && flagsBits == keyFlagsBits &&
-        EcmaStringAccessor::StringsAreEqual(inputStr, keyInputStr) && extendEqual;
+    return extendEqual &&
+           EcmaStringAccessor::StringsAreEqual(patternStr, keyPatternStr) &&
+           EcmaStringAccessor::StringsAreEqual(inputStr, keyInputStr);
+}
+
+JSTaggedValue RegExpGlobalResult::CreateGlobalResultTable(JSThread *thread)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    uint32_t initialLength = GLOBAL_TABLE_SIZE + INITIAL_CAPTURE_INDICES;
+    auto table = static_cast<RegExpGlobalResult *>(
+        *factory->NewTaggedArray(initialLength, JSTaggedValue::Undefined()));
+    // initialize dollars with empty string
+    JSTaggedValue emptyString = factory->GetEmptyString().GetTaggedValue();
+    for (uint32_t i = 1; i <= DOLLAR_NUMBER; i++) {
+        table->SetCapture(thread, CAPTURE_START_INDEX + i, emptyString);
+    }
+    // initialize match info
+    table->SetTotalCaptureCounts(thread, JSTaggedValue(0));
+    table->SetInputString(thread, emptyString);
+    for (uint32_t i = 0; i < INITIAL_CAPTURE_INDICES / 2; i++) { // 2: capture pair
+        table->SetStartOfCaptureIndex(thread, i, JSTaggedValue(0));
+        table->SetEndOfCaptureIndex(thread, i, JSTaggedValue(0));
+    }
+    return JSTaggedValue(table);
+}
+
+JSHandle<RegExpGlobalResult> RegExpGlobalResult::GrowCapturesCapacity(JSThread *thread,
+    JSHandle<RegExpGlobalResult> result, uint32_t length)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<TaggedArray> newResult = factory->ExtendArray(
+        JSHandle<TaggedArray>(result), length, JSTaggedValue(0));
+    thread->GetCurrentEcmaContext()->SetRegExpGlobalResult(newResult.GetTaggedValue());
+    return JSHandle<RegExpGlobalResult>(newResult);
 }
 } // namespace panda::ecmascript::builtins
diff --git a/ecmascript/builtins/builtins_regexp.h b/ecmascript/builtins/builtins_regexp.h
index 6830d9d070cc01d60d425f0799b85f0ce71f2e98..a2b8a7d3db286de354ddb4f57969bb3eb46325eb 100644
--- a/ecmascript/builtins/builtins_regexp.h
+++ b/ecmascript/builtins/builtins_regexp.h
@@ -26,19 +26,6 @@
 namespace panda::ecmascript::builtins {
 class BuiltinsRegExp : public base::BuiltinsBase {
 public:
-    enum RegExpGlobalArrayIndex {
-        DUMP_HEAD,
-        DOLLAR_ONE,
-        DOLLAR_TWO,
-        DOLLAR_THREE,
-        DOLLAR_FOUR,
-        DOLLAR_FIVE,
-        DOLLAR_SIX,
-        DOLLAR_SEVEN,
-        DOLLAR_EIGHT,
-        DOLLAR_NINE
-    };
-
     // 21.2.3.1 RegExp ( pattern, flags )
     static JSTaggedValue RegExpConstructor(EcmaRuntimeCallInfo *argv);
@@ -86,14 +73,38 @@ public:
     // 21.2.5.2.3 AdvanceStringIndex ( S, index, unicode )
     static uint32_t AdvanceStringIndex(const JSHandle<JSTaggedValue> &inputStr, uint32_t index, bool unicode);
+    // 22.2.6.6 get RegExp.prototype.hasIndices
+    static JSTaggedValue GetHasIndices(EcmaRuntimeCallInfo *argv);
+
+    static JSTaggedValue ReplaceInternal(JSThread *thread,
+                                         JSHandle<JSTaggedValue> thisObj,
+                                         JSHandle<JSTaggedValue> string,
+                                         JSHandle<JSTaggedValue> inputReplaceValue);
+// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
+#define SET_GET_CAPTURE(index)                                                                   \
+    static JSTaggedValue GetCapture##index(JSThread *thread, const JSHandle<JSObject> &obj);     \
+    static bool SetCapture##index(JSThread *thread, const JSHandle<JSObject> &obj,               \
+                                  const JSHandle<JSTaggedValue> &value, bool mayThrow);
+
+    SET_GET_CAPTURE(1)
+    SET_GET_CAPTURE(2)
+    SET_GET_CAPTURE(3)
+    SET_GET_CAPTURE(4)
+    SET_GET_CAPTURE(5)
+    SET_GET_CAPTURE(6)
+    SET_GET_CAPTURE(7)
+    SET_GET_CAPTURE(8)
+    SET_GET_CAPTURE(9)
+#undef SET_GET_CAPTURE
 
 private:
     static constexpr uint32_t MIN_REPLACE_STRING_LENGTH = 1000;
     static constexpr uint32_t MAX_SPLIT_LIMIT = 0xFFFFFFFFu;
     static constexpr uint32_t REGEXP_GLOBAL_ARRAY_SIZE = 9;
+    static constexpr uint32_t LAST_INDEX_OFFSET = 0;
 
-    static RegExpExecutor::MatchResult Matcher(JSThread *thread, const JSHandle<JSTaggedValue> &regexp,
-                                               const uint8_t *buffer, size_t length, int32_t lastindex, bool isUtf16);
+    static bool Matcher(JSThread *thread, const JSHandle<JSTaggedValue> &regexp,
+                        const uint8_t *buffer, size_t length, int32_t lastindex, bool isUtf16);
 
     static bool GetFlagsInternal(JSThread *thread, const JSHandle<JSTaggedValue> &obj, const uint8_t mask);
@@ -106,8 +117,6 @@ private:
     static uint32_t UpdateExpressionFlags(JSThread *thread, const CString &checkStr);
 
-    static JSHandle<EcmaString> GetDollarString(JSThread *thread, RegExpGlobalArrayIndex index);
-
     // 21.2.3.2.2 Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
     static JSTaggedValue RegExpInitialize(JSThread *thread, const JSHandle<JSTaggedValue> &obj,
                                           const JSHandle<JSTaggedValue> &pattern, const JSHandle<JSTaggedValue> &flags);
@@ -116,6 +125,15 @@ private:
                                           const JSHandle<JSTaggedValue> &flags);
     static JSTaggedValue RegExpReplaceFast(JSThread *thread, JSHandle<JSTaggedValue> &regexp,
                                            JSHandle<EcmaString> inputString, uint32_t inputLength);
+    static JSTaggedValue RegExpTestFast(JSThread *thread, JSHandle<JSTaggedValue> &regexp,
+                                        const JSHandle<JSTaggedValue> &inputString, bool useCache);
+    static JSTaggedValue RegExpExecForTestFast(JSThread *thread, JSHandle<JSTaggedValue> &regexp,
+                                               const JSHandle<JSTaggedValue> &inputStr, bool useCache);
+    static bool IsFastRegExp(JSThread *thread, JSHandle<JSTaggedValue> &regexp);
+    // 22.2.7.8 MakeMatchIndicesIndexPairArray ( S, indices, groupNames, hasGroups )
+    static JSHandle<JSTaggedValue> MakeMatchIndicesIndexPairArray(JSThread* thread,
+        const std::vector<std::pair<JSTaggedValue, JSTaggedValue>>& indices,
+        const std::vector<JSHandle<JSTaggedValue>>& groupNames, bool hasGroups);
 };
 
 class RegExpExecResultCache : public TaggedArray {
@@ -124,7 +142,9 @@ public:
     enum CacheType {
         REPLACE_TYPE,
         SPLIT_TYPE,
         MATCH_TYPE,
-        EXEC_TYPE
+        EXEC_TYPE,
+        INTERMEDIATE_REPLACE_TYPE,
+        TEST_TYPE
     };
     static RegExpExecResultCache *Cast(TaggedObject *object)
     {
         return reinterpret_cast<RegExpExecResultCache *>(object);
     }
@@ -135,21 +155,24 @@
     JSTaggedValue FindCachedResult(JSThread *thread, const JSHandle<JSTaggedValue> &patten,
                                    const JSHandle<JSTaggedValue> &flags, const JSHandle<JSTaggedValue> &input,
                                    CacheType type, const JSHandle<JSTaggedValue> &regexp,
-                                   JSTaggedValue extend = JSTaggedValue::Undefined());
+                                   JSTaggedValue lastIndexInput, JSTaggedValue extend = JSTaggedValue::Undefined(),
+                                   bool isIntermediateResult = false);
     // extend as an additional parameter to judge cached
     static void AddResultInCache(JSThread *thread, JSHandle<RegExpExecResultCache> cache,
                                  const JSHandle<JSTaggedValue> &patten, const JSHandle<JSTaggedValue> &flags,
                                  const JSHandle<JSTaggedValue> &input, const JSHandle<JSTaggedValue> &resultArray,
-                                 CacheType type, uint32_t lastIndex, JSTaggedValue extend = JSTaggedValue::Undefined());
+                                 CacheType type, uint32_t lastIndexInput, uint32_t lastIndex,
+                                 JSTaggedValue extend = JSTaggedValue::Undefined(),
+                                 bool isIntermediateResult = false);
     static void GrowRegexpCache(JSThread *thread, JSHandle<RegExpExecResultCache> cache);
 
     void ClearEntry(JSThread *thread, int entry);
     void SetEntry(JSThread *thread, int entry, JSTaggedValue &patten, JSTaggedValue &flags, JSTaggedValue &input,
-                  JSTaggedValue &lastIndexValue, JSTaggedValue &extendValue);
+                  JSTaggedValue &lastIndexInputValue, JSTaggedValue &lastIndexValue, JSTaggedValue &extendValue);
     void UpdateResultArray(JSThread *thread, int entry, JSTaggedValue resultArray, CacheType type);
     bool Match(int entry, JSTaggedValue &pattenStr, JSTaggedValue &flagsStr, JSTaggedValue &inputStr,
-               JSTaggedValue &extend);
+               JSTaggedValue &lastIndexInputValue, JSTaggedValue &extend);
 
     inline void SetHitCount(JSThread *thread, int hitCount)
     {
         Set(thread, CACHE_HIT_COUNT_INDEX, JSTaggedValue(hitCount));
@@ -231,14 +254,128 @@ private:
     static constexpr int PATTERN_INDEX = 0;
     static constexpr int FLAG_INDEX = 1;
     static constexpr int INPUT_STRING_INDEX = 2;
-    static constexpr int LAST_INDEX_INDEX = 3;
-    static constexpr int RESULT_REPLACE_INDEX = 4;
-    static constexpr int RESULT_SPLIT_INDEX = 5;
-    static constexpr int RESULT_MATCH_INDEX = 6;
-    static constexpr int RESULT_EXEC_INDEX = 7;
+    static constexpr int LAST_INDEX_INPUT_INDEX = 3;
+    static constexpr int LAST_INDEX_INDEX = 4;
+    static constexpr int RESULT_REPLACE_INDEX = 5;
+    static constexpr int RESULT_SPLIT_INDEX = 6;
+    static constexpr int RESULT_MATCH_INDEX = 7;
+    static constexpr int RESULT_EXEC_INDEX = 8;
+    static constexpr int RESULT_INTERMEDIATE_REPLACE_INDEX = 9;
+    static constexpr int RESULT_TEST_INDEX = 10;
     // Extend index used for saving an additional parameter to judge cached
-    static constexpr int EXTEND_INDEX = 8;
-    static constexpr int ENTRY_SIZE = 9;
+    static constexpr int EXTEND_INDEX = 11;
+    static constexpr int ENTRY_SIZE = 12;
+};
+
+class RegExpGlobalResult : public TaggedArray {
+public:
+    static RegExpGlobalResult *Cast(TaggedObject *object)
+    {
+        return reinterpret_cast<RegExpGlobalResult *>(object);
+    }
+    static JSTaggedValue CreateGlobalResultTable(JSThread *thread);
+
+    void SetCapture(JSThread *thread, int index, JSTaggedValue value)
+    {
+        ASSERT(CAPTURE_START_INDEX + index - 1 < GLOBAL_TABLE_SIZE);
+        Set(thread, CAPTURE_START_INDEX + index - 1, value);
+    }
+
+    void ResetDollar(JSThread *thread)
+    {
+        for (uint32_t i = 0; i < DOLLAR_NUMBER; i++) {
+            Set(thread, CAPTURE_START_INDEX + i, JSTaggedValue::Hole());
+        }
+    }
+
+    template <int N>
+    static JSTaggedValue GetCapture(JSThread *thread)
+    {
+        JSHandle<RegExpGlobalResult> globalTable(thread->GetCurrentEcmaContext()->GetRegExpGlobalResult());
+        JSTaggedValue res = globalTable->Get(CAPTURE_START_INDEX + N - 1);
+        int captureNum = globalTable->GetTotalCaptureCounts().GetInt();
+        if (res.IsHole() && (N < captureNum)) {
+            int32_t startIndex = globalTable->GetStartOfCaptureIndex(N).GetInt();
+            int32_t endIndex = globalTable->GetEndOfCaptureIndex(N).GetInt();
+            int32_t len = endIndex - startIndex;
+            if (len < 0) {
+                res = JSTaggedValue::Undefined();
+            } else {
+                res = JSTaggedValue(EcmaStringAccessor::FastSubString(thread->GetEcmaVM(),
+                    JSHandle<EcmaString>(thread, EcmaString::Cast(globalTable->GetInputString())),
+                    static_cast<uint32_t>(startIndex), static_cast<uint32_t>(len)));
+            }
+            globalTable->Set(thread, CAPTURE_START_INDEX + N - 1, res);
+        } else if (res.IsHole()) {
+            res
= thread->GetEcmaVM()->GetFactory()->GetEmptyString().GetTaggedValue(); + globalTable->Set(thread, CAPTURE_START_INDEX + N - 1, res); + } + return res; + } + + void SetTotalCaptureCounts(JSThread *thread, JSTaggedValue counts) + { + Set(thread, TOTAL_CAPTURE_COUNTS_INDEX, counts); + } + + JSTaggedValue GetTotalCaptureCounts() + { + return Get(TOTAL_CAPTURE_COUNTS_INDEX); + } + + void SetEndIndex(JSThread *thread, JSTaggedValue endIndex) + { + Set(thread, END_INDEX, endIndex); + } + + JSTaggedValue GetEndIndex() + { + return Get(END_INDEX); + } + + void SetInputString(JSThread *thread, JSTaggedValue string) + { + Set(thread, INPUT_STRING_INDEX, string); + } + + JSTaggedValue GetInputString() + { + return Get(INPUT_STRING_INDEX); + } + + void SetStartOfCaptureIndex(JSThread *thread, uint32_t index, JSTaggedValue value) + { + Set(thread, FIRST_CAPTURE_INDEX + index * 2, value); // 2 : double + } + + void SetEndOfCaptureIndex(JSThread *thread, uint32_t index, JSTaggedValue value) + { + Set(thread, FIRST_CAPTURE_INDEX + index * 2 + 1, value); // 2 : double + } + + JSTaggedValue GetStartOfCaptureIndex(uint32_t index) + { + return Get(FIRST_CAPTURE_INDEX + index * 2); // 2 : double + } + + JSTaggedValue GetEndOfCaptureIndex(uint32_t index) + { + return Get(FIRST_CAPTURE_INDEX + index * 2 + 1); // 2 : double + } + + static JSHandle GrowCapturesCapacity(JSThread *thread, + JSHandleresult, uint32_t length); + + static constexpr int FIRST_CAPTURE_INDEX = 12; // capture index starts here + +private: + static constexpr int GLOBAL_TABLE_SIZE = 12; // initial length + static constexpr int DOLLAR_NUMBER = 9; + static constexpr int CAPTURE_START_INDEX = 0; + + static constexpr int TOTAL_CAPTURE_COUNTS_INDEX = 9; // save total capture size + static constexpr int INPUT_STRING_INDEX = 10; // save input string + static constexpr int END_INDEX = 11; // save last index + static constexpr int INITIAL_CAPTURE_INDICES = 18; // length: pairs of capture start index and end index }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_REGEXP_H diff --git a/ecmascript/builtins/builtins_set.cpp b/ecmascript/builtins/builtins_set.cpp index 9a3d709f757cb4eef1fa0e17f1d1689f7208b36f..2b8883d5ffd71cbe3faeb9352bde52bd4896068e 100644 --- a/ecmascript/builtins/builtins_set.cpp +++ b/ecmascript/builtins/builtins_set.cpp @@ -117,8 +117,7 @@ JSTaggedValue BuiltinsSet::Add(EcmaRuntimeCallInfo *argv) } JSHandle value(GetCallArg(argv, 0)); - JSHandle set(JSTaggedValue::ToObject(thread, self)); - + JSHandle set(self); JSSet::Add(thread, set, value); return set.GetTaggedValue(); } @@ -136,7 +135,7 @@ JSTaggedValue BuiltinsSet::Clear(EcmaRuntimeCallInfo *argv) if (!self->IsJSSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSHandle set(thread, JSSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle set(self); JSSet::Clear(thread, set); return JSTaggedValue::Undefined(); } @@ -154,7 +153,7 @@ JSTaggedValue BuiltinsSet::Delete(EcmaRuntimeCallInfo *argv) THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSHandle set(thread, JSSet::Cast(*JSTaggedValue::ToObject(thread, self))); + JSHandle set(self); JSHandle value = GetCallArg(argv, 0); bool flag = JSSet::Delete(thread, set, value); return GetTaggedBoolean(flag); @@ -172,7 +171,7 @@ JSTaggedValue BuiltinsSet::Has(EcmaRuntimeCallInfo *argv) if (!self->IsJSSet()) { THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception()); } - JSSet *jsSet = 
JSSet::Cast(*JSTaggedValue::ToObject(thread, self));
+    JSSet* jsSet = JSSet::Cast(self.GetTaggedValue().GetTaggedObject());
     JSHandle<JSTaggedValue> value = GetCallArg(argv, 0);
     bool flag = jsSet->Has(value.GetTaggedValue());
     return GetTaggedBoolean(flag);
@@ -202,7 +201,7 @@ JSTaggedValue BuiltinsSet::ForEach(EcmaRuntimeCallInfo *argv)
     // 6.Let entries be the List that is the value of S’s [[SetData]] internal slot.
     JSMutableHandle<LinkedHashSet> hashSet(thread, set->GetLinkedSet());
-    const int32_t argsLength = 3;
+    const uint32_t argsLength = 3;
     int index = 0;
     int totalElements = hashSet->NumberOfElements() + hashSet->NumberOfDeletedElements();
     JSHandle<JSTaggedValue> undefined = thread->GlobalConstants()->GetHandledUndefined();
@@ -249,8 +248,8 @@ JSTaggedValue BuiltinsSet::GetSize(EcmaRuntimeCallInfo *argv)
     if (!self->IsJSSet()) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSSet", JSTaggedValue::Exception());
     }
-    JSSet *jsSet = JSSet::Cast(*JSTaggedValue::ToObject(thread, self));
-    int count = jsSet->GetSize();
+    JSSet* jsSet = JSSet::Cast(self.GetTaggedValue().GetTaggedObject());
+    uint32_t count = jsSet->GetSize();
     return JSTaggedValue(count);
 }
@@ -262,6 +261,7 @@ JSTaggedValue BuiltinsSet::Entries(EcmaRuntimeCallInfo *argv)
     [[maybe_unused]] EcmaHandleScope handleScope(thread);
     JSHandle<JSTaggedValue> self = GetThis(argv);
     JSHandle<JSTaggedValue> iter = JSSetIterator::CreateSetIterator(thread, self, IterationKind::KEY_AND_VALUE);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     return iter.GetTaggedValue();
 }
@@ -273,6 +273,7 @@ JSTaggedValue BuiltinsSet::Values(EcmaRuntimeCallInfo *argv)
     [[maybe_unused]] EcmaHandleScope handleScope(thread);
     JSHandle<JSTaggedValue> self = GetThis(argv);
     JSHandle<JSTaggedValue> iter = JSSetIterator::CreateSetIterator(thread, self, IterationKind::VALUE);
+    RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);
     return iter.GetTaggedValue();
 }
 } // namespace panda::ecmascript::builtins
diff --git a/ecmascript/builtins/builtins_set.h b/ecmascript/builtins/builtins_set.h
index aa00a122ff5c3fd33b8a8538fcb7cc89e09444eb..f3b0985dc5bb131875709dbf5f9a8fd0f59dac01 100644
--- a/ecmascript/builtins/builtins_set.h
+++ b/ecmascript/builtins/builtins_set.h
@@ -19,6 +19,28 @@
 #include "ecmascript/base/builtins_base.h"
 #include "ecmascript/ecma_runtime_call_info.h"
 
+// List of functions in Set, excluding the constructor and '@@' properties.
+// V(name, func, length, stubIndex)
+// where BuiltinsSet::func refers to the native implementation of Set.prototype[name].
+// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available.
+// The following functions are not listed: +// - Set.prototype.keys ( ), which is strictly equal to Set.prototype.values +#define BUILTIN_SET_PROTOTYPE_FUNCTIONS(V) \ + /* Set.prototype.add ( value ) */ \ + V("add", Add, 1, SetAdd) \ + /* Set.prototype.clear ( ) */ \ + V("clear", Clear, 0, SetClear) \ + /* Set.prototype.delete ( value ) */ \ + V("delete", Delete, 1, SetDelete) \ + /* Set.prototype.entries ( ) */ \ + V("entries", Entries, 0, SetEntries) \ + /* Set.prototype.forEach ( callbackfn [ , thisArg ] ) */ \ + V("forEach", ForEach, 1, SetForEach) \ + /* Set.prototype.has ( value ) */ \ + V("has", Has, 1, SetHas) \ + /* Set.prototype.values ( ) */ \ + V("values", Values, 0, SetValues) + namespace panda::ecmascript::builtins { class BuiltinsSet : public base::BuiltinsBase { public: @@ -42,6 +64,33 @@ public: static JSTaggedValue GetSize(EcmaRuntimeCallInfo *argv); // 23.2.3.10 static JSTaggedValue Values(EcmaRuntimeCallInfo *argv); + + // Excluding the '@@' internal properties + static Span GetSetPrototypeFunctions() + { + return Span(SET_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 5 : 5 more inline properties in Set.prototype + // (1) Set.prototype.constructor + // (2) Set.prototype [ @@toStringTag ] + // (3) Set.prototype [ @@iterator ] + // (4) get Set.prototype.size + // (5) Set.prototype.keys, which is not included in BuiltinsSet::GetSetPrototypeFunctions() + return GetSetPrototypeFunctions().Size() + 5; + } + +private: +#define BUILTIN_SET_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsSet::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array SET_PROTOTYPE_FUNCTIONS = { + BUILTIN_SET_PROTOTYPE_FUNCTIONS(BUILTIN_SET_FUNCTION_ENTRY) + }; + +#undef BUILTIN_SET_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_SET_H diff --git a/ecmascript/builtins/builtins_string.cpp b/ecmascript/builtins/builtins_string.cpp index eb4efd1a1597f0f175a5d948adf2a588698c67a4..01b72d050d22d2cb09bf67c2f8259fba5415c7cd 100644 --- a/ecmascript/builtins/builtins_string.cpp +++ b/ecmascript/builtins/builtins_string.cpp @@ -40,6 +40,7 @@ #include "ecmascript/js_tagged_value-inl.h" #include "ecmascript/mem/c_containers.h" #include "ecmascript/object_factory.h" +#include "ecmascript/property_detector-inl.h" #include "ecmascript/tagged_array-inl.h" #include "ecmascript/tagged_array.h" #ifdef ARK_SUPPORT_INTL @@ -116,12 +117,7 @@ JSTaggedValue BuiltinsString::FromCharCode(EcmaRuntimeCallInfo *argv) valueTable.emplace_back(nextCv); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); } - std::u16string u16str = base::StringHelper::Utf16ToU16String(valueTable.data(), argLength); - const char16_t *constChar16tData = u16str.data(); - auto *char16tData = const_cast(constChar16tData); - auto *uint16tData = reinterpret_cast(char16tData); - uint32_t u16strSize = u16str.size(); - return factory->NewFromUtf16Literal(uint16tData, u16strSize).GetTaggedValue(); + return factory->NewFromUtf16Literal(valueTable.data(), valueTable.size()).GetTaggedValue(); } // 21.1.2.2 @@ -160,13 +156,13 @@ JSTaggedValue BuiltinsString::FromCodePoint(EcmaRuntimeCallInfo *argv) ((static_cast(cp) - ENCODE_SECOND_FACTOR) % ENCODE_FIRST_FACTOR) + ENCODE_TRAIL_LOW; std::u16string nextU16str1 = base::StringHelper::Utf16ToU16String(&cu1, 1); std::u16string nextU16str2 = base::StringHelper::Utf16ToU16String(&cu2, 1); - u16str = base::StringHelper::Append(u16str, nextU16str1); - u16str = 
base::StringHelper::Append(u16str, nextU16str2); + base::StringHelper::InplaceAppend(u16str, nextU16str1); + base::StringHelper::InplaceAppend(u16str, nextU16str2); u16strSize++; } else { auto u16tCp = static_cast(cp); std::u16string nextU16str = base::StringHelper::Utf16ToU16String(&u16tCp, 1); - u16str = base::StringHelper::Append(u16str, nextU16str); + base::StringHelper::InplaceAppend(u16str, nextU16str); } } const char16_t *constChar16tData = u16str.data(); @@ -247,6 +243,7 @@ JSTaggedValue BuiltinsString::CharAt(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -277,6 +274,7 @@ JSTaggedValue BuiltinsString::CharCodeAt(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -307,6 +305,7 @@ JSTaggedValue BuiltinsString::CodePointAt(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisFlat(thread, EcmaStringAccessor::Flatten(thread->GetEcmaVM(), thisHandle)); @@ -338,36 +337,24 @@ JSTaggedValue BuiltinsString::Concat(EcmaRuntimeCallInfo *argv) BUILTINS_API_TRACE(argv->GetThread(), String, Concat); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + auto ecmaVm = thread->GetEcmaVM(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t argLength = argv->GetArgsNumber(); if (argLength == 0) { return thisHandle.GetTaggedValue(); } - std::u16string u16strThis; - std::u16string u16strNext; - bool canBeCompress = true; - u16strThis = EcmaStringAccessor(thisHandle).ToU16String(); - if (EcmaStringAccessor(thisHandle).IsUtf16()) { - canBeCompress = false; - } for (uint32_t i = 0; i < argLength; i++) { JSHandle nextTag = BuiltinsString::GetCallArg(argv, i); JSHandle nextHandle = JSTaggedValue::ToString(thread, nextTag); - u16strNext = EcmaStringAccessor(nextHandle).ToU16String(); - if (EcmaStringAccessor(nextHandle).IsUtf16()) { - canBeCompress = false; - } - u16strThis = base::StringHelper::Append(u16strThis, u16strNext); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, thisHandle, nextHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + thisHandle = JSHandle(thread, tempStr); } - const char16_t *constChar16tData = 
u16strThis.data(); - auto *char16tData = const_cast(constChar16tData); - auto *uint16tData = reinterpret_cast(char16tData); - uint32_t u16strSize = u16strThis.size(); - return canBeCompress ? factory->NewFromUtf16LiteralCompress(uint16tData, u16strSize).GetTaggedValue() : - factory->NewFromUtf16LiteralNotCompress(uint16tData, u16strSize).GetTaggedValue(); + return thisHandle.GetTaggedValue(); } // 21.1.3.5 String.prototype.constructor @@ -379,6 +366,7 @@ JSTaggedValue BuiltinsString::EndsWith(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -421,6 +409,7 @@ JSTaggedValue BuiltinsString::Includes(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isRegexp = JSObject::IsRegExp(thread, searchTag); @@ -456,6 +445,7 @@ JSTaggedValue BuiltinsString::IndexOf(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t thisLen = EcmaStringAccessor(thisHandle).GetLength(); @@ -489,6 +479,7 @@ JSTaggedValue BuiltinsString::LastIndexOf(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -525,6 +516,7 @@ JSTaggedValue BuiltinsString::LocaleCompare(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thatTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); [[maybe_unused]] JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); [[maybe_unused]] JSHandle thatHandle = JSTaggedValue::ToString(thread, thatTag); @@ -576,6 +568,30 @@ JSTaggedValue BuiltinsString::LocaleCompare(EcmaRuntimeCallInfo *argv) #endif } +JSTaggedValue BuiltinsString::LocaleCompareGC(JSThread *thread, JSHandle locales, + JSHandle thisHandle, JSHandle thatHandle, + JSHandle options, bool cacheable) +{ + EcmaVM *ecmaVm = thread->GetEcmaVM(); + ObjectFactory *factory = ecmaVm->GetFactory(); + JSHandle ctor = ecmaVm->GetGlobalEnv()->GetCollatorFunction(); + JSHandle collator = + JSHandle::Cast(factory->NewJSObjectByConstructor(JSHandle(ctor))); + JSHandle initCollator = + JSCollator::InitializeCollator(thread, collator, 
locales, options, cacheable); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + icu::Collator *icuCollator = nullptr; + if (cacheable) { + icuCollator = JSCollator::GetCachedIcuCollator(thread, locales); + ASSERT(icuCollator != nullptr); + } else { + icuCollator = initCollator->GetIcuCollator(); + } + JSTaggedValue result = JSCollator::CompareStrings(icuCollator, thisHandle, thatHandle); + return result; +} + + // 21.1.3.11 JSTaggedValue BuiltinsString::Match(EcmaRuntimeCallInfo *argv) { @@ -585,6 +601,7 @@ JSTaggedValue BuiltinsString::Match(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle matchTag = thread->GetEcmaVM()->GetGlobalEnv()->GetMatchSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -594,7 +611,8 @@ JSTaggedValue BuiltinsString::Match(EcmaRuntimeCallInfo *argv) JSHandle pattern(thread, re->GetOriginalSource()); JSHandle flags(thread, re->GetOriginalFlags()); JSTaggedValue cacheResult = cacheTable->FindCachedResult(thread, pattern, flags, thisTag, - RegExpExecResultCache::MATCH_TYPE, regexp); + RegExpExecResultCache::MATCH_TYPE, regexp, + JSTaggedValue(0)); if (!cacheResult.IsUndefined()) { return cacheResult; } @@ -633,6 +651,7 @@ JSTaggedValue BuiltinsString::MatchAll(EcmaRuntimeCallInfo *argv) const GlobalEnvConstants *globalConst = thread->GlobalConstants(); // 1. Let O be ? RequireObjectCoercible(this value). JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle matchAllTag = thread->GetEcmaVM()->GetGlobalEnv()->GetMatchAllSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -704,6 +723,7 @@ JSTaggedValue BuiltinsString::Normalize(EcmaRuntimeCallInfo *argv) auto vm = thread->GetEcmaVM(); ObjectFactory *factory = vm->GetFactory(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_VALUE_IF_ABRUPT_COMPLETION(thread, JSTaggedValue::Exception()); JSHandle formValue; @@ -769,6 +789,7 @@ JSTaggedValue BuiltinsString::Repeat(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); uint32_t thisLen = EcmaStringAccessor(thisHandle).GetLength(); @@ -835,6 +856,13 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) } } + if (searchTag->IsJSRegExp() && PropertyDetector::IsRegExpReplaceDetectorValid(env)) { + JSTaggedValue proto = JSObject::GetPrototype(JSHandle(searchTag)); + if (proto == env->GetTaggedRegExpPrototype()) { + return BuiltinsRegExp::ReplaceInternal(thread, searchTag, thisTag, replaceTag); + } + } + // If searchValue is neither undefined nor null, then if (searchTag->IsECMAObject()) { JSHandle replaceKey = env->GetReplaceSymbol(); @@ -845,7 +873,7 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) // If replacer is not undefined, then if 
(!replaceMethod->IsUndefined()) { // Return Call(replacer, searchValue, «O, replaceValue»). - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceMethod, searchTag, undefined, argsLength); @@ -883,12 +911,13 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) // If functionalReplace is true, then if (replaceTag->IsCallable()) { // Let replValue be Call(replaceValue, undefined,«matched, pos, and string»). - const int32_t argsLength = 3; // 3: «matched, pos, and string» + const uint32_t argsLength = 3; // 3: «matched, pos, and string» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceTag, undefined, undefined, argsLength); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(searchString.GetTaggedValue(), JSTaggedValue(pos), thisString.GetTaggedValue()); JSTaggedValue replStrDeocodeValue = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); replHandle.Update(replStrDeocodeValue); } else { // Let captures be an empty List. @@ -899,6 +928,7 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) replHandle.Update(GetSubstitution(thread, searchString, thisString, pos, capturesList, undefined, replacement)); } JSHandle realReplaceStr = JSTaggedValue::ToString(thread, replHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let tailPos be pos + the number of code units in matched. int32_t tailPos = pos + static_cast(EcmaStringAccessor(searchString).GetLength()); // Let newString be the String formed by concatenating the first pos code units of string, @@ -911,8 +941,12 @@ JSTaggedValue BuiltinsString::Replace(EcmaRuntimeCallInfo *argv) auto thisLen = EcmaStringAccessor(thisString).GetLength(); JSHandle suffixString(thread, EcmaStringAccessor::FastSubString(ecmaVm, thisString, tailPos, thisLen - tailPos)); - JSHandle tempString(thread, EcmaStringAccessor::Concat(ecmaVm, prefixString, realReplaceStr)); - return JSTaggedValue(EcmaStringAccessor::Concat(ecmaVm, tempString, suffixString)); + EcmaString *tempStr = EcmaStringAccessor::Concat(ecmaVm, prefixString, realReplaceStr); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle tempString(thread, tempStr); + EcmaString *resultStr = EcmaStringAccessor::Concat(ecmaVm, tempString, suffixString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return JSTaggedValue(resultStr); } JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) @@ -967,6 +1001,7 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) JSHandle undefined = globalConst->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceMethod, searchTag, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(thisTag.GetTaggedValue(), replaceTag.GetTaggedValue()); return JSFunction::Call(info); } @@ -1005,11 +1040,13 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) // If functionalReplace is true, then if (replaceTag->IsCallable()) { // Let replValue be Call(replaceValue, undefined,«matched, pos, and string»). 
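// ---------------------------------------------------------------------------
// Note (illustrative sketch, not part of the patch): for a callable
// replaceValue the spec invokes it as replacer(matched, position, subject) and
// splices the returned string between the prefix and suffix, which is what the
// FastSubString + Concat sequence above does. A std C++ model of a single
// occurrence; ReplaceFirst is a hypothetical name:
#include <cstddef>
#include <functional>
#include <string>
inline std::string ReplaceFirst(const std::string &subject, const std::string &search,
    const std::function<std::string(const std::string &, std::size_t, const std::string &)> &replacer)
{
    std::size_t pos = subject.find(search);
    if (pos == std::string::npos) {
        return subject; // no match: the subject is returned unchanged
    }
    return subject.substr(0, pos) + replacer(search, pos, subject) +
           subject.substr(pos + search.size());
}
// ReplaceAll repeats this step, resuming from the end of each replacement.
// ---------------------------------------------------------------------------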
- const int32_t argsLength = 3; // 3: «matched, pos, and string» + const uint32_t argsLength = 3; // 3: «matched, pos, and string» EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, replaceTag, undefined, undefined, argsLength); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); info->SetCallArg(searchString.GetTaggedValue(), JSTaggedValue(pos), thisString.GetTaggedValue()); JSTaggedValue replStrDeocodeValue = JSFunction::Call(info); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); replHandle.Update(replStrDeocodeValue); } else { // Let captures be an empty List. @@ -1021,6 +1058,7 @@ JSTaggedValue BuiltinsString::ReplaceAll(EcmaRuntimeCallInfo *argv) capturesList, undefined, replacement)); } JSHandle realReplaceStr = JSTaggedValue::ToString(thread, replHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let tailPos be pos + the number of code units in matched. // Let newString be the String formed by concatenating the first pos code units of string, // replStr, and the trailing substring of string starting at index tailPos. @@ -1201,7 +1239,7 @@ JSTaggedValue BuiltinsString::GetSubstitution(JSThread *thread, const JSHandle captureName(capture); + JSHandle captureName = JSTaggedValue::ToString(thread, capture); stringBuilder += EcmaStringAccessor(captureName).ToU16String(); if (EcmaStringAccessor(captureName).IsUtf16()) { canBeCompress = false; @@ -1254,6 +1292,7 @@ JSTaggedValue BuiltinsString::Search(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); const GlobalEnvConstants *globalConst = thread->GlobalConstants(); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle regexp = BuiltinsString::GetCallArg(argv, 0); JSHandle searchTag = thread->GetEcmaVM()->GetGlobalEnv()->GetSearchSymbol(); JSHandle undefined = globalConst->GetHandledUndefined(); @@ -1291,6 +1330,7 @@ JSTaggedValue BuiltinsString::Slice(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -1335,10 +1375,32 @@ JSTaggedValue BuiltinsString::Split(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); - JSHandle thisObj(thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle seperatorTag = BuiltinsString::GetCallArg(argv, 0); JSHandle limitTag = BuiltinsString::GetCallArg(argv, 1); + if (thisTag->IsString() && seperatorTag->IsString()) { + JSHandle thisString(thisTag); + JSHandle seperatorString(seperatorTag); + auto thisLength = EcmaStringAccessor(thisString).GetLength(); + auto seperatorLength = EcmaStringAccessor(seperatorString).GetLength(); + if (limitTag->IsUndefined() && thisLength != 0 && seperatorLength != 0) { + return CreateArrayThisStringAndSeperatorStringAreNotEmpty( + thread, ecmaVm, thisString, seperatorString, thisLength, seperatorLength); + } + uint32_t lim = UINT32_MAX - 1; + if (!limitTag->IsUndefined()) { + lim = JSTaggedValue::ToInteger(thread, limitTag).ToUint32(); + } + // ReturnIfAbrupt(lim). 
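// ---------------------------------------------------------------------------
// Note (illustrative sketch, not part of the patch): the split fast path folds
// an undefined limit into the sentinel UINT32_MAX - 1 and otherwise applies a
// ToUint32-style conversion; the sentinel is also what lets the unlimited case
// use the split-result cache. Modeled in std C++; NormalizeSplitLimit is a
// hypothetical name:
#include <cmath>
#include <cstdint>
#include <optional>
inline uint32_t NormalizeSplitLimit(std::optional<double> limit)
{
    constexpr uint32_t kUnbounded = UINT32_MAX - 1; // sentinel: no explicit limit
    if (!limit) {
        return kUnbounded;
    }
    if (!std::isfinite(*limit)) {
        return 0; // ToUint32(NaN) and ToUint32(+/-Infinity) are 0
    }
    double wrapped = std::fmod(std::trunc(*limit), 4294967296.0); // mod 2^32
    if (wrapped < 0) {
        wrapped += 4294967296.0;
    }
    return static_cast<uint32_t>(wrapped);
}
// ---------------------------------------------------------------------------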
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + if (lim == 0) { + JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + return resultArray.GetTaggedValue(); + } + return CreateArrayBySplitString(thread, ecmaVm, thisString, seperatorString, thisLength, seperatorLength, lim); + } + // If separator is neither undefined nor null, then if (seperatorTag->IsECMAObject()) { JSHandle splitKey = env->GetSplitSymbol(); @@ -1347,7 +1409,7 @@ JSTaggedValue BuiltinsString::Split(EcmaRuntimeCallInfo *argv) RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); if (!splitter->IsUndefined()) { // Return Call(splitter, separator, «‍O, limit»). - const int32_t argsLength = 2; + const uint32_t argsLength = 2; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); EcmaRuntimeCallInfo *info = EcmaInterpreter::NewRuntimeCallInfo(thread, splitter, seperatorTag, undefined, argsLength); @@ -1359,76 +1421,131 @@ JSTaggedValue BuiltinsString::Split(EcmaRuntimeCallInfo *argv) // Let S be ToString(O). JSHandle thisString = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - // Let A be ArrayCreate(0). - JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - uint32_t arrayLength = 0; + // If limit is undefined, let lim = 2^53–1; else let lim = ToLength(limit). - uint32_t lim = 0; - if (limitTag->IsUndefined()) { - lim = UINT32_MAX - 1; - } else { + uint32_t lim = UINT32_MAX - 1; + if (!limitTag->IsUndefined()) { lim = JSTaggedValue::ToInteger(thread, limitTag).ToUint32(); } // ReturnIfAbrupt(lim). RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Let s be the number of elements in S. + auto thisLength = EcmaStringAccessor(thisString).GetLength(); + JSHandle seperatorString = JSTaggedValue::ToString(thread, seperatorTag); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // If lim = 0, return A. if (lim == 0) { + JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return resultArray.GetTaggedValue(); } - // Let s be the number of elements in S. - int32_t thisLength = static_cast(EcmaStringAccessor(thisString).GetLength()); - JSHandle seperatorString = JSTaggedValue::ToString(thread, seperatorTag); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + auto seperatorLength = EcmaStringAccessor(seperatorString).GetLength(); + // If S is undefined or (this.length = 0 and S.length != 0), return array of size is 1 containing this string if (seperatorTag->IsUndefined()) { - // Perform CreateDataProperty(A, "0", S). 
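// ---------------------------------------------------------------------------
// Note (illustrative sketch, not part of the patch): the case split that
// CreateArrayBySplitString / CreateArrayFromString implement below, condensed
// over std::string; SplitString is a hypothetical name:
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
inline std::vector<std::string> SplitString(const std::string &s, const std::string &sep, uint32_t lim)
{
    std::vector<std::string> out;
    if (lim == 0) {
        return out;
    }
    if (s.empty()) {
        if (!sep.empty()) {
            out.push_back(s); // "".split("x") -> [""]
        }
        return out;           // "".split("")  -> []
    }
    if (sep.empty()) {        // split into single characters, capped by lim
        for (std::size_t i = 0; i < s.size() && out.size() < lim; ++i) {
            out.push_back(s.substr(i, 1));
        }
        return out;
    }
    std::size_t index = 0;
    for (std::size_t pos = s.find(sep); pos != std::string::npos; pos = s.find(sep, index)) {
        out.push_back(s.substr(index, pos - index));
        if (out.size() == lim) {
            return out;
        }
        index = pos + sep.size();
    }
    out.push_back(s.substr(index)); // trailing segment after the last separator
    return out;
}
// ---------------------------------------------------------------------------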
+ JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(1))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Perform CreateDataProperty(A, "0", S) via CreateDataProperty's fast path JSObject::CreateDataProperty(thread, resultArray, 0, JSHandle(thisString)); ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty(A, \"0\", S) can't throw exception"); return resultArray.GetTaggedValue(); } - // If S.length = 0, then - if (thisLength == 0) { - if (EcmaStringAccessor::IndexOf(ecmaVm, thisString, seperatorString) != -1) { + return CreateArrayBySplitString(thread, ecmaVm, thisString, seperatorString, thisLength, seperatorLength, lim); +} + +JSTaggedValue BuiltinsString::CreateArrayFromString(JSThread *thread, EcmaVM *ecmaVm, + const JSHandle &thisString, uint32_t thisLength, uint32_t lim) +{ + uint32_t actualLength = std::min(thisLength, lim); + JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(actualLength))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + for (uint32_t i = 0; i < actualLength; ++i) { + EcmaString *elementString = EcmaStringAccessor::FastSubString(ecmaVm, thisString, i, 1); + JSHandle elementTag(thread, elementString); + // Perform CreateDataProperty(A, ToString(i), element) via CreateDataProperty's fast path + JSObject::CreateDataProperty(thread, resultArray, i, elementTag); + ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty can't throw exception"); + } + return resultArray.GetTaggedValue(); +} + +JSTaggedValue BuiltinsString::CreateArrayBySplitString(JSThread *thread, EcmaVM *ecmaVm, + const JSHandle &thisString, const JSHandle &seperatorString, + uint32_t thisLength, uint32_t seperatorLength, uint32_t lim) +{ + if (thisLength != 0) { + if (seperatorLength != 0) { + return CreateArrayThisStringAndSeperatorStringAreNotEmpty( + thread, ecmaVm, thisString, seperatorString, thisLength, seperatorLength, lim); + } + return CreateArrayFromString(thread, ecmaVm, thisString, thisLength, lim); + } else { + if (seperatorLength != 0) { + JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(1))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // Perform CreateDataProperty(A, "0", S) via CreateDataProperty's fast path + JSObject::CreateDataProperty(thread, resultArray, 0, JSHandle(thisString)); + ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty(A, \"0\", S) can't throw exception"); return resultArray.GetTaggedValue(); } - JSObject::CreateDataProperty(thread, resultArray, 0, JSHandle(thisString)); - ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty(A, \"0\", S) can't throw exception"); + JSHandle resultArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return resultArray.GetTaggedValue(); } +} - int32_t seperatorLength = static_cast(EcmaStringAccessor(seperatorString).GetLength()); - if (seperatorLength == 0) { - for (int32_t i = 0; i < thisLength; ++i) { - EcmaString *elementString = EcmaStringAccessor::FastSubString(ecmaVm, thisString, i, 1); - JSHandle elementTag(thread, elementString); - JSObject::CreateDataProperty(thread, resultArray, arrayLength, elementTag); - ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty can't throw exception"); - ++arrayLength; - if (arrayLength == lim) { - return resultArray.GetTaggedValue(); - } +JSTaggedValue BuiltinsString::CreateArrayThisStringAndSeperatorStringAreNotEmpty(JSThread *thread, + EcmaVM *ecmaVm, const JSHandle &thisString, const JSHandle &seperatorString, + uint32_t thisLength, uint32_t
seperatorLength, uint32_t lim) +{ + if (lim == UINT32_MAX - 1) { + JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetStringSplitResultCache()); + JSTaggedValue cacheResult = StringSplitResultCache::FindCachedResult(thread, cacheTable, thisString, + seperatorString); + if (cacheResult != JSTaggedValue::Undefined()) { + JSHandle resultArray(JSArray::CreateArrayFromList(thread, + JSHandle(thread, cacheResult))); + return resultArray.GetTaggedValue(); } - return resultArray.GetTaggedValue(); } + uint32_t arrayLength = 0; + std::vector posArray; int32_t index = 0; int32_t pos = EcmaStringAccessor::IndexOf(ecmaVm, thisString, seperatorString); while (pos != -1) { - EcmaString *elementString = EcmaStringAccessor::FastSubString(ecmaVm, thisString, index, pos - index); - JSHandle elementTag(thread, elementString); - JSObject::CreateDataProperty(thread, resultArray, arrayLength, elementTag); - ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty can't throw exception"); + posArray.emplace_back(pos); ++arrayLength; if (arrayLength == lim) { - return resultArray.GetTaggedValue(); + break; } - index = pos + seperatorLength; + index = pos + static_cast(seperatorLength); pos = EcmaStringAccessor::IndexOf(ecmaVm, thisString, seperatorString, index); } - EcmaString *elementString = EcmaStringAccessor::FastSubString(ecmaVm, thisString, index, thisLength - index); - JSHandle elementTag(thread, elementString); - JSObject::CreateDataProperty(thread, resultArray, arrayLength, elementTag); - ASSERT_PRINT(!thread->HasPendingException(), "CreateDataProperty can't throw exception"); - return resultArray.GetTaggedValue(); + uint32_t posArrLength = posArray.size(); + arrayLength = lim > posArrLength ? posArrLength + 1 : posArrLength; + return JSArray::ArrayCreateWithInit(thread, arrayLength, + [thread, ecmaVm, &thisString, &seperatorString, &posArray, thisLength, seperatorLength, lim, posArrLength] + (const JSHandle &newElements, [[maybe_unused]] uint32_t length) { + int32_t index = 0; + int32_t pos = 0; + JSMutableHandle elementTag(thread, JSTaggedValue::Undefined()); + for (uint32_t i = 0; i < posArrLength; i++) { + pos = posArray[i]; + EcmaString *elementString = EcmaStringAccessor::GetSubString(ecmaVm, thisString, index, pos - index); + elementTag.Update(JSTaggedValue(elementString)); + newElements->Set(thread, i, elementTag); + index = pos + static_cast(seperatorLength); + } + if (lim > posArrLength) { + EcmaString *elementString = + EcmaStringAccessor::GetSubString(ecmaVm, thisString, index, thisLength - index); + elementTag.Update(JSTaggedValue(elementString)); + newElements->Set(thread, posArrLength, elementTag); + } + if (lim == UINT32_MAX - 1) { + JSHandle cacheTable(thread->GetCurrentEcmaContext()->GetStringSplitResultCache()); + StringSplitResultCache::SetCachedResult(thread, cacheTable, thisString, seperatorString, newElements); + } + }); } // 21.1.3.18 @@ -1441,6 +1558,7 @@ JSTaggedValue BuiltinsString::StartsWith(EcmaRuntimeCallInfo *argv) JSHandle searchTag = BuiltinsString::GetCallArg(argv, 0); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isRegexp = JSObject::IsRegExp(thread, searchTag); @@ -1483,6 +1601,7 @@ JSTaggedValue BuiltinsString::Substring(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle 
thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); int32_t thisLen = static_cast(EcmaStringAccessor(thisHandle).GetLength()); @@ -1504,7 +1623,7 @@ JSTaggedValue BuiltinsString::Substring(EcmaRuntimeCallInfo *argv) int32_t from = std::min(start, end); int32_t to = std::max(start, end); int32_t len = to - from; - return JSTaggedValue(EcmaStringAccessor::FastSubString(thread->GetEcmaVM(), thisHandle, from, len)); + return JSTaggedValue(EcmaStringAccessor::GetSubString(thread->GetEcmaVM(), thisHandle, from, len)); } // 21.1.3.20 @@ -1518,7 +1637,7 @@ JSTaggedValue BuiltinsString::ToLocaleLowerCase(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle obj(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ? ToString(O). JSHandle string = JSTaggedValue::ToString(thread, obj); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1579,7 +1698,7 @@ JSTaggedValue BuiltinsString::ToLocaleUpperCase(EcmaRuntimeCallInfo *argv) // Let O be RequireObjectCoercible(this value). JSHandle obj(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); - + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ? ToString(O). JSHandle string = JSTaggedValue::ToString(thread, obj); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1632,6 +1751,7 @@ JSTaggedValue BuiltinsString::ToLowerCase(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *result = EcmaStringAccessor::ToLower(thread->GetEcmaVM(), thisHandle); @@ -1654,6 +1774,7 @@ JSTaggedValue BuiltinsString::ToUpperCase(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *result = EcmaStringAccessor::ToUpper(thread->GetEcmaVM(), thisHandle); @@ -1668,6 +1789,7 @@ JSTaggedValue BuiltinsString::Trim(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM); @@ -1681,6 +1803,7 @@ JSTaggedValue BuiltinsString::TrimStart(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_START); @@ -1694,6 +1817,7 @@ JSTaggedValue 
BuiltinsString::TrimEnd(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_END); @@ -1707,6 +1831,7 @@ JSTaggedValue BuiltinsString::TrimLeft(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_START); @@ -1720,6 +1845,7 @@ JSTaggedValue BuiltinsString::TrimRight(EcmaRuntimeCallInfo *argv) JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); EcmaString *res = EcmaStringAccessor::Trim(thread, thisHandle, EcmaString::TrimMode::TRIM_END); @@ -1742,6 +1868,7 @@ JSTaggedValue BuiltinsString::GetStringIterator(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); // 1. Let O be RequireObjectCoercible(this value). JSHandle current(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); // Let S be ToString(O). JSHandle string = JSTaggedValue::ToString(thread, current); @@ -1763,6 +1890,7 @@ JSTaggedValue BuiltinsString::SubStr(EcmaRuntimeCallInfo *argv) // 2. Let S be ToString(O). JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisString = JSTaggedValue::ToString(thread, thisTag); // 3. ReturnIfAbrupt(S). @@ -1811,6 +1939,7 @@ JSTaggedValue BuiltinsString::At(EcmaRuntimeCallInfo *argv) // 1. Let O be RequireObjectCoercible(this value). // 2. Let S be ToString(O). 
JSHandle thisTag(JSTaggedValue::RequireObjectCoercible(thread, GetThis(argv))); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1847,6 +1976,7 @@ JSTaggedValue BuiltinsString::GetLength(EcmaRuntimeCallInfo *argv) JSHandle thisHandle = GetThis(argv); JSHandle thisString = JSTaggedValue::ToString(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); return GetTaggedInt(EcmaStringAccessor(thisString).GetLength()); } @@ -1876,6 +2006,7 @@ JSTaggedValue BuiltinsString::Pad(EcmaRuntimeCallInfo *argv, bool isStart) [[maybe_unused]] EcmaHandleScope handleScope(thread); JSHandle thisTag = JSTaggedValue::RequireObjectCoercible(thread, BuiltinsString::GetThis(argv)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle thisHandle = JSTaggedValue::ToString(thread, thisTag); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle lengthTag = GetCallArg(argv, 0); @@ -1890,6 +2021,7 @@ JSTaggedValue BuiltinsString::Pad(EcmaRuntimeCallInfo *argv, bool isStart) stringBuilder = u" "; } else { JSHandle filler = JSTaggedValue::ToString(thread, fillString); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); stringBuilder = EcmaStringAccessor(filler).ToU16String(); } if (stringBuilder.size() == 0) { @@ -1926,4 +2058,60 @@ int32_t BuiltinsString::ConvertDoubleToInt(double d) } return base::NumberHelper::DoubleToInt(d, base::INT32_BITS); } + +JSTaggedValue StringSplitResultCache::CreateCacheTable(const JSThread *thread) +{ + int length = CACHE_SIZE * ENTRY_SIZE; + auto table = static_cast( + *thread->GetEcmaVM()->GetFactory()->NewTaggedArray(length, JSTaggedValue::Undefined())); + return JSTaggedValue(table); +} + +JSTaggedValue StringSplitResultCache::FindCachedResult(const JSThread *thread, + const JSHandle &cache, const JSHandle &thisString, + const JSHandle &pattern) +{ + uint32_t hash = EcmaStringAccessor(thisString).GetHashcode(); + uint32_t entry = hash & (CACHE_SIZE - 1); + uint32_t index = entry * ENTRY_SIZE; + JSTaggedValue cacheThis = cache->Get(index + STRING_INDEX); + JSTaggedValue cachePattern = cache->Get(index + PATTERN_INDEX); + if (!cacheThis.IsString() || !cachePattern.IsString()) { + return JSTaggedValue::Undefined(); + } + JSHandle cacheStringHandle(thread, cacheThis); + JSHandle cachePatternHandle(thread, cachePattern); + + if (EcmaStringAccessor::StringsAreEqual(thread->GetEcmaVM(), thisString, cacheStringHandle) && + EcmaStringAccessor::StringsAreEqual(thread->GetEcmaVM(), pattern, cachePatternHandle)) { + JSHandle cacheArray(thread, cache->Get(index + ARRAY_INDEX)); + uint32_t arrayLength = cacheArray->GetLength(); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle copyArray = factory->NewAndCopyTaggedArray(cacheArray, + arrayLength, arrayLength); + return copyArray.GetTaggedValue(); + } + return JSTaggedValue::Undefined(); +} + +void StringSplitResultCache::SetCachedResult(const JSThread *thread, const JSHandle &cache, + const JSHandle &thisString, const JSHandle &pattern, + const JSHandle &resultArray) +{ + // clone to cache array + uint32_t arrayLength = resultArray->GetLength(); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle newElements(factory->NewTaggedArray(arrayLength)); + for (uint32_t i = 0; i < arrayLength; i++) { + newElements->Set(thread, i, resultArray->Get(i)); + } + uint32_t hash = EcmaStringAccessor(thisString).GetHashcode(); + uint32_t entry = hash & (CACHE_SIZE - 1); + uint32_t index = 
entry * ENTRY_SIZE; + + cache->Set(thread, index + STRING_INDEX, thisString); + cache->Set(thread, index + PATTERN_INDEX, pattern); + cache->Set(thread, index + ARRAY_INDEX, newElements); +} + } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_string.h b/ecmascript/builtins/builtins_string.h index dc92575df278104af7f6a30cedc02406bd688e58..473da08c17d7de96ae472c77c1842f0127d845d2 100644 --- a/ecmascript/builtins/builtins_string.h +++ b/ecmascript/builtins/builtins_string.h @@ -20,6 +20,101 @@ #include "ecmascript/ecma_runtime_call_info.h" #include "ecmascript/js_tagged_value.h" +// List of functions in String, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsString::func refers to the native implementation of String[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_STRING_FUNCTIONS(V) \ + /* String.fromCharCode ( ...codeUnits ) */ \ + V("fromCharCode", FromCharCode, 1, StringFromCharCode) \ + /* String.fromCodePoint ( ...codePoints ) */ \ + V("fromCodePoint", FromCodePoint, 1, INVALID) \ + /* String.raw ( template, ...substitutions ) */ \ + V("raw", Raw, 1, INVALID) + +// List of functions in String.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsString::func refers to the native implementation of String.prototype[name]. +// The following functions in String.prototype are not implemented yet: +// - String.prototype.isWellFormed ( ) +// - String.prototype.toWellFormed ( ) +#define BUILTIN_STRING_PROTOTYPE_FUNCTIONS(V) \ + /* String.prototype.at ( index ) */ \ + V("at", At, 1, INVALID) \ + /* String.prototype.charAt ( pos ) */ \ + V("charAt", CharAt, 1, StringCharAt) \ + /* String.prototype.charCodeAt ( pos ) */ \ + V("charCodeAt", CharCodeAt, 1, StringCharCodeAt) \ + /* String.prototype.codePointAt ( pos ) */ \ + V("codePointAt", CodePointAt, 1, INVALID) \ + /* String.prototype.concat ( ...args ) */ \ + V("concat", Concat, 1, INVALID) \ + /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \ + V("endsWith", EndsWith, 1, INVALID) \ + /* String.prototype.includes ( searchString [ , position ] ) */ \ + V("includes", Includes, 1, INVALID) \ + /* String.prototype.indexOf ( searchString [ , position ] ) */ \ + V("indexOf", IndexOf, 1, StringIndexOf) \ + /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \ + V("lastIndexOf", LastIndexOf, 1, INVALID) \ + /* String.prototype.localeCompare ( that [ , reserved1 [ , reserved2 ] ] ) */ \ + V("localeCompare", LocaleCompare, 1, LocaleCompare) \ + /* String.prototype.match ( regexp ) */ \ + V("match", Match, 1, INVALID) \ + /* String.prototype.matchAll ( regexp ) */ \ + V("matchAll", MatchAll, 1, INVALID) \ + /* String.prototype.normalize ( [ form ] ) */ \ + V("normalize", Normalize, 0, INVALID) \ + /* String.prototype.padEnd ( maxLength [ , fillString ] ) */ \ + V("padEnd", PadEnd, 1, INVALID) \ + /* String.prototype.padStart ( maxLength [ , fillString ] ) */ \ + V("padStart", PadStart, 1, INVALID) \ + /* String.prototype.repeat ( count ) */ \ + V("repeat", Repeat, 1, INVALID) \ + /* String.prototype.replace ( searchValue, replaceValue ) */ \ + V("replace", Replace, 2, StringReplace) \ + /* String.prototype.replaceAll ( searchValue, replaceValue ) */ \ + V("replaceAll", ReplaceAll, 2, INVALID) \ + /* String.prototype.search ( regexp ) */ \ + V("search", Search, 1, INVALID) \ + /* String.prototype.slice ( start, 
end ) */ \ + V("slice", Slice, 2, StringSlice) \ + /* String.prototype.split ( separator, limit ) */ \ + V("split", Split, 2, INVALID) \ + /* String.prototype.startsWith ( searchString [ , position ] ) */ \ + V("startsWith", StartsWith, 1, INVALID) \ + /* In Annex B.2.2: Additional Properties of the String.prototype Object */ \ + /* String.prototype.substr ( start, length ) */ \ + V("substr", SubStr, 2, INVALID) \ + /* String.prototype.substring ( start, end ) */ \ + V("substring", Substring, 2, StringSubstring) \ + /* String.prototype.toLocaleLowerCase ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleLowerCase", ToLocaleLowerCase, 0, INVALID) \ + /* String.prototype.toLocaleUpperCase ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleUpperCase", ToLocaleUpperCase, 0, INVALID) \ + /* String.prototype.toLowerCase ( ) */ \ + V("toLowerCase", ToLowerCase, 0, INVALID) \ + /* String.prototype.toString ( ) */ \ + V("toString", ToString, 0, INVALID) \ + /* String.prototype.toUpperCase ( ) */ \ + V("toUpperCase", ToUpperCase, 0, INVALID) \ + /* String.prototype.trim ( ) */ \ + V("trim", Trim, 0, StringTrim) \ + /* String.prototype.trimEnd ( ) */ \ + V("trimEnd", TrimEnd, 0, INVALID) \ + /* In Annex B.2.2: Additional Properties of the String.prototype Object */ \ + /* Equivalent to trimStart. For compatibility only. */ \ + /* String.prototype.trimLeft ( ) */ \ + V("trimLeft", TrimLeft, 0, INVALID) \ + /* In Annex B.2.2: Additional Properties of the String.prototype Object */ \ + /* Equivalent to trimEnd. For compatibility only. */ \ + /* String.prototype.trimRight ( ) */ \ + V("trimRight", TrimRight, 0, INVALID) \ + /* String.prototype.trimStart ( ) */ \ + V("trimStart", TrimStart, 0, INVALID) \ + /* String.prototype.valueOf ( ) */ \ + V("valueOf", ValueOf, 0, INVALID) + namespace panda::ecmascript::builtins { constexpr int32_t ENCODE_MAX_UTF16 = 0X10FFFF; constexpr uint16_t ENCODE_LEAD_LOW = 0xD800; @@ -64,6 +159,9 @@ public: static JSTaggedValue LastIndexOf(EcmaRuntimeCallInfo *argv); // 21.1.3.10 static JSTaggedValue LocaleCompare(EcmaRuntimeCallInfo *argv); + static JSTaggedValue LocaleCompareGC(JSThread *thread, JSHandle locales, + JSHandle thisHandle, JSHandle thatHandle, + JSHandle options, bool cacheable); // 21.1.3.11 static JSTaggedValue Match(EcmaRuntimeCallInfo *argv); @@ -127,10 +225,72 @@ public: static JSTaggedValue GetLength(EcmaRuntimeCallInfo *argv); + // Excluding the '@@' internal properties + static Span GetStringFunctions() + { + return Span(STRING_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties.
+ static Span GetStringPrototypeFunctions() + { + return Span(STRING_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 3 : 3 more inline properties in String.prototype: + // (1) String.prototype.constructor + // (2) String.prototype [ @@iterator ] + // (3) get length + return GetStringPrototypeFunctions().Size() + 3; + } + private: +#define BUILTIN_STRING_FUNCTION_ENTRY(name, method, length, builtinId) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsString::method, length, kungfu::BuiltinsStubCSigns::builtinId), + + static constexpr std::array STRING_FUNCTIONS = { + BUILTIN_STRING_FUNCTIONS(BUILTIN_STRING_FUNCTION_ENTRY) + }; + static constexpr std::array STRING_PROTOTYPE_FUNCTIONS = { + BUILTIN_STRING_PROTOTYPE_FUNCTIONS(BUILTIN_STRING_FUNCTION_ENTRY) + }; +#undef BUILTIN_STRING_FUNCTION_ENTRY + static JSTaggedValue Pad(EcmaRuntimeCallInfo *argv, bool isStart); static int32_t ConvertDoubleToInt(double d); + static JSTaggedValue CreateArrayFromString(JSThread *thread, EcmaVM *ecmaVm, + const JSHandle &thisString, uint32_t thisLength, uint32_t lim); + static JSTaggedValue CreateArrayBySplitString(JSThread *thread, EcmaVM *ecmaVm, + const JSHandle &thisString, const JSHandle &seperatorString, + uint32_t thisLength, uint32_t seperatorLength, uint32_t lim); + static JSTaggedValue CreateArrayThisStringAndSeperatorStringAreNotEmpty( + JSThread *thread, EcmaVM *ecmaVm, + const JSHandle &thisString, const JSHandle &seperatorString, + uint32_t thisLength, uint32_t seperatorLength, uint32_t lim = UINT32_MAX - 1); // 21.1.3.17.1 }; + +class StringSplitResultCache : public TaggedArray { +public: + static StringSplitResultCache *Cast(TaggedObject *object) + { + return reinterpret_cast(object); + } + static JSTaggedValue CreateCacheTable(const JSThread *thread); + static JSTaggedValue FindCachedResult(const JSThread *thread, const JSHandle &cache, + const JSHandle &string, const JSHandle &pattern); + static void SetCachedResult(const JSThread *thread, const JSHandle &cache, + const JSHandle &string, const JSHandle &pattern, + const JSHandle &result); + +private: + static constexpr int CACHE_SIZE = 256; + static constexpr int STRING_INDEX = 0; + static constexpr int PATTERN_INDEX = 1; + static constexpr int ARRAY_INDEX = 2; + static constexpr int ENTRY_SIZE = 3; +}; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_STRING_H diff --git a/ecmascript/builtins/builtins_string_iterator.cpp b/ecmascript/builtins/builtins_string_iterator.cpp index 4b59bc76d1afd2acf73939c81f5362b9d7877b34..a0cb96830af951cab7de4831e8896eb52e3d3725 100644 --- a/ecmascript/builtins/builtins_string_iterator.cpp +++ b/ecmascript/builtins/builtins_string_iterator.cpp @@ -32,6 +32,11 @@ JSTaggedValue BuiltinsStringIterator::Next(EcmaRuntimeCallInfo *argv) [[maybe_unused]] EcmaHandleScope handleScope(thread); // 1. Let O be the this value. JSHandle thisValue = GetThis(argv); + return NextInternal(thread, thisValue); +} + +JSTaggedValue BuiltinsStringIterator::NextInternal(JSThread *thread, JSHandle thisValue) +{ // 2. If Type(O) is not Object, throw a TypeError exception. // 3. If O does not have all of the internal slots of an String Iterator Instance (21.1.5.3), // throw a TypeError exception. 
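Review note: the StringSplitResultCache declared above is a direct-mapped cache — one slot per hash bucket (hash & (CACHE_SIZE - 1)), three tagged fields per entry (string, pattern, result array), and overwrite-on-collision. Only unlimited splits are cached (lim == UINT32_MAX - 1), because a result truncated by a smaller limit must not be served to later callers. A minimal standalone sketch of the same structure, using std::string and std::hash in place of EcmaString and GetHashcode (all names here are illustrative, not part of the patch):

    #include <array>
    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    // Direct-mapped split cache: one slot per hash bucket, overwritten on collision.
    // Mirrors StringSplitResultCache's per-entry layout (string, pattern, result).
    class SplitCache {
    public:
        std::optional<std::vector<std::string>> Find(const std::string &str, const std::string &pattern) const
        {
            const Entry &e = entries_[Slot(str)];
            if (e.valid && e.str == str && e.pattern == pattern) {
                return e.result;  // return a copy, as FindCachedResult copies the cached array
            }
            return std::nullopt;
        }

        void Store(const std::string &str, const std::string &pattern, std::vector<std::string> result)
        {
            // On collision the old entry is simply replaced: O(1) lookup, no probing.
            entries_[Slot(str)] = Entry{true, str, pattern, std::move(result)};
        }

    private:
        static constexpr std::size_t CACHE_SIZE = 256;  // must stay a power of two for the mask below

        struct Entry {
            bool valid = false;
            std::string str;
            std::string pattern;
            std::vector<std::string> result;
        };

        static std::size_t Slot(const std::string &s)
        {
            return std::hash<std::string>{}(s) & (CACHE_SIZE - 1);
        }

        std::array<Entry, CACHE_SIZE> entries_;
    };

FindCachedResult hands back a fresh copy of the cached array for the same reason the sketch returns by value: callers may mutate their result array, and the cached copy must stay pristine.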
diff --git a/ecmascript/builtins/builtins_string_iterator.h b/ecmascript/builtins/builtins_string_iterator.h index 32d2d59d7d7e42084db009a2267edb4342070b9d..366fbf61f14d8c787fe4299c03edeb91876c42cd 100644 --- a/ecmascript/builtins/builtins_string_iterator.h +++ b/ecmascript/builtins/builtins_string_iterator.h @@ -23,6 +23,7 @@ class BuiltinsStringIterator : public base::BuiltinsBase { public: // 21.1.5.2.1 static JSTaggedValue Next(EcmaRuntimeCallInfo *argv); + static JSTaggedValue NextInternal(JSThread *thread, JSHandle thisValue); }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_STRING_ITERATOR_H diff --git a/ecmascript/builtins/builtins_symbol.cpp b/ecmascript/builtins/builtins_symbol.cpp index d629e11eefc7cf71fcc297f1e1ef2e99e0fbf141..c58a7926fd0c52ee82455f7eba14d96df5864a22 100644 --- a/ecmascript/builtins/builtins_symbol.cpp +++ b/ecmascript/builtins/builtins_symbol.cpp @@ -110,6 +110,7 @@ JSTaggedValue BuiltinsSymbol::SymbolDescriptiveString(JSThread *thread, JSTagged JSHandle rightHandle(factory->NewFromASCII(")")); JSHandle stringLeft = factory->ConcatFromString(leftHandle, JSTaggedValue::ToString(thread, descHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle str = factory->ConcatFromString(stringLeft, rightHandle); return str.GetTaggedValue(); } @@ -194,35 +195,29 @@ JSTaggedValue BuiltinsSymbol::KeyFor(EcmaRuntimeCallInfo *argv) // 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) JSTaggedValue BuiltinsSymbol::ToPrimitive(EcmaRuntimeCallInfo *argv) { - // The allowed values for hint are "default", "number", and "string". ASSERT(argv); BUILTINS_API_TRACE(argv->GetThread(), Symbol, ToPrimitive); JSThread *thread = argv->GetThread(); [[maybe_unused]] EcmaHandleScope handleScope(thread); - // 1.Let s be the this value. + // Let s be the this value. JSHandle sym = GetThis(argv); - // 2.If Type(s) is Symbol, return s. + // 1.If value is a Symbol, return value. if (sym->IsSymbol()) { return sym.GetTaggedValue(); } - // 3.If Type(s) is not Object, throw a TypeError exception. - if (!sym->IsHeapObject()) { - // return TypeError - THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: s is not Object", JSTaggedValue::Exception()); - } - ASSERT(sym->IsHeapObject()); - // 4.If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. - // 5.Return the value of s's [[SymbolData]] internal slot. - if (!sym->IsJSPrimitiveRef()) { - // If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. - THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: no [[SymbolData]]", JSTaggedValue::Exception()); + + // 2.If value is an Object and value has a [[SymbolData]] internal slot, then + if (sym->IsJSPrimitiveRef()) { + // Let sym be the value of s's [[SymbolData]] internal slot. + JSTaggedValue primitive = JSPrimitiveRef::Cast(sym->GetTaggedObject())->GetValue(); + if (primitive.IsSymbol()) { + return primitive; + } } - // Let sym be the value of s's [[SymbolData]] internal slot. - JSTaggedValue primitive = JSPrimitiveRef::Cast(sym->GetTaggedObject())->GetValue(); - ASSERT(primitive.IsSymbol()); - return primitive; -} + // 3.If s does not have a [[SymbolData]] internal slot, throw a TypeError exception. 
+ THROW_TYPE_ERROR_AND_RETURN(thread, "ToPrimitive: s does not have a [[SymbolData]] internal slot", JSTaggedValue::Exception()); +} JSTaggedValue BuiltinsSymbol::DescriptionGetter(EcmaRuntimeCallInfo *argv) { ASSERT(argv); diff --git a/ecmascript/builtins/builtins_symbol.h b/ecmascript/builtins/builtins_symbol.h index cc5be06ed7822881abe1ecf97b8ee121e71cea18..9d91d6af79d76ea5493ec3a90e9dad38b6f0280b 100644 --- a/ecmascript/builtins/builtins_symbol.h +++ b/ecmascript/builtins/builtins_symbol.h @@ -20,6 +20,44 @@ #include "ecmascript/ecma_runtime_call_info.h" #include "ecmascript/js_tagged_value.h" +#define BUILTIN_WELL_KNOWN_SYMBOLS(V) \ + V(hasInstance, HasInstance) \ + V(isConcatSpreadable, IsConcatSpreadable) \ + V(toStringTag, ToStringTag) + +#define BUILTIN_PUBLIC_SYMBOLS(V) \ + V(asyncIterator, AsyncIterator) \ + V(attach, Attach) \ + V(detach, Detach) \ + V(iterator, Iterator) \ + V(match, Match) \ + V(matchAll, MatchAll) \ + V(replace, Replace) \ + V(search, Search) \ + V(species, Species) \ + V(split, Split) \ + V(toPrimitive, ToPrimitive) \ + V(unscopables, Unscopables) + +#define BUILTIN_ALL_SYMBOLS(V) \ + BUILTIN_WELL_KNOWN_SYMBOLS(V) \ + BUILTIN_PUBLIC_SYMBOLS(V) + +// List of functions in Symbol, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsSymbol::func refers to the native implementation of Symbol[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_SYMBOL_FUNCTIONS(V) \ + V("for", For, 1, INVALID) \ + V("keyFor", KeyFor, 1, INVALID) + +// List of functions in Symbol.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsSymbol::func refers to the native implementation of Symbol.prototype[name]. +#define BUILTIN_SYMBOL_PROTOTYPE_FUNCTIONS(V) \ + V("toString", ToString, 0, INVALID) \ + V("valueOf", ValueOf, 0, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsSymbol : public base::BuiltinsBase { public: @@ -45,6 +83,31 @@ public: static JSTaggedValue ToPrimitive(EcmaRuntimeCallInfo *argv); static JSTaggedValue SymbolDescriptiveString(JSThread *thread, JSTaggedValue sym); + + // Excluding the '@@' internal properties + static Span GetSymbolFunctions() + { + return Span(SYMBOL_FUNCTIONS); + } + + // Excluding the constructor and '@@' internal properties.
+ static Span GetSymbolPrototypeFunctions() + { + return Span(SYMBOL_PROTOTYPE_FUNCTIONS); + } + +private: +#define BUILTIN_SYMBOL_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsSymbol::func, length, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array SYMBOL_FUNCTIONS = { + BUILTIN_SYMBOL_FUNCTIONS(BUILTIN_SYMBOL_FUNCTION_ENTRY) + }; + static constexpr std::array SYMBOL_PROTOTYPE_FUNCTIONS = { + BUILTIN_SYMBOL_PROTOTYPE_FUNCTIONS(BUILTIN_SYMBOL_FUNCTION_ENTRY) + }; +#undef BUILTIN_SYMBOL_FUNCTION_ENTRY }; } // namespace panda::ecmascript::builtins #endif // ECMASCRIPT_BUILTINS_BUILTINS_SYMBOL_H diff --git a/ecmascript/builtins/builtins_typedarray.cpp b/ecmascript/builtins/builtins_typedarray.cpp index 15a73f532ce06559a2caee489c494342d41a359d..f890205d5a4e6060d0b1b6d02f460b8385496cfb 100644 --- a/ecmascript/builtins/builtins_typedarray.cpp +++ b/ecmascript/builtins/builtins_typedarray.cpp @@ -20,7 +20,7 @@ #include "ecmascript/builtins/builtins_array.h" #include "ecmascript/builtins/builtins_arraybuffer.h" #include "ecmascript/ecma_runtime_call_info.h" -#include "ecmascript/ecma_string.h" +#include "ecmascript/ecma_string-inl.h" #include "ecmascript/global_env.h" #include "ecmascript/interpreter/interpreter.h" #include "ecmascript/js_array.h" @@ -184,7 +184,9 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) JSHandle usingIterator = JSObject::GetMethod(thread, source, iteratorSymbol); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle arrIter = JSObject::GetMethod(thread, env->GetArrayProtoValuesFunction(), iteratorSymbol); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle typedArrIter = JSObject::GetMethod(thread, env->GetTypedArrayPrototype(), iteratorSymbol); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); bool isArrIter = JSTaggedValue::SameValue(usingIterator, arrIter); bool isTypedArrIter = JSTaggedValue::SameValue(usingIterator, typedArrIter); // 6. If usingIterator is not undefined, then @@ -220,7 +222,7 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) // vi. Set k to k + 1. JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); JSMutableHandle mapValue(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 2; + const uint32_t argsLength = 2; uint32_t k = 0; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); while (k < len) { @@ -275,7 +277,7 @@ JSTaggedValue BuiltinsTypedArray::From(EcmaRuntimeCallInfo *argv) // e. Perform ? Set(targetObj, Pk, mappedValue, true). // f. Set k to k + 1. JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 2; + const uint32_t argsLength = 2; int64_t k = 0; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); @@ -337,6 +339,7 @@ JSTaggedValue BuiltinsTypedArray::Of(EcmaRuntimeCallInfo *argv) while (k < len) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); JSHandle kValue = GetCallArg(argv, k); JSTaggedValue::SetProperty(thread, JSHandle::Cast(newObj), kKey, kValue, true); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -518,7 +521,7 @@ JSTaggedValue BuiltinsTypedArray::Every(EcmaRuntimeCallInfo *argv) // v. If testResult is false, return false. // e. Increase k by 1.
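+ // Each iteration calls callbackfn with three arguments (kValue, k, O), matching argsLength below.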
JSMutableHandle key(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); uint32_t k = 0; while (k < len) { @@ -611,9 +614,7 @@ JSTaggedValue BuiltinsTypedArray::Filter(EcmaRuntimeCallInfo *argv) info->SetCallArg(kValue.GetTaggedValue(), tKey.GetTaggedValue(), thisHandle.GetTaggedValue()); JSTaggedValue callResult = JSFunction::Call(info); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - bool testResult = callResult.ToBoolean(); - RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); - if (testResult) { + if (callResult.ToBoolean()) { kept->Set(thread, captured, kValue); captured++; } @@ -707,7 +708,7 @@ JSTaggedValue BuiltinsTypedArray::ForEach(EcmaRuntimeCallInfo *argv) // iv. ReturnIfAbrupt(funcResult). // e. Increase k by 1. JSMutableHandle key(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); uint32_t k = 0; while (k < len) { @@ -935,7 +936,7 @@ JSTaggedValue BuiltinsTypedArray::Map(EcmaRuntimeCallInfo *argv) JSMutableHandle key(thread, JSTaggedValue::Undefined()); JSMutableHandle mapValue(thread, JSTaggedValue::Undefined()); JSMutableHandle kValue(thread, JSTaggedValue::Undefined()); - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSHandle undefined = thread->GlobalConstants()->GetHandledUndefined(); for (uint32_t k = 0; k < len; k++) { key.Update(JSTaggedValue(k)); @@ -1017,13 +1018,13 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) // 5. Assert: target has a [[ViewedArrayBuffer]] internal slot. // 6. Let targetOffset be ToInteger (offset). const JSHandle srcOffset = GetCallArg(argv, 1); - uint32_t targetOffset = 0; + uint64_t targetOffset = 0; if (srcOffset->IsInt()) { if (srcOffset->GetInt() < 0) { THROW_RANGE_ERROR_AND_RETURN(thread, "The targetOffset of This value is less than 0.", JSTaggedValue::Exception()); } - targetOffset = static_cast(srcOffset->GetInt()); + targetOffset = static_cast(srcOffset->GetInt()); } else { JSTaggedNumber tTargetOffset = JSTaggedValue::ToInteger(thread, srcOffset); // 7. ReturnIfAbrupt(targetOffset). @@ -1037,7 +1038,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) THROW_RANGE_ERROR_AND_RETURN(thread, "The targetOffset is infinty, which is greater than targetLength.", JSTaggedValue::Exception()); } else { - targetOffset = static_cast(rawTargetOffset); + targetOffset = static_cast(rawTargetOffset); } } // 9. Let targetBuffer be the value of target’s [[ViewedArrayBuffer]] internal slot. @@ -1092,7 +1093,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) JSTaggedValue::Exception()); } // 21. Let targetByteIndex be targetOffset × targetElementSize + targetByteOffset. - ASSERT((static_cast(targetOffset) * static_cast(targetElementSize) + + ASSERT((targetOffset * static_cast(targetElementSize) + static_cast(targetByteOffset)) <= static_cast(UINT32_MAX)); uint32_t targetByteIndex = static_cast(targetOffset * targetElementSize + targetByteOffset); // 22. Let k be 0. 
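Review note on the Set hunks above: targetOffset is widened from uint32_t to uint64_t because JSTaggedValue::ToInteger can yield integral values well beyond 2^32, and narrowing the value before the bounds ASSERTs would silently wrap. A self-contained illustration of the hazard, independent of the runtime types (values are hypothetical, for illustration only):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // A script can pass any integral offset; suppose ToInteger produced 2^32.
        uint64_t rawTargetOffset = 1ULL << 32;
        uint32_t narrow = static_cast<uint32_t>(rawTargetOffset);  // truncates to 0
        uint64_t wide = rawTargetOffset;                           // value preserved
        // A later "offset <= targetLength" style check passes for narrow (0) but
        // correctly rejects wide, which is why the patch widens the variable.
        std::printf("narrow=%u wide=%llu\n", narrow, static_cast<unsigned long long>(wide));
        return 0;
    }

The casts back to uint32_t for targetByteIndex are then safe only because the preceding ASSERT has already bounded the product.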
@@ -1116,6 +1117,7 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) while (targetByteIndex < limit) { tKey.Update(JSTaggedValue(k)); JSHandle kKey(JSTaggedValue::ToString(thread, tKey)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); kValue.Update(ObjectFastOperator::FastGetPropertyByValue( thread, JSHandle::Cast(src).GetTaggedValue(), kKey.GetTaggedValue())); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); @@ -1190,9 +1192,9 @@ JSTaggedValue BuiltinsTypedArray::Set(EcmaRuntimeCallInfo *argv) srcByteIndex = srcByteOffset; } // 26. Let targetByteIndex be targetOffset × targetElementSize + targetByteOffset. - ASSERT((static_cast(targetOffset) * static_cast(targetElementSize) + + ASSERT((targetOffset * static_cast(targetElementSize) + static_cast(targetByteOffset)) <= static_cast(UINT32_MAX)); - uint32_t targetByteIndex = targetOffset * targetElementSize + targetByteOffset; + uint32_t targetByteIndex = static_cast(targetOffset) * targetElementSize + targetByteOffset; // 27. Let limit be targetByteIndex + targetElementSize × srcLength. ASSERT((static_cast(targetElementSize) * static_cast(srcLength) + static_cast(targetByteIndex)) <= static_cast(UINT32_MAX)); @@ -1406,7 +1408,7 @@ JSTaggedValue BuiltinsTypedArray::Sort(EcmaRuntimeCallInfo *argv) key.GetTaggedValue())); RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); while (beginIndex < endIndex) { - uint32_t middleIndex = (beginIndex + endIndex) / 2; // 2 : half + uint32_t middleIndex = beginIndex + (endIndex - beginIndex) / 2; // 2 : half key1.Update(JSTaggedValue(middleIndex)); middleValue.Update(ObjectFastOperator::FastGetPropertyByValue(thread, thisObjHandle.GetTaggedValue(), key1.GetTaggedValue())); @@ -1511,7 +1513,7 @@ JSTaggedValue BuiltinsTypedArray::Subarray(EcmaRuntimeCallInfo *argv) // 21. Let argumentsList be «buffer, beginByteOffset, newLength». // 5. Let buffer be the value of O’s [[ViewedArrayBuffer]] internal slot. // 22. Return Construct(constructor, argumentsList). - const int32_t argsLength = 3; + const uint32_t argsLength = 3; JSTaggedType args[argsLength] = { buffer.GetRawData(), JSTaggedValue(beginByteOffset).GetRawData(), @@ -1620,7 +1622,7 @@ JSTaggedValue BuiltinsTypedArray::At(EcmaRuntimeCallInfo *argv) int64_t k = 0; // 5. If relativeIndex ≥ 0, then Let k be relativeIndex. // 6. Else, Let k be len + relativeIndex. - k = relativeIndex >= 0 ? relativeIndex : len + relativeIndex; + k = relativeIndex >= 0 ? relativeIndex : static_cast(len) + relativeIndex; // 7. If k < 0 or k ≥ len, return undefined. if (k < 0 || k >= len) { return JSTaggedValue::Undefined(); @@ -1631,6 +1633,168 @@ JSTaggedValue BuiltinsTypedArray::At(EcmaRuntimeCallInfo *argv) return kValue.GetTaggedValue(); } +// 23.2.3.33 +JSTaggedValue BuiltinsTypedArray::ToSorted(EcmaRuntimeCallInfo* argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, ToSorted); + JSThread* thread = argv->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, throw a TypeError exception. + JSHandle comparefnHandle = GetCallArg(argv, 0); + if (!comparefnHandle->IsUndefined() && !comparefnHandle->IsCallable()) { + THROW_TYPE_ERROR_AND_RETURN(thread, "the comparefn is not callable.", JSTaggedValue::Exception()); + } + // 2. Let O be the this value. + JSHandle thisHandle = GetThis(argv); + // 3. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + // ReturnIfAbrupt(valid). 
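+ // (ValidateTypedArray throws a TypeError when O is not a typed array or its underlying buffer is detached.)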
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle thisObj(thisHandle); + // 4. Let len be O.[[ArrayLength]]. + uint32_t len = thisObj->GetArrayLength(); + + // 5. Let A be ? TypedArrayCreateSameType(O, « 𝔽(len) »). + JSTaggedType args[1] = { JSTaggedValue(len).GetRawData() }; + JSHandle newArrObj = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); // 1: one arg. + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle buffer = + JSHandle(thread, TypedArrayHelper::ValidateTypedArray(thread, thisHandle)); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSMutableHandle presentValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle middleValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle previousValue(thread, JSTaggedValue::Undefined()); + JSMutableHandle key(thread, JSTaggedValue::Undefined()); + JSMutableHandle key1(thread, JSTaggedValue::Undefined()); + JSMutableHandle key2(thread, JSTaggedValue::Undefined()); + if (len > 0) { + previousValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), JSTaggedValue(0))); + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), 0, previousValue.GetTaggedValue()); + } + for (uint32_t i = 1; i < len; i++) { + uint32_t beginIndex = 0; + uint32_t endIndex = i; + key.Update(JSTaggedValue(i)); + presentValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), key.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + while (beginIndex < endIndex) { + uint32_t middleIndex = beginIndex + (endIndex - beginIndex) / 2; // 2 : half + key1.Update(JSTaggedValue(middleIndex)); + middleValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, newArrObj.GetTaggedValue(), key1.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int32_t compareResult = + TypedArrayHelper::SortCompare(thread, comparefnHandle, buffer, middleValue, presentValue); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + compareResult > 0 ? (endIndex = middleIndex) : (beginIndex = middleIndex + 1); + } + + if (endIndex < i) { + for (uint32_t j = i; j > endIndex; j--) { + key2.Update(JSTaggedValue(j - 1)); + previousValue.Update(ObjectFastOperator::FastGetPropertyByValue( + thread, newArrObj.GetTaggedValue(), key2.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), j, previousValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + } + ObjectFastOperator::FastSetPropertyByIndex( + thread, newArrObj.GetTaggedValue(), endIndex, presentValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + return newArrObj.GetTaggedValue(); +} + +// 23.2.3.36 +JSTaggedValue BuiltinsTypedArray::With(EcmaRuntimeCallInfo* argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, With); + JSThread* thread = argv->GetThread(); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be the this value. + JSHandle thisHandle = GetThis(argv); + // 2. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + // ReturnIfAbrupt(valid). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + JSHandle thisObj(thisHandle); + // 3. Let len be O.[[ArrayLength]]. + uint32_t len = thisObj->GetArrayLength(); + + // 4. Let relativeIndex be ? ToIntegerOrInfinity(index). 
+ JSTaggedNumber indexVal = JSTaggedValue::ToInteger(thread, GetCallArg(argv, 0)); + // ReturnIfAbrupt(indexVal). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + int64_t relativeIndex = indexVal.GetNumber(); + // 5. If relativeIndex ≥ 0, let actualIndex be relativeIndex. + // 6. Else, let actualIndex be len + relativeIndex. + int64_t actualIndex = relativeIndex >= 0 ? relativeIndex : static_cast(len) + relativeIndex; + + // 7. If O.[[ContentType]] is BigInt, let numericValue be ? ToBigInt(value). + // 8. Else, let numericValue be ? ToNumber(value). + JSHandle value = GetCallArg(argv, 1); + ContentType contentType = thisObj->GetContentType(); + JSHandle numericValue; + if (contentType == ContentType::BigInt) { + numericValue = JSHandle(thread, JSTaggedValue::ToBigInt(thread, value)); + } else { + numericValue = JSHandle(thread, JSTaggedValue::ToNumber(thread, value)); + } + // ReturnIfAbrupt(numericValue). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 9. If IsValidIntegerIndex(O, 𝔽(actualIndex)) is false, throw a RangeError exception. + if (!JSTypedArray::IsValidIntegerIndex(thisHandle, JSTaggedValue(actualIndex))) { + THROW_RANGE_ERROR_AND_RETURN(thread, "Invalid typed array index", JSTaggedValue::Exception()); + } + + // 10. Let A be ? TypedArrayCreateSameType(O, « 𝔽(len) »). + JSTaggedType args[1] = { JSTaggedValue(len).GetRawData() }; + JSHandle newArrObj = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); // 1: one arg. + // ReturnIfAbrupt(A). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + + // 11. Let k be 0. + // 12. Repeat, while k < len, + // a. Let Pk be ! ToString(𝔽(k)). + // b. If k is actualIndex, let fromValue be numericValue. + // c. Else, let fromValue be ! Get(O, Pk). + // d. Perform ! Set(A, Pk, fromValue, true). + // e. Set k to k + 1. + JSMutableHandle tKey(thread, JSTaggedValue::Undefined()); + JSMutableHandle fromValue(thread, JSTaggedValue::Undefined()); + uint32_t k = 0; + while (k < len) { + tKey.Update(JSTaggedValue(k)); + if (k == actualIndex) { + fromValue.Update(numericValue); + } else { + fromValue.Update( + ObjectFastOperator::FastGetPropertyByValue(thread, thisHandle.GetTaggedValue(), tKey.GetTaggedValue())); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + } + ObjectFastOperator::FastSetPropertyByValue(thread, newArrObj.GetTaggedValue(), + tKey.GetTaggedValue(), fromValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + k++; + } + return newArrObj.GetTaggedValue(); +} + // es12 23.2.3.13 JSTaggedValue BuiltinsTypedArray::Includes(EcmaRuntimeCallInfo *argv) { @@ -1641,4 +1805,74 @@ JSTaggedValue BuiltinsTypedArray::Includes(EcmaRuntimeCallInfo *argv) } return BuiltinsArray::Includes(argv); } + +// 23.2.3.32 +JSTaggedValue BuiltinsTypedArray::ToReversed(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + JSThread *thread = argv->GetThread(); + BUILTINS_API_TRACE(thread, TypedArray, ToReversed); + [[maybe_unused]] EcmaHandleScope handleScope(thread); + + // 1. Let O be ToObject(this value). + JSHandle thisHandle = GetThis(argv); + JSHandle thisObj(thisHandle); + // 2. Perform ? ValidateTypedArray(O). + TypedArrayHelper::ValidateTypedArray(thread, thisHandle); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + JSHandle thisObjHandle = JSTaggedValue::ToObject(thread, thisHandle); + // ReturnIfAbrupt(O). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 3. Let len be O.[[ArrayLength]]. + uint32_t len = JSHandle::Cast(thisObjHandle)->GetArrayLength(); + // ReturnIfAbrupt(len). 
+ RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 4. Let A be ? TypedArrayCreateSameType(O, « 𝔽(length) »). + JSTaggedType args[1] = {JSTaggedValue(len).GetRawData()}; + JSHandle newArrayHandle = TypedArrayHelper::TypedArrayCreateSameType(thread, thisObj, 1, args); + // ReturnIfAbrupt(newObj). + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + // 5. Let k be 0. + uint32_t k = 0; + + // 6. Repeat, while k < length, + // a. Let from be ! ToString(𝔽(length - k - 1)). + // b. Let Pk be ! ToString(𝔽(k)). + // c. Let fromValue be ! Get(O, from). + // d. Perform ! Set(A, Pk, fromValue, true). + // e. Set k to k + 1. + while (k < len) { + uint32_t from = len - k - 1; + JSHandle fromValue = JSTypedArray::GetProperty(thread, thisHandle, from).GetValue(); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ObjectFastOperator::FastSetPropertyByIndex(thread, newArrayHandle.GetTaggedValue(), k, + fromValue.GetTaggedValue()); + RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread); + ++k; + } + // 7. Return A. + return newArrayHandle.GetTaggedValue(); +} + +// 23.2.3.13 +JSTaggedValue BuiltinsTypedArray::FindLast(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, FindLast); + if (!GetThis(argv)->IsTypedArray()) { + THROW_TYPE_ERROR_AND_RETURN(argv->GetThread(), "This is not a TypedArray.", JSTaggedValue::Exception()); + } + return BuiltinsArray::FindLast(argv); +} + +// 23.2.3.14 +JSTaggedValue BuiltinsTypedArray::FindLastIndex(EcmaRuntimeCallInfo *argv) +{ + ASSERT(argv); + BUILTINS_API_TRACE(argv->GetThread(), TypedArray, FindLastIndex); + if (!GetThis(argv)->IsTypedArray()) { + THROW_TYPE_ERROR_AND_RETURN(argv->GetThread(), "This is not a TypedArray.", JSTaggedValue::Exception()); + } + return BuiltinsArray::FindLastIndex(argv); +} } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_typedarray.h b/ecmascript/builtins/builtins_typedarray.h index 4e249b83e3f18f14d77c1b9d0563ad586040a428..a46c0a57d7989132667ca5752e558d13c3120fd1 100644 --- a/ecmascript/builtins/builtins_typedarray.h +++ b/ecmascript/builtins/builtins_typedarray.h @@ -18,6 +18,107 @@ #include "ecmascript/base/builtins_base.h" +// All types of %TypedArray%. +// V(Type, TYPE, bytesPerElement) where JSType::JS_##TYPE is the type index. +#define BUILTIN_TYPED_ARRAY_TYPES(V) \ + V(Int8Array, INT8_ARRAY, 1) \ + V(Uint8Array, UINT8_ARRAY, 1) \ + V(Uint8ClampedArray, UINT8_CLAMPED_ARRAY, 1) \ + V(Int16Array, INT16_ARRAY, 2) \ + V(Uint16Array, UINT16_ARRAY, 2) \ + V(Int32Array, INT32_ARRAY, 4) \ + V(Uint32Array, UINT32_ARRAY, 4) \ + V(Float32Array, FLOAT32_ARRAY, 4) \ + V(Float64Array, FLOAT64_ARRAY, 8) \ + V(BigInt64Array, BIGINT64_ARRAY, 8) \ + V(BigUint64Array, BIGUINT64_ARRAY, 8) + +// List of functions in %TypedArray%, excluding the '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsTypedArray::func refers to the native implementation of %TypedArray%[name]. +// kungfu::BuiltinsStubCSigns::stubIndex refers to the builtin stub index, or INVALID if no stub available. +#define BUILTIN_TYPED_ARRAY_FUNCTIONS(V) \ + /* %TypedArray%.from ( source [ , mapfn [ , thisArg ] ] ) */ \ + V("from", From, 1, INVALID) \ + /* %TypedArray%.of ( ...items ) */ \ + V("of", Of, 0, INVALID) + +// List of get accessors in %TypedArray%.prototype, excluding the '@@' properties. +// V(name, func, stubIndex) +// where BuiltinsTypedArray::func refers to the native implementation. 
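Review note before the getter and function lists below: these V-tables are X-macros — each list is written once and expanded by several consumers (a constexpr entry table here, property installation elsewhere). A rough standalone illustration of the pattern with simplified stand-in types (not the runtime's own signatures):

    #include <array>
    #include <cstdio>

    // Simplified stand-ins for the runtime's entry type and native function pointer.
    using NativeFn = int (*)();
    struct FunctionEntry {
        const char *name;
        NativeFn fn;
        int length;
    };

    static int Fill() { return 0; }
    static int Sort() { return 1; }

    // The X-macro list: one V(...) row per builtin.
    #define PROTOTYPE_FUNCTIONS(V) \
        V("fill", Fill, 1)         \
        V("sort", Sort, 1)

    // Consumer 1: expand the list into a constexpr table.
    #define MAKE_ENTRY(name, fn, length) FunctionEntry{name, fn, length},
    static constexpr std::array<FunctionEntry, 2> TABLE = {PROTOTYPE_FUNCTIONS(MAKE_ENTRY)};
    #undef MAKE_ENTRY

    int main()
    {
        // Consumer 2: iterate the same list at runtime (e.g. to install properties).
        for (const auto &entry : TABLE) {
            std::printf("%s/%d -> %d\n", entry.name, entry.length, entry.fn());
        }
        return 0;
    }

Keeping the list in one place is what lets the header compute GetNumPrototypeInlinedProperties() from the table size rather than from a hand-maintained count.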
+#define BUILTIN_TYPED_ARRAY_PROTOTYPE_GETTERS(V) \ + V("buffer", GetBuffer, INVALID) /* get %TypedArray%.prototype.buffer */ \ + V("byteLength", GetByteLength, INVALID) /* get %TypedArray%.prototype.byteLength */ \ + V("byteOffset", GetByteOffset, INVALID) /* get %TypedArray%.prototype.byteOffset */ \ + V("length", GetLength, INVALID) /* get %TypedArray%.prototype.length */ + +// List of functions in %TypedArray%.prototype, excluding the constructor and '@@' properties. +// V(name, func, length, stubIndex) +// where BuiltinsTypedArray::func refers to the native implementation of %TypedArray%.prototype[name]. +// The following functions are not included: +// - %TypedArray%.prototype.toString ( ), which is strictly equal to Array.prototype.toString +#define BUILTIN_TYPED_ARRAY_PROTOTYPE_FUNCTIONS(V) \ + /* %TypedArray%.prototype.at ( index ) */ \ + V("at", At, 1, INVALID) \ + /* %TypedArray%.prototype.copyWithin ( target, start [ , end ] ) */ \ + V("copyWithin", CopyWithin, 2, INVALID) \ + /* %TypedArray%.prototype.entries ( ) */ \ + V("entries", Entries, 0, INVALID) \ + /* %TypedArray%.prototype.every ( callbackfn [ , thisArg ] ) */ \ + V("every", Every, 1, INVALID) \ + /* %TypedArray%.prototype.fill ( value [ , start [ , end ] ] ) */ \ + V("fill", Fill, 1, INVALID) \ + /* %TypedArray%.prototype.filter ( callbackfn [ , thisArg ] ) */ \ + V("filter", Filter, 1, INVALID) \ + /* %TypedArray%.prototype.find ( predicate [ , thisArg ] ) */ \ + V("find", Find, 1, INVALID) \ + /* %TypedArray%.prototype.findIndex ( predicate [ , thisArg ] ) */ \ + V("findIndex", FindIndex, 1, INVALID) \ + /* %TypedArray%.prototype.findLast ( predicate [ , thisArg ] ) */ \ + V("findLast", FindLast, 1, INVALID) \ + /* %TypedArray%.prototype.findLastIndex ( predicate [ , thisArg ] ) */ \ + V("findLastIndex", FindLastIndex, 1, INVALID) \ + /* %TypedArray%.prototype.forEach ( callbackfn [ , thisArg ] ) */ \ + V("forEach", ForEach, 1, INVALID) \ + /* %TypedArray%.prototype.includes ( searchElement [ , fromIndex ] ) */ \ + V("includes", Includes, 1, INVALID) \ + /* %TypedArray%.prototype.indexOf ( searchElement [ , fromIndex ] ) */ \ + V("indexOf", IndexOf, 1, INVALID) \ + /* %TypedArray%.prototype.join ( separator ) */ \ + V("join", Join, 1, INVALID) \ + /* %TypedArray%.prototype.keys ( ) */ \ + V("keys", Keys, 0, INVALID) \ + /* %TypedArray%.prototype.lastIndexOf ( searchElement [ , fromIndex ] ) */ \ + V("lastIndexOf", LastIndexOf, 1, INVALID) \ + /* %TypedArray%.prototype.map ( callbackfn [ , thisArg ] ) */ \ + V("map", Map, 1, INVALID) \ + /* %TypedArray%.prototype.reduce ( callbackfn [ , initialValue ] ) */ \ + V("reduce", Reduce, 1, INVALID) \ + /* %TypedArray%.prototype.reduceRight ( callbackfn [ , initialValue ] ) */ \ + V("reduceRight", ReduceRight, 1, INVALID) \ + /* %TypedArray%.prototype.reverse ( ) */ \ + V("reverse", Reverse, 0, INVALID) \ + /* %TypedArray%.prototype.set ( source [ , offset ] ) */ \ + V("set", Set, 1, INVALID) \ + /* %TypedArray%.prototype.slice ( start, end ) */ \ + V("slice", Slice, 2, INVALID) \ + /* %TypedArray%.prototype.some ( callbackfn [ , thisArg ] ) */ \ + V("some", Some, 1, INVALID) \ + /* %TypedArray%.prototype.sort ( comparefn ) */ \ + V("sort", Sort, 1, INVALID) \ + /* %TypedArray%.prototype.subarray ( begin, end ) */ \ + V("subarray", Subarray, 2, INVALID) \ + /* %TypedArray%.prototype.toLocaleString ( [ reserved1 [ , reserved2 ] ] ) */ \ + V("toLocaleString", ToLocaleString, 0, INVALID) \ + /* %TypedArray%.prototype.toReversed ( ) */ \ + V("toReversed", ToReversed, 0, 
INVALID) \ + /* %TypedArray%.prototype.toSorted ( comparefn ) */ \ + V("toSorted", ToSorted, 1, INVALID) \ + /* %TypedArray%.prototype.values ( ) */ \ + V("values", Values, 0, INVALID) \ + /* %TypedArray%.prototype.with ( index, value ) */ \ + V("with", With, 2, INVALID) + namespace panda::ecmascript::builtins { class BuiltinsTypedArray : public base::BuiltinsBase { public: @@ -109,7 +210,65 @@ public: static JSTaggedValue Includes(EcmaRuntimeCallInfo *argv); // 23.2.3.1 static JSTaggedValue At(EcmaRuntimeCallInfo *argv); + // 23.2.3.32 %TypedArray%.prototype.toReversed ( ) + static JSTaggedValue ToReversed(EcmaRuntimeCallInfo *argv); + // 23.2.3.13 + static JSTaggedValue FindLast(EcmaRuntimeCallInfo *argv); + // 23.2.3.14 + static JSTaggedValue FindLastIndex(EcmaRuntimeCallInfo *argv); + // 23.2.3.33 + static JSTaggedValue ToSorted(EcmaRuntimeCallInfo *argv); + // 23.2.3.36 + static JSTaggedValue With(EcmaRuntimeCallInfo *argv); static const uint32_t MAX_ARRAY_INDEX = std::numeric_limits::max(); + + // Excluding the '@@' internal properties + static Span GetTypedArrayFunctions() + { + return Span(TYPED_ARRAY_FUNCTIONS); + } + + // Excluding the '@@' internal properties + static Span GetTypedArrayPrototypeAccessors() + { + return Span(TYPED_ARRAY_PROTOTYPE_ACCESSORS); + } + + // Excluding the constructor and '@@' internal properties. + static Span GetTypedArrayPrototypeFunctions() + { + return Span(TYPED_ARRAY_PROTOTYPE_FUNCTIONS); + } + + static size_t GetNumPrototypeInlinedProperties() + { + // 4 : 4 more inline properties in %TypedArray%.prototype for the following functions/accessors: + // (1) %TypedArray%.prototype.constructor + // (2) %TypedArray%.prototype.toString, which is strictly equal to Array.prototype.toString + // (3) %TypedArray%.prototype[@@iterator] + // (4) %TypedArray%.prototype[@@toStringTag] + return GetTypedArrayPrototypeFunctions().Size() + + GetTypedArrayPrototypeAccessors().Size() + 4; + } + +private: +#define BUILTIN_TYPED_ARRAY_FUNCTION_ENTRY(name, func, length, id) \ + base::BuiltinFunctionEntry::Create(name, BuiltinsTypedArray::func, length, kungfu::BuiltinsStubCSigns::id), +#define BUILTIN_TYPED_ARRAY_ACCESSOR_ENTRY(name, func, id) \ + base::BuiltinFunctionEntry::Create( \ + name, BuiltinsTypedArray::func, 0, kungfu::BuiltinsStubCSigns::id), + + static constexpr std::array TYPED_ARRAY_FUNCTIONS = { + BUILTIN_TYPED_ARRAY_FUNCTIONS(BUILTIN_TYPED_ARRAY_FUNCTION_ENTRY) + }; + static constexpr std::array TYPED_ARRAY_PROTOTYPE_ACCESSORS = { + BUILTIN_TYPED_ARRAY_PROTOTYPE_GETTERS(BUILTIN_TYPED_ARRAY_ACCESSOR_ENTRY) + }; + static constexpr std::array TYPED_ARRAY_PROTOTYPE_FUNCTIONS = { + BUILTIN_TYPED_ARRAY_PROTOTYPE_FUNCTIONS(BUILTIN_TYPED_ARRAY_FUNCTION_ENTRY) + }; +#undef BUILTIN_TYPED_ARRAY_FUNCTION_ENTRY +#undef BUILTIN_TYPED_ARRAY_ACCESSOR_ENTRY }; } // namespace panda::ecmascript::builtins diff --git a/ecmascript/builtins/builtins_weak_map.cpp b/ecmascript/builtins/builtins_weak_map.cpp index c0dcf62bc8ccaf3b0f5295b9a8826ca6fe007428..b2a0c216da4e0e5cfefa53fe6d26da962fbf4099 100644 --- a/ecmascript/builtins/builtins_weak_map.cpp +++ b/ecmascript/builtins/builtins_weak_map.cpp @@ -87,8 +87,8 @@ JSTaggedValue BuiltinsWeakMap::Delete(EcmaRuntimeCallInfo *argv) JSHandle weakMap(self); JSHandle key = GetCallArg(argv, 0); - // 5.if Type(key) is not Object, return false. - if (!key->IsHeapObject()) { + // 5.If CanBeHeldWeakly(key) is false, return false. 
diff --git a/ecmascript/builtins/builtins_weak_map.cpp b/ecmascript/builtins/builtins_weak_map.cpp
index c0dcf62bc8ccaf3b0f5295b9a8826ca6fe007428..b2a0c216da4e0e5cfefa53fe6d26da962fbf4099 100644
--- a/ecmascript/builtins/builtins_weak_map.cpp
+++ b/ecmascript/builtins/builtins_weak_map.cpp
@@ -87,8 +87,8 @@ JSTaggedValue BuiltinsWeakMap::Delete(EcmaRuntimeCallInfo *argv)
     JSHandle<JSWeakMap> weakMap(self);
     JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
-    // 5.if Type(key) is not Object, return false.
-    if (!key->IsHeapObject()) {
+    // 5. If CanBeHeldWeakly(key) is false, return false.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) {
         return GetTaggedBoolean(false);
     }
     return GetTaggedBoolean(JSWeakMap::Delete(thread, weakMap, key));
@@ -106,10 +106,10 @@ JSTaggedValue BuiltinsWeakMap::Has(EcmaRuntimeCallInfo *argv)
     if (!self->IsJSWeakMap()) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakMap.", JSTaggedValue::Exception());
     }
-    JSWeakMap *jsWeakMap = JSWeakMap::Cast(*JSTaggedValue::ToObject(thread, self));
+    JSWeakMap *jsWeakMap = JSWeakMap::Cast(self.GetTaggedValue().GetTaggedObject());
     JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
-    // 5.if Type(key) is not Object, return false.
-    if (!key->IsHeapObject()) {
+    // 5. If CanBeHeldWeakly(key) is false, return false.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) {
         return GetTaggedBoolean(false);
     }
     return GetTaggedBoolean(jsWeakMap->Has(key.GetTaggedValue()));
@@ -127,9 +127,10 @@ JSTaggedValue BuiltinsWeakMap::Get(EcmaRuntimeCallInfo *argv)
     if (!self->IsJSWeakMap()) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakMap.", JSTaggedValue::Exception());
     }
-    JSWeakMap *jsWeakMap = JSWeakMap::Cast(*JSTaggedValue::ToObject(thread, self));
+    JSWeakMap *jsWeakMap = JSWeakMap::Cast(self.GetTaggedValue().GetTaggedObject());
     JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
-    if (!key->IsHeapObject()) {
+    // 4. If CanBeHeldWeakly(key) is false, return undefined.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) {
         return JSTaggedValue::Undefined();
     }
     return jsWeakMap->Get(key.GetTaggedValue());
@@ -150,11 +151,9 @@ JSTaggedValue BuiltinsWeakMap::Set(EcmaRuntimeCallInfo *argv)
     }
 
     JSHandle<JSTaggedValue> key = GetCallArg(argv, 0);
-    if (!key->IsHeapObject()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not an object.", JSTaggedValue::Exception());
-    }
-    if (key->IsSymbol() || key->IsString()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "key is Symblol or String", JSTaggedValue::Exception());
+    // 4. If CanBeHeldWeakly(key) is false, throw a TypeError exception.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, key)) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "invalid value used as weak map key.", JSTaggedValue::Exception());
     }
 
     JSHandle<JSTaggedValue> value = GetCallArg(argv, 1);
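The predicate driving all of these weak-collection hunks, JSTaggedValue::CanBeHeldWeakly, implements the ES2023 "Symbols as WeakMap keys" semantics. As a reference point, here is a self-contained sketch of the spec steps over a toy value model (the real predicate operates on JSTaggedValue; the type and helper names below are illustrative):

#include <optional>
#include <string>

// Toy stand-in for a JS value; only the facts CanBeHeldWeakly cares about.
struct ToyValue {
    bool isObject = false;
    bool isSymbol = false;
    // Engaged only for symbols created via Symbol.for(key).
    std::optional<std::string> symbolRegistryKey;
};

// CanBeHeldWeakly(v), ES2023:
//   1. If v is an Object, return true.
//   2. If v is a Symbol and KeyForSymbol(v) is empty, return true.
//   3. Return false. (strings, numbers, registered symbols, ...)
static bool CanBeHeldWeaklySketch(const ToyValue &v)
{
    if (v.isObject) {
        return true;
    }
    return v.isSymbol && !v.symbolRegistryKey.has_value();
}

This is also why the FinalizationRegistry tests added later in this patch can use plain (unregistered) symbols as both targets and unregister tokens.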
diff --git a/ecmascript/builtins/builtins_weak_ref.cpp b/ecmascript/builtins/builtins_weak_ref.cpp
index 289c11c4cb8cee6e8d5a17e2830ac796935ce37e..ad4b85a330782bf6b7ed55fa18c6ce1d1f3917aa 100644
--- a/ecmascript/builtins/builtins_weak_ref.cpp
+++ b/ecmascript/builtins/builtins_weak_ref.cpp
@@ -17,7 +17,7 @@
 
 #include "ecmascript/ecma_vm.h"
 #include "ecmascript/js_weak_ref.h"
-#include "ecmascript/object_factory.h"
+#include "ecmascript/object_factory-inl.h"
 
 namespace panda::ecmascript::builtins {
 JSTaggedValue BuiltinsWeakRef::WeakRefConstructor(EcmaRuntimeCallInfo *argv)
@@ -32,10 +32,10 @@ JSTaggedValue BuiltinsWeakRef::WeakRefConstructor(EcmaRuntimeCallInfo *argv)
     if (newTarget->IsUndefined()) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "new target can't be undefined", JSTaggedValue::Exception());
     }
-    // 2. If Type(target) is not Object, throw a TypeError exception.
+    // 2. If CanBeHeldWeakly(target) is false, throw a TypeError exception.
     JSHandle<JSTaggedValue> target = GetCallArg(argv, 0);
-    if (!target->IsECMAObject()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "target is not object", JSTaggedValue::Exception());
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, target)) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "target invalid", JSTaggedValue::Exception());
     }
 
     // 3. Let weakRef be ? OrdinaryCreateFromConstructor(NewTarget, "%WeakRef.prototype%", « [[WeakRefTarget]] »).
     JSHandle<JSTaggedValue> constructor = GetConstructor(argv);
diff --git a/ecmascript/builtins/builtins_weak_set.cpp b/ecmascript/builtins/builtins_weak_set.cpp
index 4205ef913ebf4321a394bab93c64ee5e0b22e6cc..12d71e83ffaba3718702366e1850d7a2362683d7 100644
--- a/ecmascript/builtins/builtins_weak_set.cpp
+++ b/ecmascript/builtins/builtins_weak_set.cpp
@@ -120,15 +120,12 @@ JSTaggedValue BuiltinsWeakSet::Add(EcmaRuntimeCallInfo *argv)
     }
 
     JSHandle<JSTaggedValue> value(GetCallArg(argv, 0));
-    if (!value->IsHeapObject()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "value is not an object", JSTaggedValue::Exception());
+    // 4. If CanBeHeldWeakly(value) is false, throw a TypeError exception.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) {
+        THROW_TYPE_ERROR_AND_RETURN(thread, "invalid value used in weak set", JSTaggedValue::Exception());
     }
-    if (value->IsSymbol() || value->IsString()) {
-        THROW_TYPE_ERROR_AND_RETURN(thread, "value is Symblol or String", JSTaggedValue::Exception());
-    }
-
-    JSHandle<JSWeakSet> weakSet(thread, JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self)));
+    JSHandle<JSWeakSet> weakSet(self);
     JSWeakSet::Add(thread, weakSet, value);
     return weakSet.GetTaggedValue();
 }
@@ -146,9 +143,10 @@ JSTaggedValue BuiltinsWeakSet::Delete(EcmaRuntimeCallInfo *argv)
         THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakSet", JSTaggedValue::Exception());
     }
 
-    JSHandle<JSWeakSet> weakSet(thread, JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self)));
+    JSHandle<JSWeakSet> weakSet(self);
     JSHandle<JSTaggedValue> value = GetCallArg(argv, 0);
-    if (!value->IsHeapObject()) {
-        GetTaggedBoolean(false);
-    }
+    // 4. If CanBeHeldWeakly(value) is false, return false.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) {
+        return GetTaggedBoolean(false);
+    }
     return GetTaggedBoolean(JSWeakSet::Delete(thread, weakSet, value));
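One spec-mandated asymmetry worth keeping in mind while reading these hunks: when CanBeHeldWeakly(value) is false, WeakSet.prototype.add throws a TypeError (hunk above), while delete here and has (next hunk) simply return false; WeakMap follows the same pattern, with get returning undefined.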
@@ -166,9 +164,10 @@ JSTaggedValue BuiltinsWeakSet::Has(EcmaRuntimeCallInfo *argv)
     if (!self->IsJSWeakSet()) {
         THROW_TYPE_ERROR_AND_RETURN(thread, "obj is not JSWeakSet", JSTaggedValue::Exception());
     }
-    JSWeakSet *jsWeakSet = JSWeakSet::Cast(*JSTaggedValue::ToObject(thread, self));
+    JSWeakSet *jsWeakSet = JSWeakSet::Cast(self.GetTaggedValue().GetTaggedObject());
     JSHandle<JSTaggedValue> value = GetCallArg(argv, 0);
-    if (!value->IsHeapObject()) {
-        GetTaggedBoolean(false);
-    }
+    // 4. If CanBeHeldWeakly(value) is false, return false.
+    if (!JSTaggedValue::CanBeHeldWeakly(thread, value)) {
+        return GetTaggedBoolean(false);
+    }
     return GetTaggedBoolean(jsWeakSet->Has(value.GetTaggedValue()));
diff --git a/ecmascript/builtins/tests/builtins_array_test.cpp b/ecmascript/builtins/tests/builtins_array_test.cpp
index 248ac346b3220d08a8f47c265be1680b150ddfde..be2ddd938cc4f6f6b36e286a13ded4cbbef9af61 100644
--- a/ecmascript/builtins/tests/builtins_array_test.cpp
+++ b/ecmascript/builtins/tests/builtins_array_test.cpp
@@ -35,6 +35,26 @@ using namespace panda::ecmascript;
 using namespace panda::ecmascript::builtins;
 using namespace panda::ecmascript::base;
 
+constexpr int32_t INT_VALUE_0 = 0;
+constexpr int32_t INT_VALUE_1 = 1;
+constexpr int32_t INT_VALUE_2 = 2;
+constexpr int32_t INT_VALUE_3 = 3;
+constexpr int32_t INT_VALUE_4 = 4;
+constexpr int32_t INT_VALUE_50 = 50;
+constexpr int32_t INT_VALUE_200 = 200;
+constexpr int32_t INT_VALUE_666 = 666;
+constexpr uint32_t RUNTIME_CALL_INFO_PARA_0 = 0;
+constexpr uint32_t RUNTIME_CALL_INFO_PARA_1 = 1;
+constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_4 = 4;
+constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_8 = 8;
+constexpr uint32_t RUNTIME_CALL_INFO_PARA_NUM_10 = 10;
+
+enum class ArrayIndex {
+    ARRAY_INDEX_0,
+    ARRAY_INDEX_1,
+    ARRAY_INDEX_2,
+    ARRAY_INDEX_3
+};
 namespace panda::test {
 using Array = ecmascript::builtins::BuiltinsArray;
@@ -141,6 +161,30 @@ public:
         return GetTaggedBoolean(false);
     }
 
+    static JSTaggedValue TestFindLastFunc(EcmaRuntimeCallInfo *argv)
+    {
+        uint32_t argc = argv->GetArgsNumber();
+        if (argc > 0) {
+            // 20 : test case
+            if (GetCallArg(argv, 0)->GetInt() > 20) {
+                return GetTaggedBoolean(true);
+            }
+        }
+        return GetTaggedBoolean(false);
+    }
+
+    static JSTaggedValue TestFindLastIndexFunc(EcmaRuntimeCallInfo *argv)
+    {
+        uint32_t argc = argv->GetArgsNumber();
+        if (argc > 0) {
+            // 20 : test case
+            if (GetCallArg(argv, 0)->GetInt() > 20) {
+                return GetTaggedBoolean(true);
+            }
+        }
+        return GetTaggedBoolean(false);
+    }
+
     static JSTaggedValue TestReduceFunc(EcmaRuntimeCallInfo *argv)
     {
         int accumulator = GetCallArg(argv, 0)->GetInt();
@@ -928,7 +972,53 @@ HWTEST_F_L0(BuiltinsArrayTest, ForEach)
     EXPECT_EQ(jsArray->GetArrayLength(), 3U);
 }
 
-// 22.1.3.11 new Array(1,2,3,4,3).IndexOf(searchElement [ , fromIndex ])
+#define ARRAY_DEFINE_OWN_PROPERTY(dest, index, value) \
+    do { \
+        JSHandle<JSTaggedValue> key(thread, JSTaggedValue(index)); \
+        PropertyDescriptor desc(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(value)), true, true, true); \
+        JSArray::DefineOwnProperty(thread, dest, key, desc); \
+    } while (false)
+
+#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(method, target, expected) \
+    do { \
+        auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); \
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \
+        ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \
+        \
+        [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \
+        JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \
+        TestHelper::TearDownFrame(thread, prev); \
+        ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \
+    } while (false)
+
+#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(method, target, expected, arg0) \
+    do { \
+        auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); \
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \
+        ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \
+        ecmaRuntimeCallInfo->SetCallArg(0, JSTaggedValue(arg0)); \
+        \
+
[[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \ + JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \ + TestHelper::TearDownFrame(thread, prev); \ + ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \ + } while (false) + +#define ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(method, target, expected, arg0, arg1) \ + do { \ + auto ecmaRuntimeCallInfo = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); \ + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); \ + ecmaRuntimeCallInfo->SetThis((target).GetTaggedValue()); \ + ecmaRuntimeCallInfo->SetCallArg(0, JSTaggedValue(arg0)); \ + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(arg1)); \ + \ + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); \ + JSTaggedValue result = Array::method(ecmaRuntimeCallInfo); \ + TestHelper::TearDownFrame(thread, prev); \ + ASSERT_TRUE(JSTaggedValue::StrictEqual(result, JSTaggedValue(expected))); \ + } while (false) + +// 22.1.3.11 Array.IndexOf(searchElement [ , fromIndex ]) HWTEST_F_L0(BuiltinsArrayTest, IndexOf) { JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); @@ -937,67 +1027,54 @@ HWTEST_F_L0(BuiltinsArrayTest, IndexOf) JSHandle obj(thread, arr); EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); - JSHandle key0(thread, JSTaggedValue(0)); - PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(1)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key0, desc0); - JSHandle key1(thread, JSTaggedValue(1)); - PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key1, desc1); - JSHandle key2(thread, JSTaggedValue(2)); - PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key2, desc2); - JSHandle key3(thread, JSTaggedValue(3)); - PropertyDescriptor desc3(thread, JSHandle(thread, JSTaggedValue(4)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key3, desc3); - JSHandle key4(thread, JSTaggedValue(4)); - PropertyDescriptor desc4(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key4, desc4); - - auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast(0))); - - [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); - JSTaggedValue result = Array::IndexOf(ecmaRuntimeCallInfo1); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(2).GetRawData()); - - auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo2->SetCallArg(1, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); - result = Array::IndexOf(ecmaRuntimeCallInfo2); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), 
JSTaggedValue(4).GetRawData()); - - auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo3->SetCallArg(0, JSTaggedValue(static_cast(5))); - ecmaRuntimeCallInfo3->SetCallArg(1, JSTaggedValue(static_cast(0))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); - result = Array::IndexOf(ecmaRuntimeCallInfo3); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(-1).GetRawData()); - - auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); - ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo4->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo4->SetCallArg(0, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4); - result = Array::IndexOf(ecmaRuntimeCallInfo4); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(2).GetRawData()); + // arr = [1, 2, 3, 4, 3, 0, 2.0, +0.0, 3.0, -0.0, , , undefined] + ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1); + ARRAY_DEFINE_OWN_PROPERTY(obj, 1, 2); + ARRAY_DEFINE_OWN_PROPERTY(obj, 2, 3); + ARRAY_DEFINE_OWN_PROPERTY(obj, 3, 4); + ARRAY_DEFINE_OWN_PROPERTY(obj, 4, 3); + ARRAY_DEFINE_OWN_PROPERTY(obj, 5, 0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 6, 2.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 7, +0.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 8, 3.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 9, -0.0); + ARRAY_DEFINE_OWN_PROPERTY(obj, 12, JSTaggedValue::Undefined()); + + // arr.indexOf(3, 0) == 2 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 2, 3, 0); + // arr.indexOf(3, 3) == 4 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 4, 3, 3); + // arr.indexOf(5, 0) == -1 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, -1, 5, 0); + // arr.indexOf(3) == 2 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 2, 3); + + // Expects int32_t(x) and double(x) to be strictly equal + // arr.indexOf(3.0) == 2 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 2, 3.0); + // arr.indexOf(3, 5) == 8 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 8, 3, 5); + + // Expects 0, +0.0, -0.0 to be strictly equal + // arr.indexOf(+0.0) == 5 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 5, +0.0); + // arr.indexOf(-0.0) == 5 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(IndexOf, obj, 5, -0.0); + // arr.indexOf(0, 6) == 7 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 7, 0, 6); + // arr.indexOf(-0.0, 6) == 7 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 7, -0.0, 6); + // arr.indexOf(0, 8) == 9 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 9, 0, 8); + // arr.indexOf(+0.0, 8) == 9 + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(IndexOf, obj, 9, +0.0, 8); + + // Expects undefined to be found + // arr.indexOf() == 12, where the first argument is undefined + ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(IndexOf, obj, 12); } -// 22.1.3.14 new Array(1,2,3,4,3).LastIndexOf(searchElement [ , fromIndex ]) +// 22.1.3.14 Array.LastIndexOf(searchElement [ , fromIndex ]) HWTEST_F_L0(BuiltinsArrayTest, LastIndexOf) { JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); @@ -1006,68 +1083,50 @@ HWTEST_F_L0(BuiltinsArrayTest, LastIndexOf) JSHandle obj(thread, arr); EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); - JSHandle key0(thread, JSTaggedValue(0)); - 
PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(1)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key0, desc0); - JSHandle key1(thread, JSTaggedValue(1)); - PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key1, desc1); - JSHandle key2(thread, JSTaggedValue(2)); - PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key2, desc2); - JSHandle key3(thread, JSTaggedValue(3)); - PropertyDescriptor desc3(thread, JSHandle(thread, JSTaggedValue(4)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key3, desc3); - JSHandle key4(thread, JSTaggedValue(4)); - PropertyDescriptor desc4(thread, JSHandle(thread, JSTaggedValue(3)), true, true, true); - JSArray::DefineOwnProperty(thread, obj, key4, desc4); - - // new Array(1,2,3,4,3).LastIndexOf(3,4) - auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast(4))); - - [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); - JSTaggedValue result = Array::LastIndexOf(ecmaRuntimeCallInfo1); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast(4)).GetRawData()); - - // new Array(1,2,3,4,3).LastIndexOf(3,3) - auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast(3))); - ecmaRuntimeCallInfo2->SetCallArg(1, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); - result = Array::LastIndexOf(ecmaRuntimeCallInfo2); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast(2)).GetRawData()); - - // new Array(1,2,3,4,3).LastIndexOf(5,4) - auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); - ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo3->SetCallArg(0, JSTaggedValue(static_cast(5))); - ecmaRuntimeCallInfo3->SetCallArg(1, JSTaggedValue(static_cast(4))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); - result = Array::LastIndexOf(ecmaRuntimeCallInfo3); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(-1).GetRawData()); - - // new Array(1,2,3,4,3).LastIndexOf(3) - auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); - ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined()); - ecmaRuntimeCallInfo4->SetThis(obj.GetTaggedValue()); - ecmaRuntimeCallInfo4->SetCallArg(0, JSTaggedValue(static_cast(3))); - - prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4); - result = Array::LastIndexOf(ecmaRuntimeCallInfo4); - TestHelper::TearDownFrame(thread, prev); - ASSERT_EQ(result.GetRawData(), JSTaggedValue(static_cast(4)).GetRawData()); + // arr = [1, 2, 3, 4, 3, 0, 2.0, +0.0, 3.0, -0.0, , , undefined, , , -1] + 
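A quick reference for the helper used below: per its definition earlier in this file, ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1) expands to the same statements the removed lines spelled out by hand:

// Expansion of ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1):
do {
    JSHandle<JSTaggedValue> key(thread, JSTaggedValue(0));
    PropertyDescriptor desc(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(1)), true, true, true);
    JSArray::DefineOwnProperty(thread, obj, key, desc);
} while (false);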
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 0, 1);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 1, 2);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 2, 3);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 3, 4);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 4, 3);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 5, 0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 6, 2.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 7, +0.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 8, 3.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 9, -0.0);
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 12, JSTaggedValue::Undefined());
+    ARRAY_DEFINE_OWN_PROPERTY(obj, 15, -1);
+
+    // arr.lastIndexOf(3, 4) == 4
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 4, 3, 4);
+    // arr.lastIndexOf(3, 3) == 2
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 2, 3, 3);
+    // arr.lastIndexOf(5, 4) == -1
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, -1, 5, 4);
+
+    // Expects int32_t(x) and double(x) to be strictly equal
+    // arr.lastIndexOf(3) == 8
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 8, 3);
+    // arr.lastIndexOf(1.0) == 0
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 0, 1.0);
+
+    // Expects 0, +0.0, -0.0 to be strictly equal
+    // arr.lastIndexOf(+0.0) == 9
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 9, +0.0);
+    // arr.lastIndexOf(0) == 9
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG1(LastIndexOf, obj, 9, 0);
+    // arr.lastIndexOf(0, 8) == 7
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 7, 0, 8);
+    // arr.lastIndexOf(-0.0, 8) == 7
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 7, -0.0, 8);
+    // arr.lastIndexOf(-0.0, 6) == 5
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 5, -0.0, 6);
+    // arr.lastIndexOf(+0.0, 6) == 5
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG2(LastIndexOf, obj, 5, +0.0, 6);
+
+    // Expects undefined to be found
+    // arr.lastIndexOf() == 12, where the first argument is undefined
+    ARRAY_BUILTIN_METHOD_TEST_CASE_ARG0(LastIndexOf, obj, 12);
 }
 
 // 22.1.3.11 new Array().Pop()
@@ -1706,4 +1765,247 @@ HWTEST_F_L0(BuiltinsArrayTest, At)
     TestHelper::TearDownFrame(thread, prev6);
     ASSERT_EQ(result, JSTaggedValue::Undefined());
 }
+
+HWTEST_F_L0(BuiltinsArrayTest, With)
+{
+    JSHandle<JSTaggedValue> lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString();
+    JSArray *arr =
+        JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject());
+    EXPECT_TRUE(arr != nullptr);
+    JSHandle<JSObject> obj(thread, arr);
+    EXPECT_EQ(JSArray::GetProperty(thread, JSHandle<JSTaggedValue>(obj),
+        lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0);
+
+    JSHandle<JSTaggedValue> key0(thread, JSTaggedValue(static_cast<int32_t>(ArrayIndex::ARRAY_INDEX_0)));
+    PropertyDescriptor desc0(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INT_VALUE_0)), true, true, true);
+    JSArray::DefineOwnProperty(thread, obj, key0, desc0);
+    JSHandle<JSTaggedValue> key1(thread, JSTaggedValue(static_cast<int32_t>(ArrayIndex::ARRAY_INDEX_1)));
+    PropertyDescriptor desc1(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INT_VALUE_1)), true, true, true);
+    JSArray::DefineOwnProperty(thread, obj, key1, desc1);
+    JSHandle<JSTaggedValue> key2(thread, JSTaggedValue(static_cast<int32_t>(ArrayIndex::ARRAY_INDEX_2)));
+    PropertyDescriptor desc2(thread, JSHandle<JSTaggedValue>(thread, JSTaggedValue(INT_VALUE_2)), true, true, true);
+    JSArray::DefineOwnProperty(thread, obj, key2, desc2);
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_8);
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(RUNTIME_CALL_INFO_PARA_0,
+        JSTaggedValue(static_cast<int32_t>(ArrayIndex::ARRAY_INDEX_1)));
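+    // Array.prototype.with(index, value) returns a copy with a single element
+    // replaced; the call being built here is arr.with(1, 3) on [0, 1, 2], and
+    // the assertions below expect the copy to read [0, 3, 2].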
ecmaRuntimeCallInfo1->SetCallArg(RUNTIME_CALL_INFO_PARA_1, JSTaggedValue(INT_VALUE_3)); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::With(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_0); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_3); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_2); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToSorted) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = + JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_3)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_1)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_4); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result2 = Array::ToSorted(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result2.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result2.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_1); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_2); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_3); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToSpliced) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = + JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(INT_VALUE_0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_0)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_1)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor 
desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), RUNTIME_CALL_INFO_PARA_NUM_10); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_0, JSTaggedValue(INT_VALUE_1)); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_1, JSTaggedValue(INT_VALUE_1)); + ecmaRuntimeCallInfo1->SetCallArg(INT_VALUE_2, JSTaggedValue(INT_VALUE_666)); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result2 = Array::ToSpliced(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_TRUE(result2.IsECMAObject()); + JSHandle resultArr = + JSHandle(thread, JSTaggedValue(static_cast(result2.GetRawData()))); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key0).GetValue()->GetInt(), INT_VALUE_0); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key1).GetValue()->GetInt(), INT_VALUE_666); + EXPECT_EQ(JSArray::GetProperty(thread, resultArr, key2).GetValue()->GetInt(), INT_VALUE_2); +} + +HWTEST_F_L0(BuiltinsArrayTest, FindLast) +{ + auto ecmaVM = thread->GetEcmaVM(); + JSHandle env = ecmaVM->GetGlobalEnv(); + ObjectFactory *factory = ecmaVM->GetFactory(); + + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); + // arr [50, 40, 2] + JSHandle key0(thread, JSTaggedValue(0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(50)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(40)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(2)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + JSHandle jsArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestFindLastFunc)); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(1, jsArray.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::FindLast(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_EQ(result.GetRawData(), JSTaggedValue(40).GetRawData()); +} + +HWTEST_F_L0(BuiltinsArrayTest, FindLastIndex) +{ + auto ecmaVM = thread->GetEcmaVM(); + JSHandle env = ecmaVM->GetGlobalEnv(); + ObjectFactory *factory = ecmaVM->GetFactory(); + + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, 
JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), lengthKeyHandle).GetValue()->GetInt(), 0); + + // arr [50, 40, 30] + JSHandle key0(thread, JSTaggedValue(0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(50)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + + JSHandle key1(thread, JSTaggedValue(1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(40)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + + JSHandle key2(thread, JSTaggedValue(2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(30)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + JSHandle jsArray(JSArray::ArrayCreate(thread, JSTaggedNumber(0))); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestFindLastIndexFunc)); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(1, jsArray.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::FindLastIndex(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + EXPECT_EQ(result.GetRawData(), JSTaggedValue(static_cast(2)).GetRawData()); +} + +HWTEST_F_L0(BuiltinsArrayTest, ToReversed) +{ + JSHandle lengthKeyHandle = thread->GlobalConstants()->GetHandledLengthString(); + JSArray *arr = JSArray::Cast(JSArray::ArrayCreate(thread, JSTaggedNumber(0)).GetTaggedValue().GetTaggedObject()); + EXPECT_TRUE(arr != nullptr); + JSHandle obj(thread, arr); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_0); + JSHandle key0(thread, JSTaggedValue(INT_VALUE_0)); + PropertyDescriptor desc0(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_50)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key0, desc0); + JSHandle key1(thread, JSTaggedValue(INT_VALUE_1)); + PropertyDescriptor desc1(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_200)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key1, desc1); + JSHandle key2(thread, JSTaggedValue(INT_VALUE_2)); + PropertyDescriptor desc2(thread, JSHandle(thread, JSTaggedValue(INT_VALUE_3)), true, true, true); + JSArray::DefineOwnProperty(thread, obj, key2, desc2); + + auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), INT_VALUE_4); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result = Array::ToReversed(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + JSTaggedValue value(static_cast(result.GetRawData())); + ASSERT_TRUE(value.IsECMAObject()); + + PropertyDescriptor descRes(thread); + JSHandle valueHandle(thread, value); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(valueHandle), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_3); + JSObject::GetOwnProperty(thread, valueHandle, key0, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), 
JSTaggedValue(INT_VALUE_3)); + JSObject::GetOwnProperty(thread, valueHandle, key1, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_200)); + JSObject::GetOwnProperty(thread, valueHandle, key2, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_50)); + EXPECT_EQ(JSArray::GetProperty(thread, JSHandle(obj), + lengthKeyHandle).GetValue()->GetInt(), INT_VALUE_3); + JSObject::GetOwnProperty(thread, obj, key0, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_50)); + JSObject::GetOwnProperty(thread, obj, key1, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_200)); + JSObject::GetOwnProperty(thread, obj, key2, descRes); + ASSERT_EQ(descRes.GetValue().GetTaggedValue(), JSTaggedValue(INT_VALUE_3)); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_date_time_format_test.cpp b/ecmascript/builtins/tests/builtins_date_time_format_test.cpp index fd86ae446ce594777e85296bb6d3f65d72ec12cb..e0760f784cfbb6db8198472bb2e02b0615d04e33 100644 --- a/ecmascript/builtins/tests/builtins_date_time_format_test.cpp +++ b/ecmascript/builtins/tests/builtins_date_time_format_test.cpp @@ -647,4 +647,31 @@ HWTEST_F_L0(BuiltinsDateTimeFormatTest, DateTimeFormat_003) JSHandle elements(thread, resultHandle->GetElements()); EXPECT_EQ(elements->GetLength(), 16U); } + +// DateTimeFormat_004 +HWTEST_F_L0(BuiltinsDateTimeFormatTest, DateTimeFormat_004) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle localesString(factory->NewFromASCII("zh-CN")); + auto jsObj = JSHandle(thread, JSDateTimeFormatForObj_002(thread)); + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + JSHandle objFun = env->GetObjectFunction(); + JSHandle optionsObj = factory->NewJSObjectByConstructor(JSHandle(objFun), objFun); + JSHandle fullValue(factory->NewFromASCII("full")); + JSHandle falseValue(thread, JSTaggedValue(false)); + JSHandle dateStyleValue(factory->NewFromASCII("dateStyle")); + JSHandle timeStyleeValue(factory->NewFromASCII("timeStyle")); + JSHandle hour12Value(factory->NewFromASCII("hour12")); + JSHandle timeZone(factory->NewFromASCII("timeZone")); + JSHandle timeZoneValue(factory->NewFromASCII("UTC")); + JSObject::SetProperty(thread, optionsObj, dateStyleValue, fullValue); + JSObject::SetProperty(thread, optionsObj, timeStyleeValue, fullValue); + JSObject::SetProperty(thread, optionsObj, hour12Value, falseValue); + JSObject::SetProperty(thread, optionsObj, timeZone, timeZoneValue); + auto constructorResult = JSDateTimeFormatConstructor(thread, optionsObj, localesString); + JSHandle resultStr = + JSDateTimeFormat::FormatDateTime(thread, JSHandle(thread, constructorResult), 0.0); + EXPECT_STREQ("1970年1月1日星期四 协调世界时 00:00:00", EcmaStringAccessor(resultStr).ToCString().c_str()); +} } // namespace panda::test + diff --git a/ecmascript/builtins/tests/builtins_errors_test.cpp b/ecmascript/builtins/tests/builtins_errors_test.cpp index 307341fd76589456b3c3e240c23bb562ad5e5fb5..025e6201294ec8349232589d203179332e323798 100644 --- a/ecmascript/builtins/tests/builtins_errors_test.cpp +++ b/ecmascript/builtins/tests/builtins_errors_test.cpp @@ -40,6 +40,7 @@ using TypeError = builtins::BuiltinsTypeError; using URIError = builtins::BuiltinsURIError; using EvalError = builtins::BuiltinsEvalError; using SyntaxError = builtins::BuiltinsSyntaxError; +using AggregateError = builtins::BuiltinsAggregateError; using JSType = ecmascript::JSType; class BuiltinsErrorsTest : public 
testing::Test { @@ -980,4 +981,55 @@ HWTEST_F_L0(BuiltinsErrorsTest, EvalErrorToString) EXPECT_EQ(EcmaStringAccessor::Compare(instance, factory->NewFromASCII("EvalError: This is EvalError!"), resultHandle), 0); } + +/* + * @tc.name: AggregateErrorParameterConstructor + * @tc.desc: new AggregateError([], "Hello AggregateError", {cause: "error cause"}) + * @tc.type: FUNC + */ +HWTEST_F_L0(BuiltinsErrorsTest, AggregateErrorParameterConstructor) +{ + ObjectFactory *factory = instance->GetFactory(); + JSHandle env = instance->GetGlobalEnv(); + + JSHandle error(env->GetAggregateErrorFunction()); + JSHandle paramMsg(factory->NewFromASCII("Hello AggregateError!")); + + JSHandle errayFunc = env->GetArrayFunction(); + JSHandle newArray = factory->NewJSObjectByConstructor(JSHandle(errayFunc), errayFunc); + + JSHandle causeKey = thread->GlobalConstants()->GetHandledCauseString(); + JSHandle objFun = env->GetObjectFunction(); + JSHandle optionsObj = factory->NewJSObjectByConstructor(JSHandle(objFun), objFun); + JSHandle causeValue(factory->NewFromASCII("error cause")); // test error cause + JSObject::SetProperty(thread, optionsObj, causeKey, causeValue); + + auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue(*error), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(error.GetTaggedValue()); + ecmaRuntimeCallInfo->SetThis(JSTaggedValue(*error)); + ecmaRuntimeCallInfo->SetCallArg(0, newArray.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(1, paramMsg.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(2, optionsObj.GetTaggedValue()); // 2 means the options arg + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + JSTaggedValue result = AggregateError::AggregateErrorConstructor(ecmaRuntimeCallInfo); + EXPECT_TRUE(result.IsECMAObject()); + + JSHandle errorObject(thread, reinterpret_cast(result.GetRawData())); + JSHandle msgKey(factory->NewFromASCII("message")); + JSHandle nameKey = thread->GlobalConstants()->GetHandledNameString(); + + JSHandle msgValue(JSObject::GetProperty(thread, errorObject, msgKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("Hello AggregateError!"), JSHandle(msgValue)), 0); + + JSHandle nameValue(JSObject::GetProperty(thread, errorObject, nameKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("AggregateError"), JSHandle(nameValue)), 0); + + JSHandle errCauseValue(JSObject::GetProperty(thread, errorObject, causeKey).GetValue()); + ASSERT_EQ(EcmaStringAccessor::Compare(instance, + factory->NewFromASCII("error cause"), JSHandle(errCauseValue)), 0); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp b/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp index 2fda459022ad52f7f62dae89c3dd0862edfed871..9ff2f2ad6ba16637418ef00f16894b5c4e36957c 100644 --- a/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp +++ b/ecmascript/builtins/tests/builtins_finalization_registry_test.cpp @@ -441,4 +441,96 @@ HWTEST_F_L0(BuiltinsFinalizationRegistryTest, Unregister2) vm->SetEnableForceGC(true); ASSERT_EQ(testValue, 0); } + +// finalizationRegistry.Register(target, heldValue [ , unregisterToken ]) target and unregisterToken Symbol +HWTEST_F_L0(BuiltinsFinalizationRegistryTest, RegisterTargetSymbol) +{ + testValue = 0; + EcmaVM *vm = thread->GetEcmaVM(); + + JSTaggedValue result = CreateFinalizationRegistryConstructor(thread); + JSHandle 
jsfinalizationRegistry(thread, result); + + vm->SetEnableForceGC(false); + JSTaggedValue target = JSTaggedValue::Undefined(); + JSTaggedValue target1 = JSTaggedValue::Undefined(); + { + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle symbol1 = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + JSHandle symbol2 = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + target = symbol1.GetTaggedValue(); + target1 = symbol2.GetTaggedValue(); + auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(0, target); + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo->SetCallArg(2, target); // 2 means the unregisterToken arg + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo); + TestHelper::TearDownFrame(thread, prev); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, target1); + ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo1->SetCallArg(2, target1); // 2 means the unregisterToken arg + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + } + vm->CollectGarbage(TriggerGCType::FULL_GC); + if (!thread->HasPendingException()) { + job::MicroJobQueue::ExecutePendingJob(thread, vm->GetJSThread()->GetCurrentEcmaContext()->GetMicroJobQueue()); + } + vm->SetEnableForceGC(true); + ASSERT_EQ(testValue, 2); +} + +// finalizationRegistry.Unregister(unregisterToken) unregisterToken Symbol +HWTEST_F_L0(BuiltinsFinalizationRegistryTest, UnregisterTokenSymbol) +{ + testValue = 0; + EcmaVM *vm = thread->GetEcmaVM(); + + JSTaggedValue result = CreateFinalizationRegistryConstructor(thread); + JSHandle jsfinalizationRegistry(thread, result); + vm->SetEnableForceGC(false); + JSTaggedValue target = JSTaggedValue::Undefined(); + { + [[maybe_unused]] EcmaHandleScope handleScope(thread); + JSHandle symbol = thread->GetEcmaVM()->GetFactory()->NewJSSymbol(); + target = symbol.GetTaggedValue(); + auto ecmaRuntimeCallInfo = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 10); // 10 means 3 call args + ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo->SetCallArg(0, target); + ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(10)); + ecmaRuntimeCallInfo->SetCallArg(2, target); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo); + BuiltinsFinalizationRegistry::Register(ecmaRuntimeCallInfo); + + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(jsfinalizationRegistry.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, target); + + 
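+        // The token is unregistered before the forced GC below, so the cleanup
+        // callback never runs for this cell: testValue is expected to remain 0,
+        // in contrast to RegisterTargetSymbol above, where both symbol targets
+        // are collected and testValue reaches 2.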
BuiltinsFinalizationRegistry::Unregister(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + } + vm->CollectGarbage(TriggerGCType::FULL_GC); + if (!thread->HasPendingException()) { + job::MicroJobQueue::ExecutePendingJob(thread, vm->GetJSThread()->GetCurrentEcmaContext()->GetMicroJobQueue()); + } + vm->SetEnableForceGC(true); + ASSERT_EQ(testValue, 0); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_global_test.cpp b/ecmascript/builtins/tests/builtins_global_test.cpp index 9ed7dc1ed1157eb7324ed7a58f9da74904da54be..b6d4bd2c6d10b3fc471b1c3dce786fe39cd80a4c 100644 --- a/ecmascript/builtins/tests/builtins_global_test.cpp +++ b/ecmascript/builtins/tests/builtins_global_test.cpp @@ -120,4 +120,98 @@ HWTEST_F_L0(BuiltinsGlobalTest, CallJsProxy) EXPECT_EQ(result, JSTaggedValue::Undefined()); thread->ClearException(); } + +HWTEST_F_L0(BuiltinsGlobalTest, Escape) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle str1 = factory->NewFromASCII("?!=()#%&"); + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT, 6 means 3 paras + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetCallArg(0, str1.GetTaggedValue()); + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result1 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + EXPECT_TRUE(result1.IsString()); + JSHandle ecmaStrHandle1(thread, result1); + EXPECT_STREQ("%3F%21%3D%28%29%23%25%26", EcmaStringAccessor(ecmaStrHandle1).ToCString().c_str()); // NOLINT + + JSHandle str2 = factory->NewFromASCII("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F"); + auto ecmaRuntimeCallInfo2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT + ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetCallArg(0, str2.GetTaggedValue()); // NOLINT + + [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); + JSTaggedValue result2 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo2); + TestHelper::TearDownFrame(thread, prev2); + EXPECT_TRUE(result2.IsString()); + JSHandle ecmaStrHandle2(thread, result2); + EXPECT_STREQ("%25u%25u0%25u9%25ua%25uF%25u00%25u09%25u0f%25u0F%25u000%25u00a%25u00F", // NOLINT special value + EcmaStringAccessor(ecmaStrHandle2).ToCString().c_str()); + + JSHandle str3 = factory->NewFromASCII("Hello World!"); + auto ecmaRuntimeCallInfo3 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT + ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo3->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo3->SetCallArg(0, str3.GetTaggedValue()); + + [[maybe_unused]] auto prev3 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); + JSTaggedValue result3 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo3); + TestHelper::TearDownFrame(thread, prev3); + EXPECT_TRUE(result3.IsString()); + JSHandle ecmaStrHandle3(thread, result3); + EXPECT_STREQ("Hello%20World%21", EcmaStringAccessor(ecmaStrHandle3).ToCString().c_str()); +} + +HWTEST_F_L0(BuiltinsGlobalTest, Unescape) +{ + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle str1 = factory->NewFromASCII(""); + auto ecmaRuntimeCallInfo1 = + 
TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetCallArg(0, str1.GetTaggedValue()); + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result1 = BuiltinsGlobal::Unescape(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + EXPECT_TRUE(result1.IsString()); + JSHandle ecmaStrHandle1(thread, result1); + EXPECT_STREQ("", EcmaStringAccessor(ecmaStrHandle1).ToCString().c_str()); + + JSHandle str2 = factory->NewFromASCII("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F"); + auto ecmaRuntimeCallInfo2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT + ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetCallArg(0, str2.GetTaggedValue()); // NOLINT + + [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); + JSTaggedValue result2 = BuiltinsGlobal::Unescape(ecmaRuntimeCallInfo2); + TestHelper::TearDownFrame(thread, prev2); + EXPECT_TRUE(result2.IsString()); + JSHandle ecmaStrHandle2(thread, result2); + EXPECT_STREQ("%u%u0%u9%ua%uF%u00%u09%u0f%u0F%u000%u00a%u00F", + EcmaStringAccessor(ecmaStrHandle2).ToCString().c_str()); + + JSHandle str3 = factory->NewFromASCII("Hello%20World%21"); + auto ecmaRuntimeCallInfo3 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // NOLINT 6 means 3 paras + ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo3->SetThis(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo3->SetCallArg(0, str3.GetTaggedValue()); + + [[maybe_unused]] auto prev3 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); + JSTaggedValue result3 = BuiltinsGlobal::Escape(ecmaRuntimeCallInfo3); + TestHelper::TearDownFrame(thread, prev3); + EXPECT_TRUE(result3.IsString()); + JSHandle ecmaStrHandle3(thread, result3); + EXPECT_STREQ("Hello%2520World%2521", EcmaStringAccessor(ecmaStrHandle3).ToCString().c_str()); +} } // namespace panda::test diff --git a/ecmascript/builtins/tests/builtins_math_test.cpp b/ecmascript/builtins/tests/builtins_math_test.cpp index 5e8b7fe5df5f8ea892fd87b6f8fac95919e79e84..c7a897c573d67241efe8b50f25c4e23b7831b246 100644 --- a/ecmascript/builtins/tests/builtins_math_test.cpp +++ b/ecmascript/builtins/tests/builtins_math_test.cpp @@ -2735,7 +2735,7 @@ HWTEST_F_L0(BuiltinsMathTest, Max_3) [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread_, ecmaRuntimeCallInfo); JSTaggedValue result = BuiltinsMath::Max(ecmaRuntimeCallInfo); TestHelper::TearDownFrame(thread_, prev); - JSTaggedValue expect = BuiltinsBase::GetTaggedDouble(100.0); + JSTaggedValue expect = BuiltinsBase::GetTaggedInt(100); ASSERT_EQ(result.GetRawData(), expect.GetRawData()); } diff --git a/ecmascript/builtins/tests/builtins_regexp_test.cpp b/ecmascript/builtins/tests/builtins_regexp_test.cpp index 79df1902e4e954845a44c1d1cee80c040576c800..6727f3fc6a7bf4ca817e069fe02430cb8d1aec7c 100644 --- a/ecmascript/builtins/tests/builtins_regexp_test.cpp +++ b/ecmascript/builtins/tests/builtins_regexp_test.cpp @@ -657,4 +657,64 @@ HWTEST_F_L0(BuiltinsRegExpTest, RegExpParseCache) RegExpParserCache::CACHE_SIZE, vec).first.IsHole()); ASSERT_TRUE(regExpParserCache->GetCache(*string2, 0, vec).first.IsHole()); } + 
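+// The test below covers the ES2022 "d" (hasIndices) flag. For orientation, with
+// the pattern /(?<groupname>a)/gd run against "babcae", exec() is expected to
+// surface match boundaries as [start, end) index pairs:
+//   result.indices        == [[1, 2], [1, 2]]       (whole match, capture group 1)
+//   result.indices.groups == { groupname: [1, 2] }  (named group)
+// Note also that the `flags` accessor reports flags in canonical order, so a
+// RegExp constructed with "gd" stringifies its flags as "dg".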
+HWTEST_F_L0(BuiltinsRegExpTest, FlagD)
+{
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    // invoke RegExpConstructor method
+    JSHandle<EcmaString> pattern1 = factory->NewFromASCII("(?<groupname>a)");
+    JSHandle<EcmaString> flags1 = factory->NewFromASCII("gd");
+    JSTaggedValue result1 = CreateBuiltinsRegExpObjByPatternAndFlags(thread, pattern1, flags1);
+    JSHandle<JSTaggedValue> result1Handle(thread, result1);
+
+    // invoke GetFlags method
+    JSHandle<JSTaggedValue> flags(factory->NewFromASCII("flags"));
+    JSHandle<JSTaggedValue> flagsResult(JSObject::GetProperty(thread, result1Handle, flags).GetValue());
+    JSHandle<EcmaString> expectResult = factory->NewFromASCII("dg");
+    ASSERT_EQ(EcmaStringAccessor::Compare(instance, JSHandle<EcmaString>(flagsResult), expectResult), 0);
+
+    // invoke GetHasIndices method
+    JSHandle<JSTaggedValue> hasIndices(factory->NewFromASCII("hasIndices"));
+    JSTaggedValue taggedHasIndicesResult =
+        JSObject::GetProperty(thread, result1Handle, hasIndices).GetValue().GetTaggedValue();
+    ASSERT_EQ(taggedHasIndicesResult.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    JSHandle<EcmaString> inputString = factory->NewFromASCII("babcae");
+    auto ecmaRuntimeCallInfo =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo->SetThis(result1Handle.GetTaggedValue());
+    ecmaRuntimeCallInfo->SetCallArg(0, inputString.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+    // invoke Exec method
+    JSTaggedValue results = BuiltinsRegExp::Exec(ecmaRuntimeCallInfo);
+    TestHelper::TearDownFrame(thread, prev);
+
+    JSHandle<JSTaggedValue> execResult(thread, results);
+    JSHandle<JSTaggedValue> indices(factory->NewFromASCII("indices"));
+    JSHandle<JSTaggedValue> indicesArr = JSObject::GetProperty(thread, execResult, indices).GetValue();
+    EXPECT_TRUE(indicesArr->IsJSArray());
+
+    JSHandle<JSTaggedValue> indices0 = JSObject::GetProperty(thread, indicesArr, 0).GetValue();
+    EXPECT_TRUE(indices0->IsJSArray());
+    // indices[0] [1, 2]
+    EXPECT_EQ(JSObject::GetProperty(thread, indices0, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, indices0, 1).GetValue()->GetInt(), 2);
+    JSHandle<JSTaggedValue> indices1 = JSObject::GetProperty(thread, indicesArr, 1).GetValue();
+    EXPECT_TRUE(indices1->IsJSArray());
+    // indices[1] [1, 2]
+    EXPECT_EQ(JSObject::GetProperty(thread, indices1, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, indices1, 1).GetValue()->GetInt(), 2);
+
+    JSHandle<JSTaggedValue> groups(factory->NewFromASCII("groups"));
+    JSHandle<JSTaggedValue> groupsObj = JSObject::GetProperty(thread, indicesArr, groups).GetValue();
+    EXPECT_TRUE(groupsObj->IsJSObject());
+    JSHandle<JSTaggedValue> groupName(factory->NewFromASCII("groupname"));
+    JSHandle<JSTaggedValue> groupNameArr = JSObject::GetProperty(thread, groupsObj, groupName).GetValue();
+    EXPECT_TRUE(groupNameArr->IsJSArray());
+    // {groupname: [1, 2]}
+    EXPECT_EQ(JSObject::GetProperty(thread, groupNameArr, 0).GetValue()->GetInt(), 1);
+    EXPECT_EQ(JSObject::GetProperty(thread, groupNameArr, 1).GetValue()->GetInt(), 2);
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_typedarray_test.cpp b/ecmascript/builtins/tests/builtins_typedarray_test.cpp
index 6e0dd96d626065e27f2847151491f33c82d4fa5a..95c62679f4382280e8e2638c4593a0a5880b2608 100644
--- a/ecmascript/builtins/tests/builtins_typedarray_test.cpp
+++ b/ecmascript/builtins/tests/builtins_typedarray_test.cpp
@@ -44,6 +44,20 @@ namespace panda::test {
 using Array = ecmascript::builtins::BuiltinsArray;
 using TypedArray = ecmascript::builtins::BuiltinsTypedArray;
 using
TypedArrayHelper = ecmascript::base::TypedArrayHelper; +constexpr uint32_t ECMA_RUNTIME_CALL_INFO_4 = 4; +constexpr uint32_t ECMA_RUNTIME_CALL_INFO_6 = 6; + +enum class TypeArrayIndex { + TYPED_ARRAY_INDEX_0, + TYPED_ARRAY_INDEX_1, + TYPED_ARRAY_INDEX_2, + TYPED_ARRAY_INDEX_3 +}; +constexpr uint32_t TYPED_ARRAY_LENGTH_3 = 3; +constexpr int32_t INT_VALUE_0 = 0; +constexpr int32_t INT_VALUE_2 = 2; +constexpr int32_t INT_VALUE_4 = 4; +constexpr int32_t INT_VALUE_9 = 9; class BuiltinsTypedArrayTest : public testing::Test { public: @@ -143,6 +157,42 @@ protected: return GetTaggedBoolean(false); } + static JSTaggedValue TestToSortedFunc(EcmaRuntimeCallInfo *argv) + { + uint32_t argc = argv->GetArgsNumber(); + if (argc > 1) { + // x < y + if (GetCallArg(argv, 0)->GetInt() < GetCallArg(argv, 1)->GetInt()) { + return GetTaggedBoolean(true); + } + } + return GetTaggedBoolean(false); + } + + static JSTaggedValue TestFindLastFunc(EcmaRuntimeCallInfo *argv) + { + uint32_t argc = argv->GetArgsNumber(); + if (argc > 0) { + // 20 : test case + if (GetCallArg(argv, 0)->GetInt() > 20) { + return GetTaggedBoolean(true); + } + } + return GetTaggedBoolean(false); + } + + static JSTaggedValue TestFindLastIndexFunc(EcmaRuntimeCallInfo *argv) + { + uint32_t argc = argv->GetArgsNumber(); + if (argc > 0) { + // 20 : test case + if (GetCallArg(argv, 0)->GetInt() > 20) { + return GetTaggedBoolean(true); + } + } + return GetTaggedBoolean(false); + } + static JSTaggedValue TestReduceFunc(EcmaRuntimeCallInfo *argv) { int accumulator = GetCallArg(argv, 0)->GetInt(); @@ -350,4 +400,219 @@ HWTEST_F_L0(BuiltinsTypedArrayTest, At) ASSERT_TRUE(result.IsUndefined()); } + +HWTEST_F_L0(BuiltinsTypedArrayTest, ToReversed) +{ + ASSERT_NE(thread, nullptr); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + [[maybe_unused]] JSHandle array(factory->NewTaggedArray(TYPED_ARRAY_LENGTH_3)); + array->Set(thread, static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_0), JSTaggedValue(INT_VALUE_0)); + array->Set(thread, static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_1), JSTaggedValue(INT_VALUE_4)); + array->Set(thread, static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_2), JSTaggedValue(INT_VALUE_9)); + + [[maybe_unused]] JSHandle obj = + JSHandle(thread, CreateTypedArrayFromList(thread, array)); + auto ecmaRuntimeCallInfo1 = TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), + ECMA_RUNTIME_CALL_INFO_4); + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + [[maybe_unused]] JSTaggedValue result = TypedArray::ToReversed(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev); + + auto ecmaRuntimeCallInfo2 = TestHelper::CreateEcmaRuntimeCallInfo(thread, + JSTaggedValue::Undefined(), + ECMA_RUNTIME_CALL_INFO_6); + ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo2->SetCallArg(static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_0), + JSTaggedValue(INT_VALUE_0)); + prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); + JSTaggedValue value = TypedArray::At(ecmaRuntimeCallInfo2); + TestHelper::TearDownFrame(thread, prev); + ASSERT_EQ(value, JSTaggedValue(INT_VALUE_0)); + + auto ecmaRuntimeCallInfo3 = TestHelper::CreateEcmaRuntimeCallInfo(thread, + JSTaggedValue::Undefined(), + ECMA_RUNTIME_CALL_INFO_6); + ecmaRuntimeCallInfo3->SetFunction(JSTaggedValue::Undefined()); + 
ecmaRuntimeCallInfo3->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo3->SetCallArg(static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_0), + JSTaggedValue(INT_VALUE_2)); + prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo3); + value = TypedArray::At(ecmaRuntimeCallInfo3); + TestHelper::TearDownFrame(thread, prev); + ASSERT_EQ(value, JSTaggedValue(INT_VALUE_9)); + + auto ecmaRuntimeCallInfo4 = TestHelper::CreateEcmaRuntimeCallInfo(thread, + JSTaggedValue::Undefined(), + ECMA_RUNTIME_CALL_INFO_6); + ecmaRuntimeCallInfo4->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo4->SetThis(result); + ecmaRuntimeCallInfo4->SetCallArg(static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_0), + JSTaggedValue(INT_VALUE_0)); + prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo4); + value = TypedArray::At(ecmaRuntimeCallInfo4); + TestHelper::TearDownFrame(thread, prev); + ASSERT_EQ(value, JSTaggedValue(INT_VALUE_9)); + auto ecmaRuntimeCallInfo5 = TestHelper::CreateEcmaRuntimeCallInfo(thread, + JSTaggedValue::Undefined(), + ECMA_RUNTIME_CALL_INFO_6); + ecmaRuntimeCallInfo5->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo5->SetThis(result); + ecmaRuntimeCallInfo5->SetCallArg(static_cast(TypeArrayIndex::TYPED_ARRAY_INDEX_0), + JSTaggedValue(INT_VALUE_2)); + prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo5); + value = TypedArray::At(ecmaRuntimeCallInfo5); + TestHelper::TearDownFrame(thread, prev); + ASSERT_EQ(value, JSTaggedValue(INT_VALUE_0)); +} + +HWTEST_F_L0(BuiltinsTypedArrayTest, ToSorted) +{ + ASSERT_NE(thread, nullptr); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + JSHandle array(factory->NewTaggedArray(3)); + // array [10, 8, 30] + array->Set(thread, 0, JSTaggedValue(10)); + array->Set(thread, 1, JSTaggedValue(8)); + array->Set(thread, 2, JSTaggedValue(30)); + + JSHandle obj = JSHandle(thread, CreateTypedArrayFromList(thread, array)); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestToSortedFunc)); + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result1 = TypedArray::ToSorted(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + + EXPECT_TRUE(result1.IsTypedArray()); + JSHandle resultArr1 = JSHandle(thread, result1); + // [30, 10, 8] + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 0).GetValue()->GetInt(), 30); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 1).GetValue()->GetInt(), 10); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 2).GetValue()->GetInt(), 8); + + auto ecmaRuntimeCallInfo2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 4); // 4 means 0 call arg + ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); + + [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); + JSTaggedValue result2 = TypedArray::ToSorted(ecmaRuntimeCallInfo2); + TestHelper::TearDownFrame(thread, prev2); + + EXPECT_TRUE(result2.IsTypedArray()); + JSHandle resultArr2 = JSHandle(thread, result2); + // [8, 10 ,30] + 
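// with no comparator, %TypedArray%.prototype.toSorted compares numerically (unlike Array)
+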
EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 0).GetValue()->GetInt(), 8); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 1).GetValue()->GetInt(), 10); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 2).GetValue()->GetInt(), 30); +} + +HWTEST_F_L0(BuiltinsTypedArrayTest, With) +{ + ASSERT_NE(thread, nullptr); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle array(factory->NewTaggedArray(3)); + // array [1, 2, 3] + array->Set(thread, 0, JSTaggedValue(1)); + array->Set(thread, 1, JSTaggedValue(2)); + array->Set(thread, 2, JSTaggedValue(3)); + + JSHandle obj = JSHandle(thread, CreateTypedArrayFromList(thread, array)); + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, JSTaggedValue(static_cast(-1))); + ecmaRuntimeCallInfo1->SetCallArg(1, JSTaggedValue(static_cast(30))); // with(-1, 30) + + [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1); + JSTaggedValue result1 = TypedArray::With(ecmaRuntimeCallInfo1); + TestHelper::TearDownFrame(thread, prev1); + + EXPECT_TRUE(result1.IsTypedArray()); + JSHandle resultArr1 = JSHandle(thread, result1); + // [1, 2, 30] + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 0).GetValue()->GetInt(), 1); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 1).GetValue()->GetInt(), 2); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr1, 2).GetValue()->GetInt(), 30); + + auto ecmaRuntimeCallInfo2 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args + ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo2->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo2->SetCallArg(0, JSTaggedValue(static_cast(1))); + ecmaRuntimeCallInfo2->SetCallArg(1, JSTaggedValue(static_cast(-100))); // with(1, -100) + + [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2); + JSTaggedValue result2 = TypedArray::With(ecmaRuntimeCallInfo2); + TestHelper::TearDownFrame(thread, prev2); + + EXPECT_TRUE(result2.IsTypedArray()); + JSHandle resultArr2 = JSHandle(thread, result2); + // [1, -100, 3] + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 0).GetValue()->GetInt(), 1); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 1).GetValue()->GetInt(), -100); + EXPECT_EQ(JSTypedArray::GetProperty(thread, resultArr2, 2).GetValue()->GetInt(), 3); +} + +HWTEST_F_L0(BuiltinsTypedArrayTest, FindLast) +{ + ASSERT_NE(thread, nullptr); + ObjectFactory *factory = thread->GetEcmaVM()->GetFactory(); + JSHandle env = thread->GetEcmaVM()->GetGlobalEnv(); + JSHandle array(factory->NewTaggedArray(3)); + // array [50, 40, 2] + array->Set(thread, 0, JSTaggedValue(50)); + array->Set(thread, 1, JSTaggedValue(40)); + array->Set(thread, 2, JSTaggedValue(2)); + + JSHandle obj = JSHandle(thread, CreateTypedArrayFromList(thread, array)); + JSHandle func = factory->NewJSFunction(env, reinterpret_cast(TestClass::TestFindLastFunc)); + auto ecmaRuntimeCallInfo1 = + TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg + ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined()); + ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue()); + ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue()); + + [[maybe_unused]] 
auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result = TypedArray::FindLast(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+
+    EXPECT_EQ(result.GetRawData(), JSTaggedValue(40).GetRawData());
+}
+
+HWTEST_F_L0(BuiltinsTypedArrayTest, FindLastIndex)
+{
+    ASSERT_NE(thread, nullptr);
+    ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<TaggedArray> array(factory->NewTaggedArray(3));
+    // array [50, 40, 30]
+    array->Set(thread, 0, JSTaggedValue(50));
+    array->Set(thread, 1, JSTaggedValue(40));
+    array->Set(thread, 2, JSTaggedValue(30));
+
+    JSHandle<JSTaggedValue> obj = JSHandle<JSTaggedValue>(thread, CreateTypedArrayFromList(thread, array));
+    JSHandle<JSFunction> func =
+        factory->NewJSFunction(env, reinterpret_cast<void *>(TestClass::TestFindLastIndexFunc));
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(obj.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, func.GetTaggedValue());
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result = TypedArray::FindLastIndex(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+
+    EXPECT_EQ(result.GetRawData(), JSTaggedValue(static_cast<int32_t>(2)).GetRawData());
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_weak_map_test.cpp b/ecmascript/builtins/tests/builtins_weak_map_test.cpp
index f3a45779c1be8d1c574034563e2826a5263193ba..ac410b067dcba99b36942bd76ca734b7f9ab5e94 100644
--- a/ecmascript/builtins/tests/builtins_weak_map_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_map_test.cpp
@@ -213,4 +213,55 @@ HWTEST_F_L0(BuiltinsWeakMapTest, DeleteAndRemove)
     EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
 }
+
+HWTEST_F_L0(BuiltinsWeakMapTest, SymbolKey)
+{
+    // create jsWeakMap
+    JSHandle<JSWeakMap> weakMap(thread, CreateBuiltinsWeakMap(thread));
+
+    // add 2 symbol keys
+    JSTaggedValue lastKey(JSTaggedValue::Undefined());
+    for (int i = 0; i < 2; i++) {
+        JSHandle<JSSymbol> symbolKey = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+        JSHandle<JSTaggedValue> key(symbolKey);
+        auto ecmaRuntimeCallInfo =
+            TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 8); // 8 means 2 call args
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+        ecmaRuntimeCallInfo->SetThis(weakMap.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(0, key.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(1, JSTaggedValue(static_cast<int32_t>(i)));
+
+        [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+        // set
+        JSTaggedValue result1 = BuiltinsWeakMap::Set(ecmaRuntimeCallInfo);
+        TestHelper::TearDownFrame(thread, prev);
+
+        EXPECT_TRUE(result1.IsECMAObject());
+        JSWeakMap *jsWeakMap = JSWeakMap::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData()));
+        EXPECT_EQ(jsWeakMap->GetSize(), static_cast<int>(i) + 1);
+        lastKey = key.GetTaggedValue();
+    }
+
+    // check whether jsWeakMap can get and delete lastKey
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(weakMap.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, lastKey);
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    // get
+    JSTaggedValue result2 = BuiltinsWeakMap::Get(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+    EXPECT_EQ(result2, JSTaggedValue(1));
+
+    // delete
+    JSTaggedValue result3 = BuiltinsWeakMap::Delete(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result3.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    // check lastKey is deleted
+    JSTaggedValue result4 = BuiltinsWeakMap::Has(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_weak_ref_test.cpp b/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
index 71d1bace2994dfcea9b85839d61db3416676ccf2..5c314ab2b2dd0cf3324d0486179252e6a7d04e7f 100644
--- a/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_ref_test.cpp
@@ -188,4 +188,38 @@ HWTEST_F_L0(BuiltinsWeakRefTest, Deref3)
     vm->SetEnableForceGC(true);
     ASSERT_TRUE(!result2.IsUndefined());
 }
+
+// symbol target
+HWTEST_F_L0(BuiltinsWeakRefTest, SymbolTarget)
+{
+    JSHandle<GlobalEnv> env = thread->GetEcmaVM()->GetGlobalEnv();
+    JSHandle<JSSymbol> symbolTarget = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+    JSHandle<JSTaggedValue> target(symbolTarget);
+
+    JSHandle<JSTaggedValue> weakRef(env->GetBuiltinsWeakRefFunction());
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, weakRef.GetTaggedValue(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(weakRef.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetThis(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetCallArg(0, target.GetTaggedValue());
+
+    // constructor
+    [[maybe_unused]] auto prev1 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    JSTaggedValue result1 = BuiltinsWeakRef::WeakRefConstructor(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev1);
+    ASSERT_TRUE(result1.IsECMAObject());
+
+    JSHandle<JSWeakRef> jsWeakRef(thread, JSWeakRef::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData())));
+    auto ecmaRuntimeCallInfo2 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 4); // 4 means 0 call arg
+    ecmaRuntimeCallInfo2->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo2->SetThis(jsWeakRef.GetTaggedValue());
+
+    // weakRef.Deref()
+    [[maybe_unused]] auto prev2 = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo2);
+    JSTaggedValue result2 = BuiltinsWeakRef::Deref(ecmaRuntimeCallInfo2);
+    TestHelper::TearDownFrame(thread, prev2);
+    ASSERT_EQ(result2, target.GetTaggedValue());
+}
 } // namespace panda::test
diff --git a/ecmascript/builtins/tests/builtins_weak_set_test.cpp b/ecmascript/builtins/tests/builtins_weak_set_test.cpp
index 38d5eee994b90e6a203a8ed22bc555b51c7a7d8b..61aedcdc35b5ed09b5fbdcbdd3fe43bf21d5fcff 100644
--- a/ecmascript/builtins/tests/builtins_weak_set_test.cpp
+++ b/ecmascript/builtins/tests/builtins_weak_set_test.cpp
@@ -207,4 +207,54 @@ HWTEST_F_L0(BuiltinsWeakSetTest, DeleteAndRemove)
     EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
 }
+
+HWTEST_F_L0(BuiltinsWeakSetTest, SymbolKey)
+{
+    // create jsWeakSet
+    JSHandle<JSWeakSet> weakSet(thread, CreateBuiltinsWeakSet(thread));
+
+    // add 2 symbol keys
+    JSTaggedValue lastKey(JSTaggedValue::Undefined());
+    for (int i = 0; i < 2; i++) {
+        JSHandle<JSSymbol> symbolKey = thread->GetEcmaVM()->GetFactory()->NewJSSymbol();
+        JSHandle<JSTaggedValue> key(symbolKey);
+
+        auto ecmaRuntimeCallInfo =
+            TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+        ecmaRuntimeCallInfo->SetFunction(JSTaggedValue::Undefined());
+        ecmaRuntimeCallInfo->SetThis(weakSet.GetTaggedValue());
+        ecmaRuntimeCallInfo->SetCallArg(0, key.GetTaggedValue());
+
+        [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo);
+        // add
+        JSTaggedValue result1 = BuiltinsWeakSet::Add(ecmaRuntimeCallInfo);
+        TestHelper::TearDownFrame(thread, prev);
+
+        EXPECT_TRUE(result1.IsECMAObject());
+        JSWeakSet *jsWeakSet = JSWeakSet::Cast(reinterpret_cast<TaggedObject *>(result1.GetRawData()));
+        EXPECT_EQ(jsWeakSet->GetSize(), static_cast<int>(i) + 1);
+        lastKey = key.GetTaggedValue();
+    }
+    // check whether jsWeakSet can find and delete lastKey
+
+    auto ecmaRuntimeCallInfo1 =
+        TestHelper::CreateEcmaRuntimeCallInfo(thread, JSTaggedValue::Undefined(), 6); // 6 means 1 call arg
+    ecmaRuntimeCallInfo1->SetFunction(JSTaggedValue::Undefined());
+    ecmaRuntimeCallInfo1->SetThis(weakSet.GetTaggedValue());
+    ecmaRuntimeCallInfo1->SetCallArg(0, lastKey);
+
+    [[maybe_unused]] auto prev = TestHelper::SetupFrame(thread, ecmaRuntimeCallInfo1);
+    // has
+    JSTaggedValue result2 = BuiltinsWeakSet::Has(ecmaRuntimeCallInfo1);
+    TestHelper::TearDownFrame(thread, prev);
+    EXPECT_EQ(result2.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    // delete
+    JSTaggedValue result3 = BuiltinsWeakSet::Delete(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result3.GetRawData(), JSTaggedValue::True().GetRawData());
+
+    // check lastKey is deleted
+    JSTaggedValue result4 = BuiltinsWeakSet::Has(ecmaRuntimeCallInfo1);
+    EXPECT_EQ(result4.GetRawData(), JSTaggedValue::False().GetRawData());
+}
 } // namespace panda::test
diff --git a/ecmascript/common.h b/ecmascript/common.h
index 7dfb6d04c5f1f8f570038d3b9339065fdf39ab2a..7eb60703ea60780c3b8e87540ac0c3f3e80deef5 100644
--- a/ecmascript/common.h
+++ b/ecmascript/common.h
@@ -50,6 +50,10 @@ enum class GCReason : uint8_t {
     OTHER,
 };
 
+enum class RequestAotMode : uint8_t {
+    RE_COMPILE_ON_IDLE = 0
+};
+
 #define SCOPE_LIST(V) \
     V(TotalGC)        \
     V(Initialize)     \
@@ -152,13 +156,13 @@ using Address = uintptr_t;
 #ifdef PANDA_TARGET_32
 #define STATIC_ASSERT_EQ_ARCH32(a, b) static_assert(a == b)
 #else
-#define STATIC_ASSERT_EQ_ARCH32(a, b)
+#define STATIC_ASSERT_EQ_ARCH32(a, b) static_assert(true)
 #endif
 
 #ifdef PANDA_TARGET_64
 #define STATIC_ASSERT_EQ_ARCH64(a, b) static_assert(a == b)
 #else
-#define STATIC_ASSERT_EQ_ARCH64(a, b)
+#define STATIC_ASSERT_EQ_ARCH64(a, b) static_assert(true)
 #endif
 
 #if defined(PANDA_TARGET_WINDOWS) || defined(PANDA_TARGET_MACOS) || defined(PANDA_TARGET_IOS)
@@ -168,7 +172,7 @@ using Address = uintptr_t;
 #endif
 
 #define STATIC_ASSERT_EQ_ARCH(expect, valueArch32, valueArch64) \
-    STATIC_ASSERT_EQ_ARCH32(expect, valueArch32)                \
+    STATIC_ASSERT_EQ_ARCH32(expect, valueArch32);               \
     STATIC_ASSERT_EQ_ARCH64(expect, valueArch64)
 } // namespace ecmascript
 } // namespace panda
diff --git a/ecmascript/compiler/BUILD.gn b/ecmascript/compiler/BUILD.gn
index 886b989359077db1760f5bbf418fa6c411b1ace3..2265f734be5732ff3ba728d82c6ca737629afe8e 100644
--- a/ecmascript/compiler/BUILD.gn
+++ b/ecmascript/compiler/BUILD.gn
@@ -54,106 +54,139 @@ config("include_llvm") {
   cflags_cc = [ "-DARK_GC_SUPPORT" ]
 }
 
-ohos_source_set("libark_jsoptimizer_set") {
-  stack_protector_ret = false
-  sources = [
-    "access_object_stub_builder.cpp",
-    "argument_accessor.cpp",
-    "assembler/aarch64/assembler_aarch64.cpp",
-    "assembler/aarch64/extend_assembler.cpp",
-    "assembler/x64/assembler_x64.cpp",
-    "assembler/x64/extended_assembler_x64.cpp",
-    "assembler_module.cpp",
-    "async_function_lowering.cpp",
-    "bc_call_signature.cpp",
-    "builtins/builtins_call_signature.cpp",
-
"builtins/builtins_string_stub_builder.cpp", - "builtins/builtins_stubs.cpp", - "builtins/containers_stub_builder.cpp", - "builtins_lowering.cpp", - "bytecode_circuit_builder.cpp", - "bytecode_info_collector.cpp", - "bytecodes.cpp", - "call_signature.cpp", - "circuit.cpp", - "circuit_builder.cpp", - "common_stubs.cpp", - "compilation_driver.cpp", - "compiler_log.cpp", - "debug_info.cpp", - "early_elimination.cpp", - "file_generators.cpp", - "frame_states.cpp", - "gate.cpp", - "gate_accessor.cpp", - "gate_meta_data.cpp", - "graph_editor.cpp", - "graph_linearizer.cpp", - "graph_visitor.cpp", - "ic_stub_builder.cpp", - "interpreter_stub.cpp", - "later_elimination.cpp", - "lcr_lowering.cpp", - "llvm_codegen.cpp", - "llvm_ir_builder.cpp", - "loop_analysis.cpp", - "loop_peeling.cpp", - "new_object_stub_builder.cpp", - "ntype_hcr_lowering.cpp", - "ntype_mcr_lowering.cpp", - "number_speculative_lowering.cpp", - "number_speculative_retype.cpp", - "number_speculative_runner.cpp", - "operations_stub_builder.cpp", - "pass_manager.cpp", - "profiler_stub_builder.cpp", - "range_analysis.cpp", - "rt_call_signature.cpp", - "scheduler.cpp", - "slowpath_lowering.cpp", - "state_split_linearizer.cpp", - "stub.cpp", - "stub_builder.cpp", - "test_stubs.cpp", - "test_stubs_signature.cpp", - "trampoline/aarch64/asm_interpreter_call.cpp", - "trampoline/aarch64/common_call.cpp", - "trampoline/aarch64/optimized_call.cpp", - "trampoline/aarch64/optimized_fast_call.cpp", - "trampoline/x64/asm_interpreter_call.cpp", - "trampoline/x64/common_call.cpp", - "trampoline/x64/optimized_call.cpp", - "trampoline/x64/optimized_fast_call.cpp", - "ts_class_analysis.cpp", - "ts_hclass_generator.cpp", - "ts_hcr_lowering.cpp", - "ts_inline_lowering.cpp", - "type.cpp", - "type_inference/global_type_infer.cpp", - "type_inference/initialization_analysis.cpp", - "type_inference/method_type_infer.cpp", - "type_inference/pgo_type_infer.cpp", - "type_mcr_lowering.cpp", - "type_recorder.cpp", - "typed_array_stub_builder.cpp", - "value_numbering.cpp", - "verifier.cpp", - ] - - if (enable_local_code_sign) { - sources += [ "$js_root/ecmascript/platform/unix/ohos/code_sign.cpp" ] - } else if (is_mingw) { - sources += [ "$js_root/ecmascript/platform/windows/code_sign.cpp" ] - } else { - sources += [ "$js_root/ecmascript/platform/unix/code_sign.cpp" ] - } - - public_configs = [ - ":include_llvm", - "$js_root:ark_jsruntime_compiler_config", - "$js_root:ark_jsruntime_public_config", +config("include_maple") { + include_dirs = [ + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/litecg", + "${MAPLEALL_ROOT}/maple_be/cg/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLEALL_ROOT}/maple_be/include/be/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_driver/defs", + "${MAPLEALL_ROOT}/maple_driver/defs/default", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_me/include", ] + cflags_cc = [ "-Wno-gnu-zero-variadic-macro-arguments" ] +} +libark_jsoptimizer_sources = [ + "access_object_stub_builder.cpp", + "aot_compiler_preprocessor.cpp", + "argument_accessor.cpp", + "array_bounds_check_elimination.cpp", + "assembler/aarch64/assembler_aarch64.cpp", + "assembler/aarch64/extend_assembler.cpp", + "assembler/x64/assembler_x64.cpp", + 
"assembler/x64/extended_assembler_x64.cpp", + "assembler_module.cpp", + "async_function_lowering.cpp", + "base/depend_chain_helper.cpp", + "bc_call_signature.cpp", + "builtins/builtins_array_stub_builder.cpp", + "builtins/builtins_call_signature.cpp", + "builtins/builtins_collection_stub_builder.cpp", + "builtins/builtins_function_stub_builder.cpp", + "builtins/builtins_number_stub_builder.cpp", + "builtins/builtins_object_stub_builder.cpp", + "builtins/builtins_string_stub_builder.cpp", + "builtins/builtins_stubs.cpp", + "builtins/containers_stub_builder.cpp", + "builtins/linked_hashtable_stub_builder.cpp", + "builtins_lowering.cpp", + "bytecode_circuit_builder.cpp", + "bytecode_info_collector.cpp", + "bytecodes.cpp", + "call_signature.cpp", + "circuit.cpp", + "circuit_builder.cpp", + "circuit_builder_helper.cpp", + "combined_pass_visitor.cpp", + "common_stubs.cpp", + "compilation_driver.cpp", + "compiler_log.cpp", + "constant_folding.cpp", + "dead_code_elimination.cpp", + "debug_info.cpp", + "early_elimination.cpp", + "file_generators.cpp", + "frame_states.cpp", + "gate.cpp", + "gate_accessor.cpp", + "graph_editor.cpp", + "graph_linearizer.cpp", + "hcr_circuit_builder.cpp", + "hcr_gate_meta_data.cpp", + "ic_stub_builder.cpp", + "instruction_combine.cpp", + "interpreter_stub.cpp", + "jit_compiler.cpp", + "later_elimination.cpp", + "lcr_circuit_builder.cpp", + "lcr_gate_meta_data.cpp", + "lexical_env_specialization.cpp", + "llvm_codegen.cpp", + "ir_builder.cpp", + "ir_module.cpp", + "llvm_ir_builder.cpp", + "loop_analysis.cpp", + "loop_peeling.cpp", + "mcr_circuit_builder.cpp", + "mcr_gate_meta_data.cpp", + "mcr_lowering.cpp", + "native_inline_lowering.cpp", + "new_object_stub_builder.cpp", + "ntype_bytecode_lowering.cpp", + "ntype_hcr_lowering.cpp", + "number_speculative_lowering.cpp", + "number_speculative_retype.cpp", + "number_speculative_runner.cpp", + "object_access_helper.cpp", + "operations_stub_builder.cpp", + "pass_manager.cpp", + "profiler_stub_builder.cpp", + "range_analysis.cpp", + "range_guard.cpp", + "rt_call_signature.cpp", + "scheduler.cpp", + "share_gate_meta_data.cpp", + "slowpath_lowering.cpp", + "state_split_linearizer.cpp", + "stub.cpp", + "stub_builder.cpp", + "trampoline/aarch64/asm_interpreter_call.cpp", + "trampoline/aarch64/common_call.cpp", + "trampoline/aarch64/optimized_call.cpp", + "trampoline/aarch64/optimized_fast_call.cpp", + "trampoline/x64/asm_interpreter_call.cpp", + "trampoline/x64/common_call.cpp", + "trampoline/x64/optimized_call.cpp", + "trampoline/x64/optimized_fast_call.cpp", + "ts_class_analysis.cpp", + "ts_hclass_generator.cpp", + "ts_hcr_opt_pass.cpp", + "ts_inline_lowering.cpp", + "type.cpp", + "type_bytecode_lowering.cpp", + "type_hcr_lowering.cpp", + "type_inference/global_type_infer.cpp", + "type_inference/initialization_analysis.cpp", + "type_inference/method_type_infer.cpp", + "type_inference/pgo_type_infer.cpp", + "type_inference/pgo_type_infer_helper.cpp", + "type_recorder.cpp", + "typed_array_stub_builder.cpp", + "value_numbering.cpp", + "verifier.cpp", +] + +config("libark_jsoptimizer_set_config") { if (compile_llvm_online) { lib_dirs = [ "//third_party/third_party_llvm-project/build/lib" ] } else { @@ -230,16 +263,14 @@ ohos_source_set("libark_jsoptimizer_set") { "LLVMBitWriter", ] + if (!is_mac && !is_ios) { + libs += [ "LLVMParts" ] + } + # Only support compiling aarch64 target at device-side(arm64 platform). - # So these os-related libs of arm and x86 are not needed on arm64 platform. 
+ # So these os-related libs of x86 are not needed on arm64 platform. if (is_mac || current_cpu != "arm64") { libs += [ - "LLVMARMUtils", - "LLVMARMCodeGen", - "LLVMARMDisassembler", - "LLVMARMDesc", - "LLVMARMInfo", - "LLVMARMAsmParser", "LLVMX86AsmParser", "LLVMX86CodeGen", "LLVMX86Desc", @@ -259,9 +290,78 @@ ohos_source_set("libark_jsoptimizer_set") { "windowsapp", ] } +} + +ohos_source_set("libark_jsoptimizer_set") { + stack_protector_ret = false + sources = libark_jsoptimizer_sources + if (enable_local_code_sign) { + sources += [ "$js_root/ecmascript/platform/unix/ohos/code_sign.cpp" ] + } else if (is_mingw) { + sources += [ "$js_root/ecmascript/platform/windows/code_sign.cpp" ] + } else { + sources += [ "$js_root/ecmascript/platform/unix/code_sign.cpp" ] + } + public_configs = [ + ":include_llvm", + "$js_root:ark_jsruntime_compiler_config", + "$js_root:ark_jsruntime_public_config", + ":libark_jsoptimizer_set_config", + ] + + external_deps = [] + deps = [] + if (!is_arkui_x) { + external_deps += [ "runtime_core:arkfile_header_deps" ] + } else { + deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] + } + + # hiviewdfx libraries + external_deps += hiviewdfx_ext_deps + if (enable_local_code_sign) { + external_deps += [ + "code_signature:libcode_sign_utils", + "code_signature:liblocal_code_sign_sdk", + ] + } + deps += hiviewdfx_deps + + part_name = "ets_runtime" + subsystem_name = "arkcompiler" +} + +config("enable_maple_config") { + cflags_cc = [ "-DCOMPILE_MAPLE" ] + cflags_c = [ "-DCOMPILE_MAPLE" ] +} + +ohos_source_set("libark_jsoptimizer_set_with_maple") { + stack_protector_ret = false + sources = libark_jsoptimizer_sources + if (enable_local_code_sign) { + sources += [ "$js_root/ecmascript/platform/unix/ohos/code_sign.cpp" ] + } else if (is_mingw) { + sources += [ "$js_root/ecmascript/platform/windows/code_sign.cpp" ] + } else { + sources += [ "$js_root/ecmascript/platform/unix/code_sign.cpp" ] + } + sources += [ + "litecg_codegen.cpp", + "litecg_ir_builder.cpp", + ] + public_configs = [ + ":include_llvm", + ":include_maple", + "$js_root:ark_jsruntime_compiler_config", + "$js_root:ark_jsruntime_public_config", + ":libark_jsoptimizer_set_config", + ":enable_maple_config", + ] + external_deps = [] deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps" ] } else { deps += [ "$ark_root/libpandafile:arkfile_header_deps" ] @@ -309,15 +409,17 @@ ohos_source_set("libark_mock_stub_set") { ohos_shared_library("libark_jsoptimizer") { stack_protector_ret = false deps = [ - ":libark_jsoptimizer_set", + ":libark_jsoptimizer_set_with_maple", "$js_root:libark_jsruntime", + "${MAPLEALL_ROOT}/maple_be:libcg", + "$ark_third_party_root/bounds_checking_function:libsec_shared", ] if (run_with_asan) { defines = [ "RUN_WITH_ASAN" ] } - install_enable = false + install_enable = true if (!is_mingw && !is_mac) { output_extension = "so" @@ -326,29 +428,6 @@ ohos_shared_library("libark_jsoptimizer") { subsystem_name = "arkcompiler" } -ohos_shared_library("libark_jsoptimizer_test") { - stack_protector_ret = false - deps = [ - ":libark_jsoptimizer_set", - "$ark_root/libpandafile:libarkfile_static", - "$js_root:libark_jsruntime_test_set", - ] - - ldflags = [] - if (enable_coverage) { - ldflags += [ "--coverage" ] - cflags_cc = [ "--coverage" ] - } - - if (!ark_standalone_build) { - ldflags += [ "-Wl,--lto-O0" ] - } - install_enable = false - - output_extension = "so" - subsystem_name = "test" -} - ohos_executable("ark_stub_compiler") { sources = [ 
"stub_compiler.cpp" ] include_dirs = [ "$target_gen_dir" ] @@ -366,7 +445,7 @@ ohos_executable("ark_stub_compiler") { "$js_root:libark_jsruntime_set", ] external_deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:libarkfile_static" ] } else { deps += [ "$ark_root/libpandafile:libarkfile_static" ] @@ -399,22 +478,23 @@ ohos_executable("ark_stub_compiler") { ohos_executable("ark_aot_compiler") { sources = [ "aot_compiler.cpp" ] - configs = [ ":include_llvm", + ":include_maple", "$js_root:ark_jsruntime_compiler_config", "$js_root:ark_jsruntime_public_config", ] deps = [ ":lib_ark_builtins.d.abc", + ":libark_jsoptimizer_set_with_maple", ":libark_mock_stub_set", "$js_root:libark_js_intl_set", "$js_root:libark_jsruntime_set", - "$js_root/ecmascript/compiler:libark_jsoptimizer_set", + "${MAPLEALL_ROOT}/maple_be:libcg", ] external_deps = [] - if (!is_cross_platform_build) { + if (!is_arkui_x) { external_deps += [ "runtime_core:arkfile_header_deps", "runtime_core:libarkbase_static", @@ -487,6 +567,11 @@ action("gen_stub_file") { rebase_path(root_out_dir_with_host_toolchain) + "/${icu_subsystem_name}/${icu_part_name}:" + rebase_path(root_out_dir_with_host_toolchain) + "/thirdparty/zlib:" + + rebase_path(root_out_dir_with_host_toolchain) + + "/resourceschedule/frame_aware_sched:" + + rebase_path(root_out_dir_with_host_toolchain) + "/hiviewdfx/hilog:" + + rebase_path(root_out_dir_with_host_toolchain) + + "/thirdparty/bounds_checking_function:" + rebase_path("//prebuilts/clang/ohos/linux-x86_64/llvm/lib/"), ] diff --git a/ecmascript/compiler/access_object_stub_builder.cpp b/ecmascript/compiler/access_object_stub_builder.cpp index 20f67bcec933b8c79b60edab95c47dca690639e2..1ff52d40e6e789f0f728588812be7466fb97ef44 100644 --- a/ecmascript/compiler/access_object_stub_builder.cpp +++ b/ecmascript/compiler/access_object_stub_builder.cpp @@ -15,6 +15,7 @@ #include "ecmascript/compiler/access_object_stub_builder.h" #include "ecmascript/compiler/ic_stub_builder.h" #include "ecmascript/compiler/interpreter_stub-inl.h" +#include "ecmascript/compiler/profiler_stub_builder.h" #include "ecmascript/compiler/rt_call_signature.h" #include "ecmascript/compiler/stub_builder-inl.h" #include "ecmascript/ic/profile_type_info.h" @@ -34,25 +35,19 @@ GateRef AccessObjectStubBuilder::LoadObjByName(GateRef glue, GateRef receiver, G GateRef value = 0; ICStubBuilder builder(this); builder.SetParameters(glue, receiver, profileTypeInfo, value, slotId); - builder.LoadICByName(&result, &tryFastPath, &slowPath, &exit); + builder.LoadICByName(&result, &tryFastPath, &slowPath, &exit, callback); Bind(&tryFastPath); { GateRef propKey = ResolvePropKey(glue, prop, info); - result = GetPropertyByName(glue, receiver, propKey); - Label notHole(env); - Branch(TaggedIsHole(*result), &slowPath, ¬Hole); - Bind(¬Hole); - { - callback.ProfileObjLayoutByLoad(receiver); - Jump(&exit); - } + result = GetPropertyByName(glue, receiver, propKey, callback); + Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); { GateRef propKey = ResolvePropKey(glue, prop, info); result = CallRuntime(glue, RTSTUB_ID(LoadICByName), { profileTypeInfo, receiver, propKey, IntToTaggedInt(slotId) }); - callback.ProfileObjLayoutByLoad(receiver); + callback.TryPreDump(); Jump(&exit); } Bind(&exit); @@ -75,7 +70,7 @@ GateRef AccessObjectStubBuilder::DeprecatedLoadObjByName(GateRef glue, GateRef r Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - result = GetPropertyByName(glue, receiver, 
propKey); + result = GetPropertyByName(glue, receiver, propKey, ProfileOperation()); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); @@ -116,7 +111,7 @@ GateRef AccessObjectStubBuilder::StoreObjByName(GateRef glue, GateRef receiver, GateRef propKey = ResolvePropKey(glue, prop, info); result = CallRuntime(glue, RTSTUB_ID(StoreICByName), { profileTypeInfo, receiver, propKey, value, IntToTaggedInt(slotId) }); - callback.ProfileObjLayoutByStore(receiver); + callback.TryPreDump(); Jump(&exit); } @@ -129,8 +124,8 @@ GateRef AccessObjectStubBuilder::StoreObjByName(GateRef glue, GateRef receiver, GateRef AccessObjectStubBuilder::ResolvePropKey(GateRef glue, GateRef prop, const StringIdInfo &info) { if (jsFunc_ != Circuit::NullGate()) { - GateRef key = LoadObjectFromConstPool(jsFunc_, prop); - return key; + GateRef constpool = GetConstPoolFromFunction(jsFunc_); + return GetStringFromConstPool(glue, constpool, ChangeIntPtrToInt32(prop)); } if (!info.IsValid()) { return prop; @@ -142,7 +137,7 @@ GateRef AccessObjectStubBuilder::ResolvePropKey(GateRef glue, GateRef prop, cons } GateRef AccessObjectStubBuilder::LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, - GateRef slotId) + GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -155,16 +150,17 @@ GateRef AccessObjectStubBuilder::LoadObjByValue(GateRef glue, GateRef receiver, GateRef value = 0; ICStubBuilder builder(this); builder.SetParameters(glue, receiver, profileTypeInfo, value, slotId, key); - builder.LoadICByValue(&result, &tryFastPath, &slowPath, &exit); + builder.LoadICByValue(&result, &tryFastPath, &slowPath, &exit, callback); Bind(&tryFastPath); { - result = GetPropertyByValue(glue, receiver, key); + result = GetPropertyByValue(glue, receiver, key, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); { result = CallRuntime(glue, RTSTUB_ID(LoadICByValue), { profileTypeInfo, receiver, key, IntToTaggedInt(slotId) }); + callback.TryPreDump(); Jump(&exit); } Bind(&exit); @@ -187,7 +183,7 @@ GateRef AccessObjectStubBuilder::DeprecatedLoadObjByValue(GateRef glue, GateRef Branch(TaggedIsHeapObject(receiver), &fastPath, &slowPath); Bind(&fastPath); { - result = GetPropertyByValue(glue, receiver, key); + result = GetPropertyByValue(glue, receiver, key, ProfileOperation()); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); @@ -225,6 +221,7 @@ GateRef AccessObjectStubBuilder::StoreObjByValue(GateRef glue, GateRef receiver, { result = CallRuntime(glue, RTSTUB_ID(StoreICByValue), { profileTypeInfo, receiver, key, value, IntToTaggedInt(slotId) }); + callback.TryPreDump(); Jump(&exit); } Bind(&exit); @@ -234,7 +231,8 @@ GateRef AccessObjectStubBuilder::StoreObjByValue(GateRef glue, GateRef receiver, } GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId) + GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -264,7 +262,7 @@ GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef prop, Bind(¬FoundInRecord); { GateRef globalObject = GetGlobalObject(glue); - result = GetGlobalOwnProperty(glue, globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } } @@ -283,7 +281,8 @@ GateRef AccessObjectStubBuilder::TryLoadGlobalByName(GateRef glue, GateRef 
prop, } GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef value, GateRef profileTypeInfo, GateRef slotId) + GateRef value, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -312,7 +311,7 @@ GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop Bind(¬FoundInRecord); { GateRef globalObject = GetGlobalObject(glue); - result = GetGlobalOwnProperty(glue, globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Label isFoundInGlobal(env); Label notFoundInGlobal(env); Branch(TaggedIsHole(*result), ¬FoundInGlobal, &isFoundInGlobal); @@ -345,7 +344,7 @@ GateRef AccessObjectStubBuilder::TryStoreGlobalByName(GateRef glue, GateRef prop } GateRef AccessObjectStubBuilder::LoadGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId) + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback) { auto env = GetEnvironment(); Label entry(env); @@ -364,7 +363,7 @@ GateRef AccessObjectStubBuilder::LoadGlobalVar(GateRef glue, GateRef prop, const { GateRef globalObject = GetGlobalObject(glue); GateRef propKey = ResolvePropKey(glue, prop, info); - result = GetGlobalOwnProperty(glue, globalObject, propKey); + result = GetGlobalOwnProperty(glue, globalObject, propKey, callback); Branch(TaggedIsHole(*result), &slowPath, &exit); } Bind(&slowPath); diff --git a/ecmascript/compiler/access_object_stub_builder.h b/ecmascript/compiler/access_object_stub_builder.h index e669596129e6a7a13045bff825d8c172f655a249..3e972b6a35c701fe095da7434a91a692c54d3cd4 100644 --- a/ecmascript/compiler/access_object_stub_builder.h +++ b/ecmascript/compiler/access_object_stub_builder.h @@ -38,16 +38,17 @@ public: GateRef DeprecatedLoadObjByName(GateRef glue, GateRef receiver, GateRef propKey); GateRef StoreObjByName(GateRef glue, GateRef receiver, GateRef prop, const StringIdInfo &info, GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback = ProfileOperation()); - GateRef LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, GateRef slotId); + GateRef LoadObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef profileTypeInfo, GateRef slotId, + ProfileOperation callback = ProfileOperation()); GateRef StoreObjByValue(GateRef glue, GateRef receiver, GateRef key, GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback = ProfileOperation()); GateRef DeprecatedLoadObjByValue(GateRef glue, GateRef receiver, GateRef key); GateRef TryLoadGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId); + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef TryStoreGlobalByName(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef value, GateRef profileTypeInfo, GateRef slotId); + GateRef value, GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef LoadGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, - GateRef profileTypeInfo, GateRef slotId); + GateRef profileTypeInfo, GateRef slotId, ProfileOperation callback); GateRef StoreGlobalVar(GateRef glue, GateRef prop, const StringIdInfo &info, GateRef value, GateRef profileTypeInfo, GateRef slotId); private: diff --git a/ecmascript/compiler/aot_compiler.cpp b/ecmascript/compiler/aot_compiler.cpp index 
c213f9e600c2832ad6db5ceb035941e4c05fc531..4bf65750ac4a82a7b95bd3bbb547665ac4ca3385 100644 --- a/ecmascript/compiler/aot_compiler.cpp +++ b/ecmascript/compiler/aot_compiler.cpp @@ -15,111 +15,61 @@ #include #include +#include #include // NOLINTNEXTLINE(modernize-deprecated-headers) #include -#include "ecmascript/compiler/aot_file/aot_file_manager.h" #include "ecmascript/base/string_helper.h" -#include "ecmascript/compiler/pass_manager.h" -#include "ecmascript/compiler/compiler_log.h" +#include "ecmascript/compiler/aot_compiler_preprocessor.h" +#include "ecmascript/compiler/aot_file/aot_file_manager.h" #include "ecmascript/ecma_string.h" -#include "ecmascript/ecma_vm.h" #include "ecmascript/js_runtime_options.h" #include "ecmascript/jspandafile/js_pandafile_manager.h" +#include "ecmascript/jspandafile/program_object.h" #include "ecmascript/log.h" +#include "ecmascript/log_wrapper.h" +#include "ecmascript/module/js_module_manager.h" #include "ecmascript/napi/include/jsnapi.h" +#include "ecmascript/ohos/ohos_pkg_args.h" #include "ecmascript/platform/file.h" namespace panda::ecmascript::kungfu { -std::string GetHelper() -{ - std::string str; - str.append(COMPILER_HELP_HEAD_MSG); - str.append(HELP_OPTION_MSG); - return str; -} - -void AOTInitialize(EcmaVM *vm) +namespace { +void CompileValidFiles(PassManager &passManager, AOTFileGenerator &generator, bool &ret, + const CVector &fileInfos) { - BytecodeStubCSigns::Initialize(); - CommonStubCSigns::Initialize(); - RuntimeStubCSigns::Initialize(); - vm->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->Initialize(); -} - -JSPandaFile *CreateAndVerifyJSPandaFile(const JSRuntimeOptions &runtimeOptions, const std::string &fileName, EcmaVM *vm) -{ - JSPandaFileManager *jsPandaFileManager = JSPandaFileManager::GetInstance(); - std::shared_ptr jsPandaFile = nullptr; - if (runtimeOptions.IsTargetCompilerMode()) { - std::string hapPath = runtimeOptions.GetHapPath(); - uint32_t offset = runtimeOptions.GetHapAbcOffset(); - uint32_t size = runtimeOptions.GetHapAbcSize(); - if (size == 0) { - LOG_ECMA(ERROR) << "buffer is empty in target compiler mode!"; - return nullptr; - } - std::string realPath; - if (!RealPath(hapPath, realPath, false)) { - LOG_ECMA(ERROR) << "realpath for hap path failed!"; - return nullptr; - } - MemMap fileMapMem = FileMap(realPath.c_str(), FILE_RDONLY, PAGE_PROT_READ); - if (fileMapMem.GetOriginAddr() == nullptr) { - LOG_ECMA(ERROR) << "File mmap failed"; - return nullptr; + for (const AbcFileInfo &fileInfo : fileInfos) { + JSPandaFile *jsPandaFile = fileInfo.jsPandaFile_.get(); + const std::string &extendedFilePath = fileInfo.extendedFilePath_; + LOG_COMPILER(INFO) << "AOT compile: " << extendedFilePath; + generator.SetCurrentCompileFileName(jsPandaFile->GetNormalizedFileDesc()); + if (passManager.Compile(jsPandaFile, extendedFilePath, generator) == false) { + ret = false; + continue; } - uint8_t *buffer = reinterpret_cast(fileMapMem.GetOriginAddr()) + offset; - jsPandaFile = jsPandaFileManager->OpenJSPandaFileFromBuffer(buffer, size, fileName.c_str()); - FileUnMap(fileMapMem); - fileMapMem.Reset(); - } else { - jsPandaFile = jsPandaFileManager->OpenJSPandaFile(fileName.c_str()); } - if (jsPandaFile == nullptr) { - LOG_ECMA(ERROR) << "open file " << fileName << " error"; - return nullptr; - } - - if (!jsPandaFile->IsNewVersion()) { - LOG_COMPILER(ERROR) << "AOT only support panda file with new ISA, while the '" << - fileName << "' file is the old version"; - return nullptr; - } - - jsPandaFileManager->AddJSPandaFileVm(vm, 
jsPandaFile); - return jsPandaFile.get(); } +} // namespace int Main(const int argc, const char **argv) { auto startTime = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) .count(); - std::string entrypoint = "init::func_main_0"; LOG_ECMA(DEBUG) << "Print ark_aot_compiler received args:"; for (int i = 0; i < argc; i++) { LOG_ECMA(DEBUG) << argv[i]; } - int newArgc = argc; if (argc < 2) { // 2: at least have two arguments - LOG_COMPILER(ERROR) << GetHelper(); + LOG_COMPILER(ERROR) << AotCompilerPreprocessor::GetHelper(); return -1; } - std::string files = argv[argc - 1]; - if (!base::StringHelper::EndsWith(files, ".abc")) { - LOG_COMPILER(ERROR) << "The last argument must be abc file" << std::endl; - LOG_COMPILER(ERROR) << GetHelper(); - return 1; - } - - newArgc--; JSRuntimeOptions runtimeOptions; - bool retOpt = runtimeOptions.ParseCommand(newArgc, argv); + bool retOpt = runtimeOptions.ParseCommand(argc, argv); if (!retOpt) { - LOG_COMPILER(ERROR) << GetHelper(); + LOG_COMPILER(ERROR) << AotCompilerPreprocessor::GetHelper(); return 1; } @@ -140,56 +90,66 @@ int Main(const int argc, const char **argv) { LocalScope scope(vm); - std::string delimiter = GetFileDelimiter(); - arg_list_t pandaFileNames = base::StringHelper::SplitString(files, delimiter); - - std::string triple = runtimeOptions.GetTargetTriple(); - if (runtimeOptions.GetAOTOutputFile().empty()) { - runtimeOptions.SetAOTOutputFile("aot_file"); - } - std::string outputFileName = runtimeOptions.GetAOTOutputFile(); - size_t optLevel = runtimeOptions.GetOptLevel(); - size_t relocMode = runtimeOptions.GetRelocMode(); - std::string logOption = runtimeOptions.GetCompilerLogOption(); - std::string logMethodsList = runtimeOptions.GetMethodsListForLog(); - bool compilerLogTime = runtimeOptions.IsEnableCompilerLogTime(); - size_t maxAotMethodSize = runtimeOptions.GetMaxAotMethodSize(); - size_t maxMethodsInModule = runtimeOptions.GetCompilerModuleMethods(); - bool isEnableTypeLowering = runtimeOptions.IsEnableTypeLowering(); - bool isEnableEarlyElimination = runtimeOptions.IsEnableEarlyElimination(); - bool isEnableLaterElimination = runtimeOptions.IsEnableLaterElimination(); - bool isEnableValueNumbering = runtimeOptions.IsEnableValueNumbering(); - bool isEnableOptInlining = runtimeOptions.IsEnableOptInlining(); - bool isEnableTypeInfer = isEnableTypeLowering || - vm->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->AssertTypes(); - bool isEnableOptPGOType = runtimeOptions.IsEnableOptPGOType(); - - PassOptions passOptions(isEnableTypeLowering, isEnableEarlyElimination, isEnableLaterElimination, - isEnableValueNumbering, isEnableTypeInfer, isEnableOptInlining, isEnableOptPGOType); - uint32_t hotnessThreshold = runtimeOptions.GetPGOHotnessThreshold(); - AOTInitialize(vm); - - CompilerLog log(logOption); - log.SetEnableCompilerLogTime(compilerLogTime); - AotMethodLogList logList(logMethodsList); - AOTFileGenerator generator(&log, &logList, vm, triple); - std::string profilerIn(runtimeOptions.GetPGOProfilerPath()); - - if (runtimeOptions.WasSetEntryPoint()) { - entrypoint = runtimeOptions.GetEntryPoint(); + arg_list_t pandaFileNames {}; + std::map> pkgArgsMap; + CompilationOptions cOptions(vm, runtimeOptions); + + CompilerLog log(cOptions.logOption_); + log.SetEnableCompilerLogTime(cOptions.compilerLogTime_); + AotMethodLogList logList(cOptions.logMethodsList_); + PGOProfilerDecoder profilerDecoder; + + AotCompilerPreprocessor cPreprocessor(vm, runtimeOptions, pkgArgsMap, profilerDecoder, pandaFileNames); 
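+        // from here the preprocessor owns input handling: target-compiler mode
+        // reads the ohos pkg args, otherwise the abc file list comes from the
+        // trailing command-line arguments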
+ if (!cPreprocessor.HandleTargetCompilerMode(cOptions) || + !cPreprocessor.HandlePandaFileNames(argc, argv)) { + return 1; } - PassManager passManager(vm, entrypoint, triple, optLevel, relocMode, &log, &logList, maxAotMethodSize, - maxMethodsInModule, profilerIn, hotnessThreshold, &passOptions); - for (const auto &fileName : pandaFileNames) { - auto extendedFilePath = panda::os::file::File::GetExtendedFilePath(fileName); - LOG_COMPILER(INFO) << "AOT compile: " << extendedFilePath; - JSPandaFile *jsPandaFile = CreateAndVerifyJSPandaFile(runtimeOptions, extendedFilePath, vm); - if (passManager.Compile(jsPandaFile, extendedFilePath, generator) == false) { - ret = false; - continue; - } + profilerDecoder.SetHotnessThreshold(cOptions.hotnessThreshold_); + profilerDecoder.SetInPath(cOptions.profilerIn_); + cPreprocessor.AOTInitialize(); + cPreprocessor.SetShouldCollectLiteralInfo(cOptions, &log); + if (!cPreprocessor.GenerateAbcFileInfos()) { + return 1; } - generator.SaveAOTFile(outputFileName + AOTFileManager::FILE_EXTENSION_AN); + cPreprocessor.GenerateGlobalTypes(cOptions); + cPreprocessor.GeneratePGOTypes(cOptions); + cPreprocessor.SnapshotInitialize(); + ret = cPreprocessor.GetCompilerResult(); + + PassOptions passOptions(cOptions.isEnableArrayBoundsCheckElimination_, + cOptions.isEnableTypeLowering_, + cOptions.isEnableEarlyElimination_, + cOptions.isEnableLaterElimination_, + cOptions.isEnableValueNumbering_, + cOptions.isEnableTypeInfer_, + cOptions.isEnableOptInlining_, + cOptions.isEnableOptPGOType_, + cOptions.isEnableOptTrackField_, + cOptions.isEnableOptLoopPeeling_, + cOptions.isEnableOptOnHeapCheck_, + cOptions.isEnableOptLoopInvariantCodeMotion_, + cOptions.isEnableCollectLiteralInfo_, + cOptions.isEnableOptConstantFolding_, + cOptions.isEnableLexenvSpecialization_, + cOptions.isEnableNativeInline_, + cOptions.isEnableFastModule_); + + PassManager passManager(vm, + cOptions.triple_, + cOptions.optLevel_, + cOptions.relocMode_, + &log, + &logList, + cOptions.maxAotMethodSize_, + cOptions.maxMethodsInModule_, + profilerDecoder, + &passOptions); + + bool isEnableLiteCG = runtimeOptions.IsEnableLiteCG(); + AOTFileGenerator generator(&log, &logList, vm, cOptions.triple_, isEnableLiteCG); + const auto &fileInfos = cPreprocessor.GetAbcFileInfo(); + CompileValidFiles(passManager, generator, ret, fileInfos); + generator.SaveAOTFile(cOptions.outputFileName_ + AOTFileManager::FILE_EXTENSION_AN); generator.SaveSnapshotFile(); log.Print(); } diff --git a/ecmascript/compiler/aot_compiler_preprocessor.cpp b/ecmascript/compiler/aot_compiler_preprocessor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..70e52a1ec2d301768c277151b4513ed3946e2b21 --- /dev/null +++ b/ecmascript/compiler/aot_compiler_preprocessor.cpp @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "ecmascript/compiler/aot_compiler_preprocessor.h" +#include "ecmascript/compiler/pgo_type/pgo_type_parser.h" +#include "ecmascript/jspandafile/program_object.h" +#include "ecmascript/module/js_module_manager.h" +#include "ecmascript/ohos/ohos_pgo_processor.h" +#include "ecmascript/ohos/ohos_pkg_args.h" + +namespace panda::ecmascript::kungfu { +namespace { +constexpr int32_t DEFAULT_OPT_LEVEL = 3; // 3: default opt level +} // namespace +using PGOProfilerManager = pgo::PGOProfilerManager; + +CompilationOptions::CompilationOptions(EcmaVM *vm, JSRuntimeOptions &runtimeOptions) +{ + triple_ = runtimeOptions.GetTargetTriple(); + if (runtimeOptions.GetAOTOutputFile().empty()) { + runtimeOptions.SetAOTOutputFile("aot_file"); + } + outputFileName_ = runtimeOptions.GetAOTOutputFile(); + optLevel_ = runtimeOptions.GetOptLevel(); + relocMode_ = runtimeOptions.GetRelocMode(); + logOption_ = runtimeOptions.GetCompilerLogOption(); + logMethodsList_ = runtimeOptions.GetMethodsListForLog(); + compilerLogTime_ = runtimeOptions.IsEnableCompilerLogTime(); + maxAotMethodSize_ = runtimeOptions.GetMaxAotMethodSize(); + maxMethodsInModule_ = runtimeOptions.GetCompilerModuleMethods(); + hotnessThreshold_ = runtimeOptions.GetPGOHotnessThreshold(); + profilerIn_ = std::string(runtimeOptions.GetPGOProfilerPath()); + needMerge_ = false; + isEnableArrayBoundsCheckElimination_ = runtimeOptions.IsEnableArrayBoundsCheckElimination(); + isEnableTypeLowering_ = runtimeOptions.IsEnableTypeLowering(); + isEnableEarlyElimination_ = runtimeOptions.IsEnableEarlyElimination(); + isEnableLaterElimination_ = runtimeOptions.IsEnableLaterElimination(); + isEnableValueNumbering_ = runtimeOptions.IsEnableValueNumbering(); + isEnableOptInlining_ = runtimeOptions.IsEnableOptInlining(); + isEnableTypeInfer_ = isEnableTypeLowering_ || + vm->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->AssertTypes(); + isEnableOptPGOType_ = runtimeOptions.IsEnableOptPGOType(); + isEnableOptTrackField_ = runtimeOptions.IsEnableOptTrackField(); + isEnableOptLoopPeeling_ = runtimeOptions.IsEnableOptLoopPeeling(); + isEnableOptOnHeapCheck_ = runtimeOptions.IsEnableOptOnHeapCheck(); + isEnableOptLoopInvariantCodeMotion_ = runtimeOptions.IsEnableOptLoopInvariantCodeMotion(); + isEnableOptConstantFolding_ = runtimeOptions.IsEnableOptConstantFolding(); + isEnableCollectLiteralInfo_ = false; + isEnableLexenvSpecialization_ = runtimeOptions.IsEnableLexenvSpecialization(); + isEnableNativeInline_ = runtimeOptions.IsEnableNativeInline(); + isEnableFastModule_ = runtimeOptions.IsEnableFastModule(); +} + +bool AotCompilerPreprocessor::HandleTargetCompilerMode(CompilationOptions &cOptions) +{ + if (runtimeOptions_.IsTargetCompilerMode()) { + if (!OhosPkgArgs::ParseArgs(*this, cOptions)) { + LOG_COMPILER(ERROR) << GetHelper(); + LOG_COMPILER(ERROR) << "Parse pkg info failed, exit."; + return false; + } + const auto& mainPkgArgs = GetMainPkgArgs(); + if (!mainPkgArgs) { + LOG_COMPILER(ERROR) << "No main pkg args found, exit"; + return false; + } + if (!OhosPgoProcessor::MergeAndRemoveRuntimeAp(cOptions, mainPkgArgs)) { + LOG_COMPILER(ERROR) << "Fusion runtime ap failed, exit"; + return false; + } + HandleTargetModeInfo(cOptions); + } + return true; +} + +void AotCompilerPreprocessor::HandleTargetModeInfo(CompilationOptions &cOptions) +{ + JSRuntimeOptions &vmOpt = vm_->GetJSOptions(); + ASSERT(vmOpt.IsTargetCompilerMode()); + // target need fast compiler mode + vmOpt.SetFastAOTCompileMode(true); + vmOpt.SetOptLevel(DEFAULT_OPT_LEVEL); + 
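// mirror the forced target-mode defaults into cOptions so the compiler passes read the same values
+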
cOptions.optLevel_ = DEFAULT_OPT_LEVEL; + vmOpt.SetEnableOptOnHeapCheck(false); + cOptions.isEnableOptOnHeapCheck_ = false; +} + +bool AotCompilerPreprocessor::HandlePandaFileNames(const int argc, const char **argv) +{ + if (runtimeOptions_.GetCompilerPkgJsonInfo().empty() || pkgsArgs_.empty()) { + // if no pkgArgs, last param must be abc file + std::string files = argv[argc - 1]; + if (!base::StringHelper::EndsWith(files, ".abc")) { + LOG_COMPILER(ERROR) << "The last argument must be abc file" << std::endl; + LOG_COMPILER(ERROR) << GetHelper(); + return false; + } + std::string delimiter = GetFileDelimiter(); + pandaFileNames_ = base::StringHelper::SplitString(files, delimiter); + } + return true; +} + +void AotCompilerPreprocessor::AOTInitialize() +{ + BytecodeStubCSigns::Initialize(); + CommonStubCSigns::Initialize(); + RuntimeStubCSigns::Initialize(); + vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->Initialize(); +} + +void AotCompilerPreprocessor::SetShouldCollectLiteralInfo(CompilationOptions &cOptions, const CompilerLog *log) +{ + TSManager *tsManager = vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager(); + cOptions.isEnableCollectLiteralInfo_ = cOptions.isEnableTypeInfer_ && + (profilerDecoder_.IsLoaded() || tsManager->AssertTypes() || log->OutputType()); +} + +bool AotCompilerPreprocessor::GenerateAbcFileInfos() +{ + size_t size = pandaFileNames_.size(); + uint32_t checksum = 0; + for (size_t i = 0; i < size; ++i) { + const auto &fileName = pandaFileNames_.at(i); + auto extendedFilePath = panda::os::file::File::GetExtendedFilePath(fileName); + std::shared_ptr jsPandaFile = CreateAndVerifyJSPandaFile(extendedFilePath); + AbcFileInfo fileInfo(extendedFilePath, jsPandaFile); + if (jsPandaFile == nullptr) { + LOG_COMPILER(ERROR) << "Cannot execute panda file '" << extendedFilePath << "'"; + continue; + } + checksum = jsPandaFile->GetChecksum(); + ResolveModule(jsPandaFile.get(), extendedFilePath); + fileInfos_.emplace_back(fileInfo); + } + + return PGOProfilerManager::MergeApFiles(checksum, profilerDecoder_); +} + +std::shared_ptr AotCompilerPreprocessor::CreateAndVerifyJSPandaFile(const std::string &fileName) +{ + JSPandaFileManager *jsPandaFileManager = JSPandaFileManager::GetInstance(); + std::shared_ptr jsPandaFile = nullptr; + if (runtimeOptions_.IsTargetCompilerMode()) { + auto pkgArgsIter = pkgsArgs_.find(fileName); + if (pkgArgsIter == pkgsArgs_.end()) { + LOG_COMPILER(ERROR) << "Can not find file in ohos pkgs args. 
file name: " << fileName; + return nullptr; + } + if (!(pkgArgsIter->second->GetJSPandaFile(runtimeOptions_, jsPandaFile))) { + return nullptr; + } + } else { + jsPandaFile = jsPandaFileManager->OpenJSPandaFile(fileName.c_str()); + } + if (jsPandaFile == nullptr) { + LOG_ECMA(ERROR) << "open file " << fileName << " error"; + return nullptr; + } + + if (!jsPandaFile->IsNewVersion()) { + LOG_COMPILER(ERROR) << "AOT only support panda file with new ISA, while the '" << + fileName << "' file is the old version"; + return nullptr; + } + + jsPandaFileManager->AddJSPandaFileVm(vm_, jsPandaFile); + return jsPandaFile; +} + +void AotCompilerPreprocessor::ResolveModule(const JSPandaFile *jsPandaFile, const std::string &fileName) +{ + const auto &recordInfo = jsPandaFile->GetJSRecordInfo(); + JSThread *thread = vm_->GetJSThread(); + ModuleManager *moduleManager = thread->GetCurrentEcmaContext()->GetModuleManager(); + [[maybe_unused]] EcmaHandleScope scope(thread); + for (auto info: recordInfo) { + if (jsPandaFile->IsModule(info.second)) { + auto recordName = info.first; + JSHandle moduleRecord = moduleManager->HostResolveImportedModuleWithMerge(fileName.c_str(), + recordName); + SourceTextModule::Instantiate(thread, moduleRecord); + } + } +} + +void AotCompilerPreprocessor::GenerateGlobalTypes(const CompilationOptions &cOptions) +{ + for (const AbcFileInfo &fileInfo : fileInfos_) { + JSPandaFile *jsPandaFile = fileInfo.jsPandaFile_.get(); + TSManager *tsManager = vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager(); + PGOTypeManager *ptManager = vm_->GetJSThread()->GetCurrentEcmaContext()->GetPTManager(); + BytecodeInfoCollector collector(vm_, jsPandaFile, profilerDecoder_, cOptions.maxAotMethodSize_, + cOptions.isEnableCollectLiteralInfo_); + BCInfo &bytecodeInfo = collector.GetBytecodeInfo(); + const PGOBCInfo *bcInfo = collector.GetPGOBCInfo(); + const auto &methodPcInfos = bytecodeInfo.GetMethodPcInfos(); + auto &methodList = bytecodeInfo.GetMethodList(); + for (const auto &method : methodList) { + uint32_t methodOffset = method.first; + tsManager->SetCurConstantPool(jsPandaFile, methodOffset); + CString recordName = MethodLiteral::GetRecordName(jsPandaFile, EntityId(methodOffset)); + auto methodLiteral = jsPandaFile->FindMethodLiteral(methodOffset); + auto &methodInfo = methodList.at(methodOffset); + auto &methodPcInfo = methodPcInfos[methodInfo.GetMethodPcInfoIndex()]; + TypeRecorder typeRecorder(jsPandaFile, methodLiteral, tsManager, recordName, &profilerDecoder_, + methodPcInfo, collector.GetByteCodes(), cOptions.isEnableOptTrackField_); + typeRecorder.BindPgoTypeToGateType(jsPandaFile, tsManager, methodLiteral); + + bcInfo->IterateInfoByType(methodOffset, PGOBCInfo::Type::ARRAY_LITERAL, + [this, tsManager, ptManager, + &recordName]([[maybe_unused]] const uint32_t bcIdx, + [[maybe_unused]] const uint32_t bcOffset, const uint32_t cpIdx) { + JSHandle constpoolHandle(tsManager->GetConstantPool()); + JSThread *thread = vm_->GetJSThread(); + JSTaggedValue arr = + ConstantPool::GetLiteralFromCache( + thread, constpoolHandle.GetTaggedValue(), cpIdx, recordName); + JSHandle arrayHandle(thread, arr); + panda_file::File::EntityId id = + ConstantPool::GetIdFromCache(constpoolHandle.GetTaggedValue(), cpIdx); + ptManager->RecordElements(id, arrayHandle->GetElements()); + }); + } + } +} + +void AotCompilerPreprocessor::GeneratePGOTypes(const CompilationOptions &cOptions) +{ + PGOTypeManager *ptManager = vm_->GetJSThread()->GetCurrentEcmaContext()->GetPTManager(); + for (const AbcFileInfo &fileInfo : 
fileInfos_) { + JSPandaFile *jsPandaFile = fileInfo.jsPandaFile_.get(); + BytecodeInfoCollector collector(vm_, jsPandaFile, profilerDecoder_, cOptions.maxAotMethodSize_, + cOptions.isEnableCollectLiteralInfo_); + PGOTypeParser parser(profilerDecoder_, ptManager); + parser.CreatePGOType(collector); + } +} + +void AotCompilerPreprocessor::SnapshotInitialize() +{ + PGOTypeManager *ptManager = vm_->GetJSThread()->GetCurrentEcmaContext()->GetPTManager(); + ptManager->InitAOTSnapshot(fileInfos_.size()); +} +} // namespace panda::ecmascript::kungfu \ No newline at end of file diff --git a/ecmascript/compiler/aot_compiler_preprocessor.h b/ecmascript/compiler/aot_compiler_preprocessor.h new file mode 100644 index 0000000000000000000000000000000000000000..d232f3d1fef50605543e1af5a3f054171e9bb9b6 --- /dev/null +++ b/ecmascript/compiler/aot_compiler_preprocessor.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ECMASCRIPT_COMPILER_AOT_COMPILER_PREPROCESSOR_H +#define ECMASCRIPT_COMPILER_AOT_COMPILER_PREPROCESSOR_H + +#include "ecmascript/compiler/pass_manager.h" +#include "ecmascript/ecma_vm.h" +#include "macros.h" + +namespace panda::ecmascript::kungfu { +class OhosPkgArgs; +using PGOProfilerDecoder = pgo::PGOProfilerDecoder; + +struct AbcFileInfo { + explicit AbcFileInfo(std::string extendedFilePath, std::shared_ptr jsPandaFile) + : extendedFilePath_(extendedFilePath), jsPandaFile_(jsPandaFile) {} + ~AbcFileInfo() = default; + + std::string extendedFilePath_; + std::shared_ptr jsPandaFile_; +}; + +struct CompilationOptions { + explicit CompilationOptions(EcmaVM *vm, JSRuntimeOptions &runtimeOptions); + + std::string triple_; + std::string outputFileName_; + size_t optLevel_; + size_t relocMode_; + std::string logOption_; + std::string logMethodsList_; + bool compilerLogTime_; + size_t maxAotMethodSize_; + size_t maxMethodsInModule_; + uint32_t hotnessThreshold_; + std::string profilerIn_; + bool needMerge_; + bool isEnableArrayBoundsCheckElimination_; + bool isEnableTypeLowering_; + bool isEnableEarlyElimination_; + bool isEnableLaterElimination_; + bool isEnableValueNumbering_; + bool isEnableOptInlining_; + bool isEnableTypeInfer_; + bool isEnableOptPGOType_; + bool isEnableOptTrackField_; + bool isEnableOptLoopPeeling_; + bool isEnableOptOnHeapCheck_; + bool isEnableOptLoopInvariantCodeMotion_; + bool isEnableCollectLiteralInfo_; + bool isEnableOptConstantFolding_; + bool isEnableLexenvSpecialization_; + bool isEnableNativeInline_; + bool isEnablePGOHCRLowering_; + bool isEnableFastModule_; +}; + +class AotCompilerPreprocessor { +public: + AotCompilerPreprocessor(EcmaVM *vm, JSRuntimeOptions &runtimeOptions, + std::map> &pkgsArgs, + PGOProfilerDecoder &profilerDecoder, arg_list_t &pandaFileNames) + : vm_(vm), + runtimeOptions_(runtimeOptions), + pkgsArgs_(pkgsArgs), + profilerDecoder_(profilerDecoder), + pandaFileNames_(pandaFileNames) {}; + + ~AotCompilerPreprocessor() = default; + + bool 
HandleTargetCompilerMode(CompilationOptions &cOptions); + + bool HandlePandaFileNames(const int argc, const char **argv); + + void AOTInitialize(); + + void SetShouldCollectLiteralInfo(CompilationOptions &cOptions, const CompilerLog *log); + + bool GenerateAbcFileInfos(); + + void GenerateGlobalTypes(const CompilationOptions &cOptions); + + void GeneratePGOTypes(const CompilationOptions &cOptions); + + void SnapshotInitialize(); + + bool GetCompilerResult() + { + // The size of fileInfos is not equal to pandaFiles size, set compiler result to false + return fileInfos_.size() == pandaFileNames_.size(); + } + + const CVector& GetAbcFileInfo() const + { + return fileInfos_; + } + + std::shared_ptr GetMainPkgArgs() const + { + if (pkgsArgs_.empty()) { + return nullptr; + } + return pkgsArgs_.at(mainPkgName_); + } + + const std::map> &GetPkgsArgs() const + { + return pkgsArgs_; + } + + static std::string GetHelper() + { + std::string str; + str.append(COMPILER_HELP_HEAD_MSG); + str.append(HELP_OPTION_MSG); + return str; + } + +private: + NO_COPY_SEMANTIC(AotCompilerPreprocessor); + NO_MOVE_SEMANTIC(AotCompilerPreprocessor); + void HandleTargetModeInfo(CompilationOptions &cOptions); + + std::shared_ptr CreateAndVerifyJSPandaFile(const std::string &fileName); + + void ResolveModule(const JSPandaFile *jsPandaFile, const std::string &fileName); + + EcmaVM *vm_; + JSRuntimeOptions &runtimeOptions_; + std::map> &pkgsArgs_; + std::string mainPkgName_; + PGOProfilerDecoder &profilerDecoder_; + arg_list_t &pandaFileNames_; + CVector fileInfos_; + friend class OhosPkgArgs; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_AOT_COMPILER_PREPROCESSOR_H diff --git a/ecmascript/compiler/aot_file/an_file_data_manager.cpp b/ecmascript/compiler/aot_file/an_file_data_manager.cpp index 723dfacfe58247c9c83f8548b6135fee6e00acfc..609a6abc20a4f633caaef5206202b5989bf1e31a 100644 --- a/ecmascript/compiler/aot_file/an_file_data_manager.cpp +++ b/ecmascript/compiler/aot_file/an_file_data_manager.cpp @@ -15,6 +15,7 @@ #include "ecmascript/compiler/aot_file/an_file_data_manager.h" #include "ecmascript/js_file_path.h" #include "ecmascript/platform/file.h" +#include namespace panda::ecmascript { AnFileDataManager *AnFileDataManager::GetInstance() @@ -38,7 +39,7 @@ void AnFileDataManager::DestroyFileMapMem(MemMap &fileMapMem) void AnFileDataManager::SafeDestroyAllData() { - os::memory::WriteLockHolder lock(lock_); + WriteLockHolder lock(lock_); if (loadedStub_ != nullptr) { ExecutedMemoryAllocator::DestroyBuf(loadedStub_->GetStubsMem()); loadedStub_ = nullptr; @@ -53,7 +54,7 @@ void AnFileDataManager::SafeDestroyAllData() void AnFileDataManager::SafeDestroyAnData(const std::string &fileName) { - os::memory::WriteLockHolder lock(lock_); + WriteLockHolder lock(lock_); std::string anBasename = JSFilePath::GetBaseName(fileName); auto index = UnSafeGetFileInfoIndex(anBasename); if (index == INVALID_INDEX) { @@ -65,7 +66,7 @@ void AnFileDataManager::SafeDestroyAnData(const std::string &fileName) bool AnFileDataManager::SafeLoad(const std::string &fileName, Type type) { - os::memory::WriteLockHolder lock(lock_); + WriteLockHolder lock(lock_); if (type == Type::STUB) { if (loadedStub_ != nullptr) { return true; @@ -80,6 +81,12 @@ bool AnFileDataManager::SafeLoad(const std::string &fileName, Type type) } } +bool AnFileDataManager::SafeJITLoad(const std::string &fileName, uint64_t* result) +{ + WriteLockHolder lock(lock_); + return UnsafeLoadFromJIT(fileName, result); +} + std::shared_ptr 
AnFileDataManager::UnsafeFind(const std::string &fileName) const { // note: This method is not thread-safe @@ -122,6 +129,20 @@ bool AnFileDataManager::UnsafeLoadFromAOT(const std::string &fileName) return true; }
+bool AnFileDataManager::UnsafeLoadFromJIT(const std::string &fileName, uint64_t *result) +{ + // note: This method is not thread-safe; the caller must hold lock_ + // (see SafeJITLoad) before use. + std::shared_ptr<AnFileInfo> info = std::make_shared<AnFileInfo>(); + if (!info->LoadToCodeCache(fileName, result)) { + return false; + } + loadedJit_.emplace_back(info); + return true; +} +
uint32_t AnFileDataManager::UnSafeGetFileInfoIndex(const std::string &fileName) { auto iter = anFileNameToIndexMap_.find(fileName); @@ -133,19 +154,19 @@ uint32_t AnFileDataManager::UnSafeGetFileInfoIndex(const std::string &fileName) uint32_t AnFileDataManager::SafeGetFileInfoIndex(const std::string &fileName) { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); return UnSafeGetFileInfoIndex(fileName); }
std::shared_ptr<AnFileInfo> AnFileDataManager::SafeGetAnFileInfo(uint32_t index) { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); return UnSafeGetAnFileInfo(index); }
std::shared_ptr<StubFileInfo> AnFileDataManager::SafeGetStubFileInfo() { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); return loadedStub_; } @@ -165,7 +186,7 @@ bool AnFileDataManager::SafeTryReadLock() bool AnFileDataManager::SafeInsideStub(uintptr_t pc) { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); if (loadedStub_ == nullptr) { LOG_COMPILER(ERROR) << "SafeInsideStub: The stub file is not loaded."; return false; @@ -189,7 +210,7 @@ bool AnFileDataManager::SafeInsideStub(uintptr_t pc) bool AnFileDataManager::SafeInsideAOT(uintptr_t pc) { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); for (auto &info : loadedAn_) { const std::vector<ModuleSectionDes> &des = info->GetCodeUnits(); for (const auto &curDes : des) { @@ -203,7 +224,7 @@ bool AnFileDataManager::SafeInsideAOT(uintptr_t pc) AOTFileInfo::CallSiteInfo AnFileDataManager::SafeCalCallSiteInfo(uintptr_t retAddr) { - os::memory::ReadLockHolder lock(lock_); + ReadLockHolder lock(lock_); AOTFileInfo::CallSiteInfo callsiteInfo; bool ans = false; @@ -220,6 +241,14 @@ AOTFileInfo::CallSiteInfo AnFileDataManager::SafeCalCallSiteInfo(uintptr_t retAd return callsiteInfo; } } + + // also search JIT-loaded code units + for (auto &info : loadedJit_) { + ans = info->CalCallSiteInfo(retAddr, callsiteInfo); + if (ans) { + return callsiteInfo; + } + } return callsiteInfo; } } // namespace panda::ecmascript
diff --git a/ecmascript/compiler/aot_file/an_file_data_manager.h b/ecmascript/compiler/aot_file/an_file_data_manager.h index 555e7c9fe7f6bde0ba64f357f27a44d2b2894c3e..f2912a4d42cdc03df04d808088b5f6752fea950b 100644 --- a/ecmascript/compiler/aot_file/an_file_data_manager.h +++ b/ecmascript/compiler/aot_file/an_file_data_manager.h @@ -25,12 +25,14 @@ public: enum class Type : uint8_t { STUB = 0, AOT, + JIT, }; static AnFileDataManager *GetInstance(); ~AnFileDataManager(); bool SafeLoad(const std::string &fileName, Type type); + bool SafeJITLoad(const std::string &fileName, uint64_t *result); uint32_t SafeGetFileInfoIndex(const std::string &fileName); std::shared_ptr<AnFileInfo> SafeGetAnFileInfo(uint32_t index); std::shared_ptr<StubFileInfo> SafeGetStubFileInfo(); @@ -69,6 +71,7 @@ private: AnFileDataManager() = default;
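+ // Locking convention kept by this class: each Safe* entry point acquires lock_ via
+ // ReadLockHolder/WriteLockHolder and then delegates to its Unsafe* counterpart, which
+ // assumes the lock is already held. A minimal sketch of the pattern, as followed by the
+ // new JIT path above:
+ //
+ //     bool SafeJITLoad(const std::string &fileName, uint64_t *result)
+ //     {
+ //         WriteLockHolder lock(lock_); // exclusive: UnsafeLoadFromJIT mutates loadedJit_
+ //         return UnsafeLoadFromJIT(fileName, result);
+ //     }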
std::shared_ptr<AnFileInfo> UnsafeFind(const std::string &fileName) const; bool UnsafeLoadFromAOT(const std::string &fileName); + bool UnsafeLoadFromJIT(const std::string &fileName, uint64_t *result); bool UnsafeLoadFromStub(); uint32_t UnSafeGetFileInfoIndex(const std::string &fileName); std::shared_ptr<AnFileInfo> UnSafeGetAnFileInfo(uint32_t index) @@ -76,9 +79,12 @@ private: return loadedAn_.at(index); } - os::memory::RWLock lock_ {}; + RWLock lock_ {}; std::unordered_map<std::string, uint32_t> anFileNameToIndexMap_ {}; + std::unordered_map<std::string, uint32_t> jitFileNameToIndexMap_ {}; std::vector<std::shared_ptr<AnFileInfo>> loadedAn_ {}; + std::vector<std::shared_ptr<AnFileInfo>> loadedJit_ {}; + std::shared_ptr<StubFileInfo> loadedStub_ {nullptr}; std::string anDir_; bool anEnable_ {false};
diff --git a/ecmascript/compiler/aot_file/an_file_info.cpp b/ecmascript/compiler/aot_file/an_file_info.cpp index 5d677e11e0c7aa9bc50a23a3d397f57577634283..a6a219d57111231aeab81846b0ada923b101a782 100644 --- a/ecmascript/compiler/aot_file/an_file_info.cpp +++ b/ecmascript/compiler/aot_file/an_file_info.cpp @@ -16,6 +16,8 @@ #include "ecmascript/compiler/aot_file/an_file_info.h" #include +#include +#include #include "ecmascript/compiler/aot_file/aot_version.h" #include "ecmascript/compiler/aot_file/elf_builder.h" #include "ecmascript/compiler/aot_file/elf_reader.h" @@ -23,6 +25,7 @@ #include "ecmascript/js_file_path.h" #include "ecmascript/log.h" #include "ecmascript/platform/file.h" +#include "macros.h"
namespace panda::ecmascript { void AnFileInfo::Save(const std::string &filename, Triple triple) @@ -40,13 +43,97 @@ void AnFileInfo::Save(const std::string &filename, Triple triple) ElfBuilder builder(des_, GetDumpSectionNames()); llvm::ELF::Elf64_Ehdr header; - builder.PackELFHeader(header, base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); + builder.PackELFHeader(header, base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); file.write(reinterpret_cast<char *>(&header), sizeof(llvm::ELF::Elf64_Ehdr)); builder.PackELFSections(file); builder.PackELFSegment(file); file.close(); }
+bool AnFileInfo::LoadToCodeCache(const std::string &filename, uint64_t *result) +{ + std::string realPath; + if (!RealPath(filename, realPath, false)) { + LOG_COMPILER(ERROR) << "Can not load aot file from path [ " << filename << " ], " + << "please execute ark_aot_compiler with options --aot-file."; + return false; + } + + fileMapMem_ = FileMap(realPath.c_str(), FILE_RDONLY, PAGE_PROT_READ); + if (fileMapMem_.GetOriginAddr() == nullptr) { + LOG_ECMA(ERROR) << "File mmap failed"; + return false; + } + ExecutedMemoryAllocator::ExeMem codeCache; + ExecutedMemoryAllocator::AllocateBuf(fileMapMem_.GetSize(), codeCache); + void *src = fileMapMem_.GetOriginAddr();  // source: the memory-mapped an file + size_t srcSize = fileMapMem_.GetSize(); + void *dest = codeCache.addr_;             // destination: the executable code cache + size_t destSize = codeCache.size_; + if (destSize < srcSize) { + LOG_ECMA(ERROR) << "Allocated buffer for code cache is too small"; + return false; + } + // Copy the mapped file into the code cache and hand its base address back to the caller. + if (memcpy_s(dest, destSize, src, srcSize) != EOK) { + LOG_ECMA(ERROR) << "Copy to code cache failed"; + return false; + } + *result = reinterpret_cast<uint64_t>(dest); + PageProtect(codeCache.addr_, codeCache.size_, PAGE_PROT_EXEC_READ); + + // Parse AN File +
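+ // The parsing below mirrors AnFileInfo::Load: it verifies the ELF header version, then
+ // materializes the section descriptors and the ARK_FUNCENTRY table from fileMapMem_,
+ // while the executable copy made above lives in the code cache.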
moduleNum_ = 1; + des_.resize(moduleNum_); + ModuleSectionDes &des = des_[0]; + + ElfReader reader(fileMapMem_); + std::vector<ElfSecName> secs = GetDumpSectionNames(); + if (!reader.VerifyELFHeader(base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), + AOTFileVersion::AN_STRICT_MATCH)) { + return false; + } + reader.ParseELFSections(des, secs); + if (!reader.ParseELFSegment()) { + LOG_ECMA(ERROR) << "modify mmap area permission failed"; + return false; + } + + // ParseFunctionEntrySection + uint64_t secAddr = des.GetSecAddr(ElfSecName::ARK_FUNCENTRY); + uint32_t secSize = des.GetSecSize(ElfSecName::ARK_FUNCENTRY); + FuncEntryDes *entryDes = reinterpret_cast<FuncEntryDes *>(secAddr); + entryNum_ = secSize / sizeof(FuncEntryDes); + entries_.assign(entryDes, entryDes + entryNum_); + des.SetStartIndex(0); + des.SetFuncCount(entryNum_); + std::cout << "entries size:" << entries_.size() <(funcDes.codeAddr_); +#endif + } + } + + return true; +} +
+ bool AnFileInfo::Load(const std::string &filename) { std::string realPath; @@ -55,6 +142,10 @@ bool AnFileInfo::Load(const std::string &filename) << "please execute ark_aot_compiler with options --aot-file."; return false; } + if (!FileExist(realPath.c_str())) { + LOG_ECMA(WARN) << "File does not exist: " << realPath; + return false; + }
fileMapMem_ = FileMap(realPath.c_str(), FILE_RDONLY, PAGE_PROT_READ); if (fileMapMem_.GetOriginAddr() == nullptr) { @@ -69,7 +160,7 @@ bool AnFileInfo::Load(const std::string &filename) ElfReader reader(fileMapMem_); std::vector<ElfSecName> secs = GetDumpSectionNames(); - if (!reader.VerifyELFHeader(base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), + if (!reader.VerifyELFHeader(base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), AOTFileVersion::AN_STRICT_MATCH)) { return false; } @@ -180,4 +271,16 @@ void AnFileInfo::AddFuncEntrySec() uint32_t funcEntrySize = sizeof(FuncEntryDes) * entryNum_; des.SetSecAddrAndSize(ElfSecName::ARK_FUNCENTRY, funcEntryAddr, funcEntrySize); } +
+void AnFileInfo::GenerateMethodToEntryIndexMap() +{ + const std::vector<FuncEntryDes> &entries = GetStubs(); + uint32_t entriesSize = entries.size(); + for (uint32_t i = 0; i < entriesSize; ++i) { + const AOTFileInfo::FuncEntryDes &entry = entries[i]; + const std::string &fileName = entryIdxToFileNameMap_.at(i); + auto key = std::make_pair(fileName, entry.indexInKindOrMethodId_); + methodToEntryIndexMap_[key] = i; + } +} } // namespace panda::ecmascript
diff --git a/ecmascript/compiler/aot_file/an_file_info.h b/ecmascript/compiler/aot_file/an_file_info.h index 1ee48111479eb1e872e70795bd81457feebee40b..ad06b34eae07e4f5583fc91e9fdeddcb07768877 100644 --- a/ecmascript/compiler/aot_file/an_file_info.h +++ b/ecmascript/compiler/aot_file/an_file_info.h @@ -17,10 +17,12 @@ #include "ecmascript/compiler/aot_file/aot_file_info.h" #include "ecmascript/compiler/assembler/assembler.h" +#include
namespace panda::ecmascript { class PUBLIC_API AnFileInfo : public AOTFileInfo { public: + using FuncEntryIndexKey = std::pair<std::string, uint32_t>; // (abc file name, method id) AnFileInfo() = default; ~AnFileInfo() override = default; void Save(const std::string &filename, Triple triple); @@ -47,9 +49,9 @@ public: void TryRemoveAnFile(const char *filename); - void AlignTextSec() + void AlignTextSec(uint32_t alignSize) { - curTextSecOffset_ = AlignUp(curTextSecOffset_, TEXT_SEC_ALIGN); + curTextSecOffset_ = AlignUp(curTextSecOffset_, alignSize); } void UpdateCurTextSecOffset(uint64_t size) @@ -71,7 +73,23 @@ public: void Destroy() override; +
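+ // Illustrative usage of the two helpers below (the abc file name and index range are
+ // made-up values; real call sites are expected in the AOT compiler driver): after
+ // compiling each abc file, tag its entry range, then build the map once at the end:
+ //
+ //     info.MappingEntryFuncsToAbcFiles("app.abc", firstEntryIdx, endEntryIdx); // tag [first, end)
+ //     info.GenerateMethodToEntryIndexMap(); // build (file name, method id) -> entry index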
void MappingEntryFuncsToAbcFiles(std::string curCompileFileName, uint32_t start, uint32_t end) + { + while (start < end) { + entryIdxToFileNameMap_[start] = curCompileFileName; + ++start; + } + } + + const CMap& GetMethodToEntryIndexMap() const + { + return methodToEntryIndexMap_; + } + + void GenerateMethodToEntryIndexMap(); + void Dump() const; + bool LoadToCodeCache(const std::string &filename, uint64_t* result); private: static const std::vector &GetDumpSectionNames(); @@ -82,6 +100,8 @@ private: uint64_t curTextSecOffset_ {0}; std::unordered_map> mainEntryMap_ {}; bool isLoad_ {false}; + CUnorderedMap entryIdxToFileNameMap_ {}; + CMap methodToEntryIndexMap_ {}; friend class AnFileDataManager; }; diff --git a/ecmascript/compiler/aot_file/aot_file_info.h b/ecmascript/compiler/aot_file/aot_file_info.h index dddb7c43ff81a420100ebd0d2af439260fadba83..716049c5ccef56662469602049c3e37df64a7353 100644 --- a/ecmascript/compiler/aot_file/aot_file_info.h +++ b/ecmascript/compiler/aot_file/aot_file_info.h @@ -20,10 +20,12 @@ #include "ecmascript/compiler/aot_file/module_section_des.h" #include "ecmascript/compiler/bc_call_signature.h" #include "ecmascript/deoptimizer/calleeReg.h" +#include "ecmascript/compiler/aot_file/func_entry_des.h" namespace panda::ecmascript { class PUBLIC_API AOTFileInfo { public: + using FuncEntryDes = ecmascript::FuncEntryDes; using CallSignature = kungfu::CallSignature; using CalleeRegAndOffsetVec = kungfu::CalleeRegAndOffsetVec; using DwarfRegType = kungfu::LLVMStackMapType::DwarfRegType; @@ -32,52 +34,9 @@ public: AOTFileInfo() = default; virtual ~AOTFileInfo() = default; + static constexpr uint32_t TEXT_SEC_ALIGN = 16; static constexpr uint32_t DATA_SEC_ALIGN = 8; - static constexpr uint32_t TEXT_SEC_ALIGN = 4096; - - struct FuncEntryDes { - uint64_t codeAddr_ {}; - CallSignature::TargetKind kind_; - bool isMainFunc_ {}; - bool isFastCall_ {}; - uint32_t indexInKindOrMethodId_ {}; - uint32_t moduleIndex_ {}; - int fpDeltaPrevFrameSp_ {}; - uint32_t funcSize_ {}; - uint32_t calleeRegisterNum_ {}; - int32_t CalleeReg2Offset_[2 * kungfu::MAX_CALLEE_SAVE_REIGISTER_NUM]; - bool IsStub() const - { - return CallSignature::TargetKind::STUB_BEGIN <= kind_ && kind_ < CallSignature::TargetKind::STUB_END; - } - - bool IsBCStub() const - { - return CallSignature::TargetKind::BCHANDLER_BEGIN <= kind_ && - kind_ < CallSignature::TargetKind::BCHANDLER_END; - } - - bool IsBCHandlerStub() const - { - return (kind_ == CallSignature::TargetKind::BYTECODE_HANDLER); - } - - bool IsBuiltinsStub() const - { - return (kind_ == CallSignature::TargetKind::BUILTINS_STUB || - kind_ == CallSignature::TargetKind::BUILTINS_WITH_ARGV_STUB); - } - - bool IsCommonStub() const - { - return (kind_ == CallSignature::TargetKind::COMMON_STUB); - } - - bool IsGeneralRTStub() const - { - return (kind_ >= CallSignature::TargetKind::RUNTIME_STUB && kind_ <= CallSignature::TargetKind::DEOPT_STUB); - } - }; + static constexpr uint32_t PAGE_ALIGN = 4096; const FuncEntryDes &GetStubDes(int index) const { @@ -149,7 +108,7 @@ public: des.SetArkStackMapSize(size); } - size_t GetCodeUnitsNum() + size_t GetCodeUnitsNum() const { return des_.size(); } diff --git a/ecmascript/compiler/aot_file/aot_file_manager.cpp b/ecmascript/compiler/aot_file/aot_file_manager.cpp index 6428e23b96a3bd87697236b06726e551447b645f..a87bc2225e8da8d82ab23c5d2d5590a86431e3c5 100644 --- a/ecmascript/compiler/aot_file/aot_file_manager.cpp +++ b/ecmascript/compiler/aot_file/aot_file_manager.cpp @@ -20,6 +20,7 @@ #include 
"ecmascript/compiler/aot_file/elf_builder.h" #include "ecmascript/compiler/aot_file/elf_reader.h" #include "ecmascript/compiler/bc_call_signature.h" +#include "ecmascript/compiler/call_signature.h" #include "ecmascript/compiler/common_stubs.h" #include "ecmascript/compiler/compiler_log.h" #include "ecmascript/deoptimizer/deoptimizer.h" @@ -35,6 +36,7 @@ #include "ecmascript/snapshot/mem/snapshot.h" #include "ecmascript/stackmap/ark_stackmap_parser.h" #include "ecmascript/stackmap/llvm_stackmap_parser.h" +#include namespace panda::ecmascript { using CommonStubCSigns = kungfu::CommonStubCSigns; @@ -42,9 +44,13 @@ using BytecodeStubCSigns = kungfu::BytecodeStubCSigns; void AOTFileManager::Iterate(const RootVisitor &v) { - for (auto &iter : desCPs_) { - for (auto &curCP : iter.second) { - v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&iter.second.at(curCP.first)))); + for (auto &iter : aiDatum_) { + auto &aiData = iter.second; + for (auto &eachFileData : aiData) { + auto &cpMap = eachFileData.second; + for (auto &eachCpPair : cpMap) { + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&eachCpPair.second))); + } } } } @@ -72,6 +78,13 @@ bool AOTFileManager::LoadAnFile(const std::string &fileName) return anFileDataManager->SafeLoad(fileName, AnFileDataManager::Type::AOT); } +bool AOTFileManager::LoadJITFile(const std::string &fileName, uint64_t* result) +{ + AnFileDataManager *anFileDataManager = AnFileDataManager::GetInstance(); + return anFileDataManager->SafeJITLoad(fileName, result); +} + + bool AOTFileManager::LoadAiFile([[maybe_unused]] const std::string &filename) { Snapshot snapshot(vm_); @@ -82,24 +95,25 @@ bool AOTFileManager::LoadAiFile([[maybe_unused]] const std::string &filename) #endif } -void AOTFileManager::LoadAiFile(const JSPandaFile *jsPandaFile) +bool AOTFileManager::LoadAiFile(const JSPandaFile *jsPandaFile) { uint32_t anFileInfoIndex = GetAnFileIndex(jsPandaFile); // this abc file does not have corresponding an file if (anFileInfoIndex == INVALID_INDEX) { - return; + return false; } - auto iter = desCPs_.find(anFileInfoIndex); + auto iter = aiDatum_.find(anFileInfoIndex); // already loaded - if (iter != desCPs_.end()) { - return; + if (iter != aiDatum_.end()) { + return false; } AnFileDataManager *anFileDataManager = AnFileDataManager::GetInstance(); std::string aiFilename = anFileDataManager->GetDir(); aiFilename += JSFilePath::GetHapName(jsPandaFile) + AOTFileManager::FILE_EXTENSION_AI; LoadAiFile(aiFilename); + return true; } const std::shared_ptr AOTFileManager::GetAnFileInfo(const JSPandaFile *jsPandaFile) const @@ -217,6 +231,10 @@ void AOTFileManager::SetAOTMainFuncEntry(JSHandle mainFunc, const JS #ifndef NDEBUG PrintAOTEntry(jsPandaFile, method, mainEntry); #endif + + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + methodLiteral->SetAotCodeBit(true); + methodLiteral->SetIsFastCall(isFastCall); } void AOTFileManager::SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *method, @@ -239,8 +257,36 @@ void AOTFileManager::SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *met if (canFastCall != nullptr) { *canFastCall = entry.isFastCall_; } + + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + methodLiteral->SetAotCodeBit(true); + methodLiteral->SetIsFastCall(entry.isFastCall_); +} + + +void AOTFileManager::SetJITFuncEntry(uint64_t codeAddr, Method *method, bool *canFastCall) +{ + uint64_t codeEntry = codeAddr; + if (!codeEntry) { + return; + } + method->SetDeoptThreshold(vm_->GetJSOptions().GetDeoptThreshold()); + 
method->SetCodeEntryAndMarkAOT(codeEntry); + + // Fast-call information is not yet recorded for JIT code, so assume a fast call for now. + (void)canFastCall; + method->SetIsFastCall(true); + + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + methodLiteral->SetAotCodeBit(true); + methodLiteral->SetIsFastCall(true); +} +
kungfu::ArkStackMapParser *AOTFileManager::GetStackMapParser() const { return arkStackMapParser_; } @@ -291,7 +337,7 @@ void AOTFileManager::InitializeStubEntries(const std::vector<AOTFileInfo::FuncEntryDes>& stubs) thread->SetBuiltinStubEntry(des.indexInKindOrMethodId_, des.codeAddr_); #if ECMASCRIPT_ENABLE_ASM_FILE_LOAD_LOG - int start = GET_MESSAGE_STRING_ID(CharCodeAt); + int start = GET_MESSAGE_STRING_ID(StringCharCodeAt); std::string format = MessageString::GetMessageString(des.indexInKindOrMethodId_ + start - 1); // -1: NONE LOG_ECMA(DEBUG) << "builtins index: " << std::dec << des.indexInKindOrMethodId_ << " :" << format << " addr: 0x" << std::hex << des.codeAddr_; @@ -320,21 +366,31 @@ bool AOTFileManager::RewriteDataSection(uintptr_t dataSec, size_t size, uintptr_ return true; }
-void AOTFileManager::AddConstantPool(const CString &snapshotFileName, JSTaggedValue deserializedCPList) +void AOTFileManager::ParseDeserializedData(const CString &snapshotFileName, JSTaggedValue deserializedData) { AnFileDataManager *anFileDataManager = AnFileDataManager::GetInstance(); std::string baseName = JSFilePath::GetFileName(snapshotFileName.c_str()); uint32_t anFileInfoIndex = anFileDataManager->SafeGetFileInfoIndex(baseName + FILE_EXTENSION_AN); - desCPs_.insert({anFileInfoIndex, CMap<int32_t, JSTaggedValue> {}}); - CMap<int32_t, JSTaggedValue> &cpMap = desCPs_[anFileInfoIndex]; - - JSHandle<TaggedArray> cpList(vm_->GetJSThread(), deserializedCPList); - uint32_t len = cpList->GetLength(); - for (uint32_t pos = 0; pos < len; pos += DESERI_CP_ITEM_SIZE) { - int32_t constantPoolID = cpList->Get(pos).GetInt(); - JSTaggedValue cp = cpList->Get(pos + 1); - cpMap.insert({constantPoolID, cp}); + JSThread *thread = vm_->GetJSThread(); + JSHandle<TaggedArray> aiData(thread, deserializedData); + uint32_t aiDataLen = aiData->GetLength(); + ASSERT(aiDataLen % AOTSnapshotConstants::SNAPSHOT_DATA_ITEM_SIZE == 0); + aiDatum_.insert({ anFileInfoIndex, FileNameToMultiConstantPoolMap {} }); + FileNameToMultiConstantPoolMap &fileNameToMulCpMap = aiDatum_.at(anFileInfoIndex); + + for (uint32_t i = 0; i < aiDataLen; i += AOTSnapshotConstants::SNAPSHOT_DATA_ITEM_SIZE) { + CString fileNameStr = EcmaStringAccessor(aiData->Get(i)).ToCString(); + JSHandle<TaggedArray> cpList(thread, aiData->Get(i + 1)); + uint32_t cpLen = cpList->GetLength(); + ASSERT(cpLen % AOTSnapshotConstants::SNAPSHOT_CP_ARRAY_ITEM_SIZE == 0); + fileNameToMulCpMap.insert({fileNameStr, MultiConstantPoolMap {}}); + MultiConstantPoolMap &cpMap = fileNameToMulCpMap.at(fileNameStr); + for (uint32_t pos = 0; pos < cpLen; pos += AOTSnapshotConstants::SNAPSHOT_CP_ARRAY_ITEM_SIZE) { + int32_t constantPoolID = cpList->Get(pos).GetInt(); + JSTaggedValue cp = cpList->Get(pos + 1); + cpMap.insert({constantPoolID, cp}); + } } }
@@ -342,12 +398,23 @@ JSHandle<ConstantPool> AOTFileManager::GetDeserializedConstantPool(const JSPand { // The deserialization of the 'ai' data used by multiple workers // is not implemented yet, so there may be a case where - // desCPs_ is empty, in which case the Hole will be returned - if (desCPs_.empty()) { + // aiDatum_ is empty, in which case the Hole will be returned + if (aiDatum_.empty()) { return JSHandle<ConstantPool>(vm_->GetJSThread(), JSTaggedValue::Hole()); } uint32_t anFileInfoIndex = jsPandaFile->GetAOTFileInfoIndex(); - CMap<int32_t, JSTaggedValue> &cpMap = desCPs_.at(anFileInfoIndex); + auto aiDatumIter =
aiDatum_.find(anFileInfoIndex); + if (aiDatumIter == aiDatum_.end()) { + LOG_COMPILER(FATAL) << "cannot find ai data by anFileInfoIndex " << anFileInfoIndex; + UNREACHABLE(); + } + const auto &fileNameToMulCpMap = aiDatumIter->second; + auto cpMapIter = fileNameToMulCpMap.find(jsPandaFile->GetNormalizedFileDesc()); + if (cpMapIter == fileNameToMulCpMap.end()) { + LOG_COMPILER(FATAL) << "cannot find constpools by fileName " << jsPandaFile->GetNormalizedFileDesc().c_str(); + UNREACHABLE(); + } + const CMap<int32_t, JSTaggedValue> &cpMap = cpMapIter->second; auto iter = cpMap.find(cpID); if (iter == cpMap.end()) { LOG_COMPILER(FATAL) << "cannot find deserialized constantpool in anFileInfo, constantPoolID is " << cpID; @@ -393,4 +460,12 @@ bool AOTFileManager::GetAbsolutePath(const CString &relativePathCstr, CString &a } return false; } +
+const Heap *AOTFileManager::GetHeap() +{ + if (vm_ == nullptr) { + return nullptr; + } + return vm_->GetHeap(); +} } // namespace panda::ecmascript
diff --git a/ecmascript/compiler/aot_file/aot_file_manager.h b/ecmascript/compiler/aot_file/aot_file_manager.h index 9e429e2f43e6e9e816eca525b61ffe5faefa9e3d..6afa55a2a39013e7a04d6afeb4d778214d43a3cf 100644 --- a/ecmascript/compiler/aot_file/aot_file_manager.h +++ b/ecmascript/compiler/aot_file/aot_file_manager.h @@ -15,6 +15,7 @@ #ifndef ECMASCRIPT_COMPILER_AOT_FILE_AOT_FILE_MANAGER_H #define ECMASCRIPT_COMPILER_AOT_FILE_AOT_FILE_MANAGER_H +#include #include #include @@ -45,10 +46,13 @@ class JSThread; * | AOT Function Entry Index | | * +--------------------------------+---- * | AOT Instance Hclass (IHC) | + * | AOT Constructor Hclass (CHC) | * +--------------------------------+ */ class AOTLiteralInfo : public TaggedArray { public: + static constexpr size_t NO_FUNC_ENTRY_VALUE = -1; + static AOTLiteralInfo *Cast(TaggedObject *object) { ASSERT(JSTaggedValue(object).IsTaggedArray()); @@ -64,6 +68,7 @@ { TaggedArray::InitializeWithSpecialValue(initValue, capacity + RESERVED_LENGTH, extraLength); SetIhc(JSTaggedValue::Undefined()); + SetChc(JSTaggedValue::Undefined()); } inline uint32_t GetCacheLength() const @@ -81,6 +86,16 @@ return JSTaggedValue(Barriers::GetValue(GetData(), GetIhcOffset())); } + inline void SetChc(JSTaggedValue value) + { + Barriers::SetPrimitive(GetData(), GetChcOffset(), value.GetRawData()); + } + + inline JSTaggedValue GetChc() const + { + return JSTaggedValue(Barriers::GetValue(GetData(), GetChcOffset())); + } + inline void SetObjectToCache(JSThread *thread, uint32_t index, JSTaggedValue value) { Set(thread, index, value); @@ -91,13 +106,19 @@ return Get(index); } private: - static constexpr size_t AOT_IHC_INDEX = 1; + static constexpr size_t AOT_CHC_INDEX = 1; + static constexpr size_t AOT_IHC_INDEX = 2; static constexpr size_t RESERVED_LENGTH = AOT_IHC_INDEX; inline size_t GetIhcOffset() const { return JSTaggedValue::TaggedTypeSize() * (GetLength() - AOT_IHC_INDEX); } + + inline size_t GetChcOffset() const + { + return JSTaggedValue::TaggedTypeSize() * (GetLength() - AOT_CHC_INDEX); + } };
class AOTFileManager { @@ -107,10 +128,10 @@ public: static constexpr char FILE_EXTENSION_AN[] = ".an"; static constexpr char FILE_EXTENSION_AI[] = ".ai"; - static constexpr uint8_t DESERI_CP_ITEM_SIZE = 2; void LoadStubFile(const std::string &fileName); static bool LoadAnFile(const std::string &fileName); + static bool LoadJITFile(const std::string &fileName, uint64_t *result); static AOTFileInfo::CallSiteInfo CalCallSiteInfo(uintptr_t retAddr); static bool TryReadLock(); static bool
InsideStub(uintptr_t pc); @@ -125,18 +146,24 @@ public: std::string_view entryPoint); void SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *method, uint32_t entryIndex, bool *canFastCall = nullptr); + void SetJITFuncEntry(uint64_t codeAddr, Method *method, bool *canFastCall = nullptr); bool LoadAiFile([[maybe_unused]] const std::string &filename); - void LoadAiFile(const JSPandaFile *jsPandaFile); + bool LoadAiFile(const JSPandaFile *jsPandaFile); kungfu::ArkStackMapParser* GetStackMapParser() const; static JSTaggedValue GetAbsolutePath(JSThread *thread, JSTaggedValue relativePathVal); static bool GetAbsolutePath(const CString &relativePathCstr, CString &absPathCstr); static bool RewriteDataSection(uintptr_t dataSec, size_t size, uintptr_t newData, size_t newSize); - void AddConstantPool(const CString &snapshotFileName, JSTaggedValue deserializedCPList); + void ParseDeserializedData(const CString &snapshotFileName, JSTaggedValue deserializedData); JSHandle<ConstantPool> GetDeserializedConstantPool(const JSPandaFile *jsPandaFile, int32_t cpID); + const Heap *GetHeap(); static void DumpAOTInfo() DUMP_API_ATTR;
private: + using MultiConstantPoolMap = CMap<int32_t, JSTaggedValue>; // key: constpool id, value: constantpool + using FileNameToMultiConstantPoolMap = CMap<CString, MultiConstantPoolMap>; + using AIDatum = CUnorderedMap<uint32_t, FileNameToMultiConstantPoolMap>; // key: ai file index + static void PrintAOTEntry(const JSPandaFile *file, const Method *method, uintptr_t entry); void InitializeStubEntries(const std::vector<AOTFileInfo::FuncEntryDes>& stubs); static void AdjustBCStubAndDebuggerStubEntries(JSThread *thread, @@ -144,7 +171,7 @@ private: const AsmInterParsedOption &asmInterOpt); EcmaVM *vm_ {nullptr}; ObjectFactory *factory_ {nullptr}; - std::unordered_map<uint32_t, CMap<int32_t, JSTaggedValue>> desCPs_ {}; + AIDatum aiDatum_ {}; kungfu::ArkStackMapParser *arkStackMapParser_ {nullptr}; friend class AnFileInfo;
diff --git a/ecmascript/compiler/aot_file/aot_version.h b/ecmascript/compiler/aot_file/aot_version.h index 0e50be1746469f82461e0d71cc3cf5b780f4dc42..936c1d0f2d3e7da5f6bb62bb282e0217d6e46b5f 100644 --- a/ecmascript/compiler/aot_file/aot_version.h +++ b/ecmascript/compiler/aot_file/aot_version.h @@ -25,10 +25,10 @@ public: // Release Version Snapshot Version // 3.2 0.0.0.x // 4.0 4.0.0.x - static constexpr base::FileHeader::VersionType AN_VERSION = {4, 0, 0, 3}; + static constexpr base::FileHeaderBase::VersionType AN_VERSION = {4, 0, 0, 5}; static constexpr bool AN_STRICT_MATCH = true; - static constexpr base::FileHeader::VersionType AI_VERSION = {4, 0, 0, 1}; + static constexpr base::FileHeaderBase::VersionType AI_VERSION = {4, 0, 0, 3}; static constexpr bool AI_STRICT_MATCH = true; }; } // namespace panda::ecmascript -#endif // ECMASCRIPT_AOT_VERSION_H \ No newline at end of file +#endif // ECMASCRIPT_AOT_VERSION_H
diff --git a/ecmascript/compiler/aot_file/elf_builder.cpp b/ecmascript/compiler/aot_file/elf_builder.cpp index 044b2ab20b36aba52ccf4e62e15f76ee2929834f..8edd523abe874ba90d024d8bf12273b68d58c3a8 100644 --- a/ecmascript/compiler/aot_file/elf_builder.cpp +++ b/ecmascript/compiler/aot_file/elf_builder.cpp @@ -23,7 +23,7 @@ void ElfBuilder::AddShStrTabSection() { std::map<ElfSecName, std::pair<uint64_t, uint32_t>> &sections = des_[ShStrTableModuleDesIndex].GetSectionsInfo(); - + uint32_t size = 1; for (auto &s : sections_) { std::string str = ModuleSectionDes::GetSecName(s); @@ -83,12 +83,7 @@ void ElfBuilder::Initialize() des_[i].AddArkStackMapSection(); } sectionToAlign_ = { - {ElfSecName::RODATA, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST4, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST8, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST16,
AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::RODATA_CST32, AOTFileInfo::TEXT_SEC_ALIGN}, - {ElfSecName::TEXT, AOTFileInfo::TEXT_SEC_ALIGN}, + {ElfSecName::TEXT, AOTFileInfo::PAGE_ALIGN}, {ElfSecName::STRTAB, 1}, {ElfSecName::SYMTAB, AOTFileInfo::DATA_SEC_ALIGN}, {ElfSecName::SHSTRTAB, AOTFileInfo::DATA_SEC_ALIGN}, @@ -155,7 +150,7 @@ uint32_t ElfBuilder::GetShIndex(ElfSecName section) const int ElfBuilder::GetSecNum() const { - return sections_.size(); + return sections_.size() + 1; // add first empty section. } /* @@ -218,7 +213,7 @@ void ElfBuilder::PackELFHeader(llvm::ELF::Elf64_Ehdr &header, uint32_t version, // size of section headers header.e_shentsize = sizeof(llvm::ELF::Elf64_Shdr); // number of section headers - header.e_shnum = GetSecNum() + 1; // 1: skip null section and ark stackmap + header.e_shnum = GetSecNum(); // section header string table index header.e_shstrndx = static_cast(GetShIndex(ElfSecName::SHSTRTAB)); // section header stub sec info index @@ -301,8 +296,10 @@ std::pair ElfBuilder::FindShStrTab() const void ElfBuilder::AllocateShdr(std::unique_ptr &shdr, const uint32_t &secNum) { shdr = std::make_unique(secNum); - if (memset_s(reinterpret_cast(&shdr[0]), sizeof(llvm::ELF::Elf64_Shdr), - 0, sizeof(llvm::ELF::Elf64_Shdr)) != EOK) { + if (memset_s(reinterpret_cast(&shdr[0]), + sizeof(llvm::ELF::Elf64_Shdr), + 0, + sizeof(llvm::ELF::Elf64_Shdr)) != EOK) { UNREACHABLE(); } } @@ -331,19 +328,32 @@ void ElfBuilder::MergeTextSections(std::ofstream &file, ModuleSectionDes::ModuleRegionInfo &curInfo = moduleInfo[i]; uint32_t curSecSize = des.GetSecSize(ElfSecName::TEXT); uint64_t curSecAddr = des.GetSecAddr(ElfSecName::TEXT); - curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::PAGE_ALIGN); file.seekp(curSecOffset); auto curModuleSec = des.GetSectionsInfo(); - if (curModuleSec.find(ElfSecName::RODATA_CST8) != curModuleSec.end()) { - uint32_t rodataSize = des.GetSecSize(ElfSecName::RODATA_CST8); - uint64_t rodataAddr = des.GetSecAddr(ElfSecName::RODATA_CST8); - file.write(reinterpret_cast(rodataAddr), rodataSize); - curInfo.rodataSize = rodataSize; - curSecOffset += rodataSize; + uint64_t rodataAddrBeforeText = 0; + uint32_t rodataSizeBeforeText = 0; + uint64_t rodataAddrAfterText = 0; + uint32_t rodataSizeAfterText = 0; + std::tie(rodataAddrBeforeText, rodataSizeBeforeText, rodataAddrAfterText, rodataSizeAfterText) = + des.GetMergedRODataAddrAndSize(curSecAddr); + if (rodataSizeBeforeText != 0) { + file.write(reinterpret_cast(rodataAddrBeforeText), rodataSizeBeforeText); + curInfo.rodataSizeBeforeText = rodataSizeBeforeText; + curSecOffset += rodataSizeBeforeText; + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + file.seekp(curSecOffset); } - curInfo.textSize = curSecSize; file.write(reinterpret_cast(curSecAddr), curSecSize); + curInfo.textSize = curSecSize; curSecOffset += curSecSize; + if (rodataSizeAfterText != 0) { + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::DATA_SEC_ALIGN); + file.seekp(curSecOffset); + file.write(reinterpret_cast(rodataAddrAfterText), rodataSizeAfterText); + curInfo.rodataSizeAfterText = rodataSizeAfterText; + curSecOffset += rodataSizeAfterText; + } } } @@ -366,6 +376,38 @@ void ElfBuilder::MergeArkStackMapSections(std::ofstream &file, } } +void ElfBuilder::FixSymtab(llvm::ELF::Elf64_Shdr* shdr) +{ + using Elf64_Sym = llvm::ELF::Elf64_Sym; + + uint32_t secSize = des_[FullSecIndex].GetSecSize(ElfSecName::SYMTAB); + uint64_t secAddr = 
des_[FullSecIndex].GetSecAddr(ElfSecName::SYMTAB); + uint32_t secNum = static_cast<uint32_t>(GetSecNum()); + uint64_t textSecOffset = sectionToShdr_[ElfSecName::TEXT].sh_offset; + uint32_t shStrTabIndex = GetShIndex(ElfSecName::SHSTRTAB); + uint32_t textSecIndex = GetShIndex(ElfSecName::TEXT); + + Elf64_Sym *syms = reinterpret_cast<Elf64_Sym *>(secAddr); + size_t n = secSize / sizeof(Elf64_Sym); + int localCount = -1; + for (size_t i = 0; i < n; ++i) { + Elf64_Sym *sy = &syms[i]; + if (sy->getBinding() != llvm::ELF::STB_LOCAL && localCount == -1) { + localCount = static_cast<int>(i); + } + if (sy->getType() == llvm::ELF::STT_SECTION) { + sy->st_shndx = static_cast<uint16_t>(shStrTabIndex); + } else if (sy->getType() == llvm::ELF::STT_FUNC) { + sy->st_shndx = static_cast<uint16_t>(textSecIndex); + sy->st_value += textSecOffset; + } + if (sy->st_shndx > secNum) { + sy->st_shndx = 0; + } + } + shdr->sh_info = static_cast<uint32_t>(localCount); +} +
/* section of aot.an layout as follows: @@ -410,7 +452,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) llvm::ELF::Elf64_Off curSecOffset = ComputeEndAddrOfShdr(secNum); file.seekp(curSecOffset); - int i = 1; // 1: skip null section + int i = static_cast<int>(GetShIndex(ElfSecName::TEXT)); auto shStrTab = FindShStrTab(); for (auto const &[secName, secInfo] : sections) { @@ -427,7 +469,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) std::string secNameStr = ModuleSectionDes::GetSecName(secName); // the text section offset needs page alignment (PAGE_ALIGN) if (secName == ElfSecName::TEXT) { - curSecOffset = AlignUp(curSecOffset, AOTFileInfo::TEXT_SEC_ALIGN); + curSecOffset = AlignUp(curSecOffset, AOTFileInfo::PAGE_ALIGN); file.seekp(curSecOffset); } llvm::ELF::Elf64_Word shName = FindShName(secNameStr, shStrTab.first, shStrTab.second); @@ -437,6 +479,7 @@ void ElfBuilder::PackELFSections(std::ofstream &file) curShdr.sh_flags = section.Flag(); curShdr.sh_addr = curSecOffset; curShdr.sh_offset = static_cast<llvm::ELF::Elf64_Off>(curSecOffset); + curShdr.sh_info = 0; sectionToFileOffset_[secName] = static_cast<uint64_t>(file.tellp()); switch (secName) { case ElfSecName::ARK_MODULEINFO: { @@ -463,6 +506,9 @@ case ElfSecName::SHSTRTAB: case ElfSecName::ARK_FUNCENTRY: case ElfSecName::ARK_ASMSTUB: { + if (secName == ElfSecName::SYMTAB) { + FixSymtab(&curShdr); + } uint32_t curSecSize = des_[FullSecIndex].GetSecSize(secName); uint64_t curSecAddr = des_[FullSecIndex].GetSecAddr(secName); file.write(reinterpret_cast<char *>(curSecAddr), curSecSize); @@ -480,7 +526,6 @@ file.seekp(curSecOffset); } curShdr.sh_link = static_cast<uint32_t>(section.Link()); - curShdr.sh_info = 0; curShdr.sh_entsize = static_cast<uint64_t>(section.Entsize()); sectionToShdr_[secName] = curShdr; LOG_COMPILER(DEBUG) << " shdr[i].sh_entsize " << std::hex << curShdr.sh_entsize << std::endl;
diff --git a/ecmascript/compiler/aot_file/elf_builder.h b/ecmascript/compiler/aot_file/elf_builder.h index a1722003fa0c84e5098ce93c381a6d4cd1357123..710ae95fc0c377525de2953075b03ac5be618b05 100644 --- a/ecmascript/compiler/aot_file/elf_builder.h +++ b/ecmascript/compiler/aot_file/elf_builder.h @@ -65,10 +65,12 @@ private: void Initialize(); void SetLastSection(); void RemoveNotNeedSection(); + void FixSymtab(llvm::ELF::Elf64_Shdr *shdr); static constexpr uint32_t ASMSTUB_MODULE_NUM = 3; static constexpr uint32_t ShStrTableModuleDesIndex = 0; static constexpr uint32_t FullSecIndex = 0; + std::vector<ModuleSectionDes> des_ {}; std::unique_ptr<char []> shStrTabPtr_ {nullptr}; std::map<ElfSecName, llvm::ELF::Elf64_Shdr> sectionToShdr_; diff --git
a/ecmascript/compiler/aot_file/elf_reader.cpp b/ecmascript/compiler/aot_file/elf_reader.cpp index bc1f56c9688396d32e794e8edb85cd6944a06921..8223e230850d26eb19f95ef8460cb95a15f368c9 100644 --- a/ecmascript/compiler/aot_file/elf_reader.cpp +++ b/ecmascript/compiler/aot_file/elf_reader.cpp @@ -32,7 +32,7 @@ bool ElfReader::VerifyELFHeader(uint32_t version, bool strictMatch) << header.e_ident[llvm::ELF::EI_MAG2] << header.e_ident[llvm::ELF::EI_MAG3]; return false; } - if (!base::FileHeader::VerifyVersion("Elf ", header.e_version, version, strictMatch)) { + if (!base::FileHeaderBase::VerifyVersion("Elf ", header.e_version, version, strictMatch)) { return false; } return true; @@ -256,15 +256,20 @@ void ElfReader::SeparateTextSections(std::vector &des, { for (size_t i = 0; i < des.size(); ++i) { auto moduleInfo = GetCurModuleInfo(i, moduleInfoOffset); - secOffset = AlignUp(secOffset, TEXT_SEC_ALIGN); - uint32_t rodataSize = moduleInfo->rodataSize; - if (rodataSize > 0) { - des[i].SetSecAddrAndSize(ElfSecName::RODATA_CST8, secAddr + secOffset, rodataSize); - secOffset += rodataSize; + secOffset = AlignUp(secOffset, AOTFileInfo::PAGE_ALIGN); + uint32_t rodataSizeBeforeText = moduleInfo->rodataSizeBeforeText; + uint32_t rodataSizeAfterText = moduleInfo->rodataSizeAfterText; + if (rodataSizeBeforeText != 0) { + secOffset += rodataSizeBeforeText; + secOffset = AlignUp(secOffset, AOTFileInfo::TEXT_SEC_ALIGN); } uint32_t textSize = moduleInfo->textSize; des[i].SetSecAddrAndSize(ElfSecName::TEXT, secAddr + secOffset, textSize); secOffset += textSize; + if (rodataSizeAfterText != 0) { + secOffset = AlignUp(secOffset, AOTFileInfo::DATA_SEC_ALIGN); + secOffset += rodataSizeAfterText; + } } } @@ -294,17 +299,25 @@ void ElfReader::SeparateTextSections(BinaryBufferParser &parser, { for (size_t i = 0; i < des.size(); ++i) { auto moduleInfo = moduleInfo_[i]; - secOffset = AlignUp(secOffset, TEXT_SEC_ALIGN); - uint32_t rodataSize = moduleInfo.rodataSize; - if (rodataSize > 0) { - parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSize, curShOffset + secOffset); - des[i].SetSecAddrAndSize(ElfSecName::RODATA_CST8, secAddr + secOffset, rodataSize); - secOffset += rodataSize; + secOffset = AlignUp(secOffset, AOTFileInfo::PAGE_ALIGN); + uint32_t rodataSizeBeforeText = moduleInfo.rodataSizeBeforeText; + uint32_t rodataSizeAfterText = moduleInfo.rodataSizeAfterText; + if (rodataSizeBeforeText != 0) { + parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSizeBeforeText, + curShOffset + secOffset); + secOffset += rodataSizeBeforeText; + secOffset = AlignUp(secOffset, AOTFileInfo::TEXT_SEC_ALIGN); } uint32_t textSize = moduleInfo.textSize; parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), textSize, curShOffset + secOffset); des[i].SetSecAddrAndSize(ElfSecName::TEXT, secAddr + secOffset, textSize); secOffset += textSize; + if (rodataSizeAfterText != 0) { + secOffset = AlignUp(secOffset, AOTFileInfo::DATA_SEC_ALIGN); + parser.ParseBuffer(reinterpret_cast(secAddr + secOffset), rodataSizeAfterText, + curShOffset + secOffset); + secOffset += rodataSizeAfterText; + } } } diff --git a/ecmascript/compiler/aot_file/elf_reader.h b/ecmascript/compiler/aot_file/elf_reader.h index 24c2752782705107c1766d142a5eca461fd8b03a..4a6ef0a7f44f3bf292ec095503a83c6bd6f3e997 100644 --- a/ecmascript/compiler/aot_file/elf_reader.h +++ b/ecmascript/compiler/aot_file/elf_reader.h @@ -53,7 +53,6 @@ private: return moduleInfoSize / sizeof(ModuleSectionDes::ModuleRegionInfo); } - static constexpr uint32_t TEXT_SEC_ALIGN = 
4096; static constexpr uint32_t ASMSTUB_MODULE_NUM = 3; ExecutedMemoryAllocator::ExeMem stubsMem_ {}; MemMap fileMapMem_ {}; diff --git a/ecmascript/compiler/aot_file/func_entry_des.h b/ecmascript/compiler/aot_file/func_entry_des.h new file mode 100644 index 0000000000000000000000000000000000000000..5329648c5dfc6ecbd93b5709bb32fb8d8f7a5365 --- /dev/null +++ b/ecmascript/compiler/aot_file/func_entry_des.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ECMASCRIPT_COMPILER_FUNC_ENTRY_DES_H +#define ECMASCRIPT_COMPILER_FUNC_ENTRY_DES_H + +#include "ecmascript/common.h" +#include "ecmascript/compiler/aot_file/executed_memory_allocator.h" +#include "ecmascript/compiler/bc_call_signature.h" +#include "ecmascript/deoptimizer/calleeReg.h" + +namespace panda::ecmascript { +struct FuncEntryDes { + uint64_t codeAddr_ {}; + CallSignature::TargetKind kind_; + bool isMainFunc_ {}; + bool isFastCall_ {}; + uint32_t indexInKindOrMethodId_ {}; + uint32_t moduleIndex_ {}; + int fpDeltaPrevFrameSp_ {}; + uint32_t funcSize_ {}; + uint32_t calleeRegisterNum_ {}; + int32_t CalleeReg2Offset_[2 * kungfu::MAX_CALLEE_SAVE_REIGISTER_NUM]; + bool IsStub() const + { + return CallSignature::TargetKind::STUB_BEGIN <= kind_ && kind_ < CallSignature::TargetKind::STUB_END; + } + + bool IsBCStub() const + { + return CallSignature::TargetKind::BCHANDLER_BEGIN <= kind_ && + kind_ < CallSignature::TargetKind::BCHANDLER_END; + } + + bool IsBCHandlerStub() const + { + return (kind_ == CallSignature::TargetKind::BYTECODE_HANDLER); + } + + bool IsBuiltinsStub() const + { + return (kind_ == CallSignature::TargetKind::BUILTINS_STUB || + kind_ == CallSignature::TargetKind::BUILTINS_WITH_ARGV_STUB); + } + + bool IsCommonStub() const + { + return (kind_ == CallSignature::TargetKind::COMMON_STUB); + } + + bool IsGeneralRTStub() const + { + return (kind_ >= CallSignature::TargetKind::RUNTIME_STUB && kind_ <= CallSignature::TargetKind::DEOPT_STUB); + } +}; +} // namespace panda::ecmascript +#endif // ECMASCRIPT_COMPILER_FUNC_ENTRY_DES_H + diff --git a/ecmascript/compiler/aot_file/module_section_des.h b/ecmascript/compiler/aot_file/module_section_des.h index 5e292e21edb0960b88aefa3adc15d3bd455f44f5..b92e67b8c5a62cff788dadccd4a0f501511e6ade 100644 --- a/ecmascript/compiler/aot_file/module_section_des.h +++ b/ecmascript/compiler/aot_file/module_section_des.h @@ -18,6 +18,7 @@ #include #include +#include "ecmascript/base/number_helper.h" #include "ecmascript/compiler/aot_file/binary_buffer_parser.h" #include "ecmascript/compiler/binary_section.h" @@ -27,12 +28,45 @@ public: struct ModuleRegionInfo { uint32_t startIndex {0}; uint32_t funcCount {0}; - uint32_t rodataSize {0}; + uint32_t rodataSizeBeforeText {0}; + uint32_t rodataSizeAfterText {0}; uint32_t textSize {0}; uint32_t stackMapSize {0}; }; static std::string GetSecName(ElfSecName idx); + void UpdateRODataInfo(uint64_t textAddr, uint64_t &addrBeforeText, uint32_t &sizeBeforeText, + uint64_t 
&addrAfterText, uint32_t &sizeAfterText, ElfSecName sec) const + { + if (sectionsInfo_.find(sec) == sectionsInfo_.end()) { + return; + } + uint64_t curSectionAddr = GetSecAddr(sec); + ASSERT(curSectionAddr != 0); + ASSERT(curSectionAddr != textAddr); + if (curSectionAddr < textAddr) { + addrBeforeText = (curSectionAddr < addrBeforeText) ? curSectionAddr : addrBeforeText; + sizeBeforeText += GetSecSize(sec); + } else { + addrAfterText = (curSectionAddr < addrAfterText) ? curSectionAddr : addrAfterText; + sizeAfterText += GetSecSize(sec); + } + } + + std::tuple GetMergedRODataAddrAndSize(uint64_t textAddr) const + { + uint64_t addrBeforeText = base::MAX_UINT64_VALUE; + uint32_t sizeBeforeText = 0; + uint64_t addrAfterText = base::MAX_UINT64_VALUE; + uint32_t sizeAfterText = 0; + for (uint8_t i = static_cast(ElfSecName::RODATA); i <= static_cast(ElfSecName::RODATA_CST32); + i++) { + UpdateRODataInfo(textAddr, addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText, + static_cast(i)); + } + return std::make_tuple(addrBeforeText, sizeBeforeText, addrAfterText, sizeAfterText); + } + void SetArkStackMapPtr(std::shared_ptr ptr) { arkStackMapPtr_ = std::move(ptr); diff --git a/ecmascript/compiler/aot_file/stub_file_info.cpp b/ecmascript/compiler/aot_file/stub_file_info.cpp index 5fabb372fccc8584664f47c02095518c0aa52c8a..4078ddd05ff66b27a262cd7e6819173cf76eb990 100644 --- a/ecmascript/compiler/aot_file/stub_file_info.cpp +++ b/ecmascript/compiler/aot_file/stub_file_info.cpp @@ -49,7 +49,7 @@ void StubFileInfo::Save(const std::string &filename, Triple triple) ElfBuilder builder(des_, GetDumpSectionNames()); llvm::ELF::Elf64_Ehdr header; - builder.PackELFHeader(header, base::FileHeader::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); + builder.PackELFHeader(header, base::FileHeaderBase::ToVersionNumber(AOTFileVersion::AN_VERSION), triple); file.write(reinterpret_cast(&header), sizeof(llvm::ELF::Elf64_Ehdr)); builder.PackELFSections(file); builder.PackELFSegment(file); @@ -66,6 +66,11 @@ bool StubFileInfo::MmapLoad() return false; } + if (!FileExist(realPath.c_str())) { + LOG_ECMA(WARN) << "File not exist. file: " << realPath; + return false; + } + fileMapMem_ = FileMap(realPath.c_str(), FILE_RDONLY, PAGE_PROT_READ); if (fileMapMem_.GetOriginAddr() == nullptr) { LOG_ECMA(ERROR) << "File mmap failed"; diff --git a/ecmascript/compiler/aot_snapshot/aot_snapshot.cpp b/ecmascript/compiler/aot_snapshot/aot_snapshot.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d8909ce846e02017b3773ba9e11c43f1d0df0ab --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/aot_snapshot.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ecmascript/compiler/aot_snapshot/aot_snapshot.h" + +#include "ecmascript/jspandafile/program_object.h" +#include "ecmascript/ts_types/ts_manager.h" +#include "ecmascript/global_env_constants-inl.h" +
+namespace panda::ecmascript::kungfu { +void AOTSnapshot::InitSnapshot(uint32_t compileFilesCount) +{ + JSHandle<TaggedArray> data = factory_->NewTaggedArray(compileFilesCount * + AOTSnapshotConstants::SNAPSHOT_DATA_ITEM_SIZE); + snapshotData_.SetData(data.GetTaggedValue()); +} +
+JSHandle<ConstantPool> AOTSnapshot::NewSnapshotConstantPool(uint32_t cacheSize) +{ + JSHandle<ConstantPool> cp = factory_->NewConstantPool(cacheSize); + + ASSERT(!snapshotData_.GetHClassInfo().IsHole()); + cp->SetAotHClassInfo(snapshotData_.GetHClassInfo()); + cp->SetAotArrayInfo(snapshotData_.GetArrayInfo()); + cp->SetConstantIndexInfo(snapshotData_.GetConstantIndexInfo()); + return cp; +} +
+void AOTSnapshot::GenerateSnapshotConstantPools(const CMap<int32_t, JSTaggedValue> &allConstantPools, + const CString &fileName) +{ + JSHandle<TaggedArray> snapshotCpArr = factory_->NewTaggedArray(allConstantPools.size() * + AOTSnapshotConstants::SNAPSHOT_CP_ARRAY_ITEM_SIZE); + snapshotData_.AddSnapshotCpArrayToData(thread_, fileName, snapshotCpArr); + + JSMutableHandle<ConstantPool> cp(thread_, thread_->GlobalConstants()->GetUndefined()); + uint32_t pos = 0; + for (auto &iter : allConstantPools) { + int32_t cpId = iter.first; + cp.Update(iter.second); + uint32_t cacheSize = cp->GetCacheLength(); + if (vm_->GetJSOptions().IsEnableCompilerLogSnapshot()) { + LOG_COMPILER(INFO) << "[aot-snapshot] constantPoolID: " << cpId; + LOG_COMPILER(INFO) << "[aot-snapshot] cacheSize: " << cacheSize; + } + + JSHandle<ConstantPool> newCp = NewSnapshotConstantPool(cacheSize); + snapshotCpArr->Set(thread_, pos++, JSTaggedValue(cpId)); + snapshotData_.RecordCpArrIdx(cpId, pos); + snapshotCpArr->Set(thread_, pos++, newCp.GetTaggedValue()); + } +} +
+void AOTSnapshot::StoreConstantPoolInfo(BytecodeInfoCollector *bcInfoCollector) +{ + const JSPandaFile *jsPandaFile = bcInfoCollector->GetJSPandaFile(); + const CMap<int32_t, JSTaggedValue> &allConstantPools = vm_->GetJSThread()-> + GetCurrentEcmaContext()->FindConstpools(jsPandaFile).value(); + GenerateSnapshotConstantPools(allConstantPools, jsPandaFile->GetNormalizedFileDesc()); + bcInfoCollector->StoreDataToGlobalData(snapshotData_); +} +} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/aot_snapshot/aot_snapshot.h b/ecmascript/compiler/aot_snapshot/aot_snapshot.h new file mode 100644 index 0000000000000000000000000000000000000000..33d5bc7cbd4e27ef7b4be242c976886552f6468f --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/aot_snapshot.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +#ifndef ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_H +#define ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_H + +#include "ecmascript/ecma_vm.h" +#include "ecmascript/object_factory.h" +#include "ecmascript/compiler/aot_snapshot/snapshot_global_data.h" +#include "ecmascript/compiler/bytecode_info_collector.h" +
+namespace panda::ecmascript::kungfu { +class AOTSnapshot { +public: + explicit AOTSnapshot(EcmaVM *vm) + : vm_(vm), factory_(vm->GetFactory()), thread_(vm_->GetJSThread()) {} + + void Iterate(const RootVisitor &v) + { + snapshotData_.Iterate(v); + } + + JSTaggedValue GetSnapshotData() const + { + return snapshotData_.GetData(); + } + + void StoreHClassInfo(JSHandle<TaggedArray> info) + { + snapshotData_.StoreHClassInfo(info); + } + + void StoreArrayInfo(JSHandle<TaggedArray> info) + { + snapshotData_.StoreArrayInfo(info); + } + + void StoreConstantIndexInfo(JSHandle<TaggedArray> info) + { + snapshotData_.StoreConstantIndexInfo(info); + } + + void InitSnapshot(uint32_t compileFilesCount); + + void StoreConstantPoolInfo(BytecodeInfoCollector *bcInfoCollector); + + void ResolveSnapshotData(const CMap<std::pair<std::string, uint32_t>, uint32_t> &methodToEntryIndexMap) + { + snapshotData_.ResolveSnapshotData(thread_, methodToEntryIndexMap); + } + +private: + JSHandle<ConstantPool> NewSnapshotConstantPool(uint32_t cacheSize); + + void GenerateSnapshotConstantPools(const CMap<int32_t, JSTaggedValue> &allConstantPools, const CString &fileName); + + EcmaVM *vm_ {nullptr}; + ObjectFactory *factory_ {nullptr}; + JSThread *thread_ {nullptr}; + SnapshotGlobalData snapshotData_ {}; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_H
diff --git a/ecmascript/compiler/aot_snapshot/aot_snapshot_constants.h b/ecmascript/compiler/aot_snapshot/aot_snapshot_constants.h new file mode 100644 index 0000000000000000000000000000000000000000..9bef3512e717bc34b8fa271cd09fe09b6397d380 --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/aot_snapshot_constants.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_CONSTANTS_H +#define ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_CONSTANTS_H + +#include "ecmascript/mem/mem_common.h" +
+namespace panda::ecmascript { +class AOTSnapshotConstants final { +public: + static constexpr uint8_t SNAPSHOT_DATA_ITEM_SIZE = 2; + static constexpr uint8_t SNAPSHOT_CP_ARRAY_ITEM_SIZE = 2; + +private: + AOTSnapshotConstants() = default; + ~AOTSnapshotConstants() = default; +}; +} // namespace panda::ecmascript +#endif // ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_CONSTANTS_H
diff --git a/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.cpp b/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.cpp new file mode 100644 index 0000000000000000000000000000000000000000..98f9a69d633d6037028c8546894a228e1658d1d8 --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.cpp @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.h"
+
+#include "ecmascript/compiler/pgo_type/pgo_type_location.h"
+#include "ecmascript/compiler/pgo_type/pgo_type_manager.h"
+#include "ecmascript/jspandafile/program_object.h"
+#include "ecmascript/ts_types/global_type_info.h"
+#include "ecmascript/global_env_constants-inl.h"
+
+namespace panda::ecmascript::kungfu {
+uint64_t BaseSnapshotInfo::GetItemKey(uint32_t constantPoolId, uint32_t constantPoolIdx)
+{
+    uint64_t result = constantPoolId;
+    result = result << CONSTPOOL_MASK;
+    result |= constantPoolIdx;
+    return result;
+}
+
+void BaseSnapshotInfo::Record(ItemData &data)
+{
+    ItemKey key = GetItemKey(data.constantPoolId_, data.constantPoolIdx_);
+    info_.emplace(key, data);
+}
+
+void BaseSnapshotInfo::CollectLiteralInfo(EcmaVM *vm, JSHandle<TaggedArray> array, uint32_t constantPoolIndex,
+                                          JSHandle<ConstantPool> snapshotConstantPool,
+                                          const std::set<uint32_t> &skippedMethods,
+                                          JSHandle<JSTaggedValue> ihc, JSHandle<JSTaggedValue> chc)
+{
+    JSThread *thread = vm->GetJSThread();
+    ObjectFactory *factory = vm->GetFactory();
+    JSMutableHandle<JSTaggedValue> valueHandle(thread, JSTaggedValue::Undefined());
+    uint32_t len = array->GetLength();
+    std::vector<int> methodOffsetVec;
+    for (uint32_t i = 0; i < len; i++) {
+        valueHandle.Update(array->Get(i));
+        if (valueHandle->IsJSFunction()) {
+            auto methodOffset = JSHandle<JSFunction>(valueHandle)->GetCallTarget()->GetMethodId().GetOffset();
+            if (skippedMethods.find(methodOffset) != skippedMethods.end()) {
+                methodOffsetVec.emplace_back(AOTLiteralInfo::NO_FUNC_ENTRY_VALUE);
+            } else {
+                methodOffsetVec.emplace_back(methodOffset);
+            }
+        }
+    }
+
+    uint32_t methodSize = methodOffsetVec.size();
+    JSHandle<AOTLiteralInfo> aotLiteralInfo = factory->NewAOTLiteralInfo(methodSize);
+    for (uint32_t i = 0; i < methodSize; ++i) {
+        auto methodOffset = methodOffsetVec[i];
+        aotLiteralInfo->SetObjectToCache(thread, i, JSTaggedValue(methodOffset));
+    }
+
+    if (!ihc->IsUndefined()) {
+        aotLiteralInfo->SetIhc(ihc.GetTaggedValue());
+    }
+
+    if (!chc->IsUndefined()) {
+        aotLiteralInfo->SetChc(chc.GetTaggedValue());
+    }
+
+    snapshotConstantPool->SetObjectToCache(thread, constantPoolIndex, aotLiteralInfo.GetTaggedValue());
+}
+
+void StringSnapshotInfo::StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile,
+                                               SnapshotGlobalData &globalData, const std::set<uint32_t>&)
+{
+    JSThread *thread = vm->GetJSThread();
+    for (auto item : info_) {
+        const ItemData &data = item.second;
+        JSTaggedValue cp = thread->GetCurrentEcmaContext()->FindConstpool(jsPandaFile, data.constantPoolId_);
+        JSTaggedValue str = ConstantPool::GetStringFromCache(thread, cp, data.constantPoolIdx_);
+
+        uint32_t snapshotCpArrIdx = globalData.GetCpArrIdxByConstanPoolId(data.constantPoolId_);
+        JSHandle<TaggedArray> snapshotCpArr(thread, globalData.GetCurSnapshotCpArray());
+        JSHandle<ConstantPool> snapshotCp(thread, snapshotCpArr->Get(snapshotCpArrIdx));
+        snapshotCp->SetObjectToCache(thread, data.constantPoolIdx_, str);
+    }
+}
+
+void MethodSnapshotInfo::StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile
*jsPandaFile, + SnapshotGlobalData &globalData, + const std::set &skippedMethods) +{ + JSThread *thread = vm->GetJSThread(); + for (auto item : info_) { + const ItemData &data = item.second; + JSHandle cp(thread, + thread->GetCurrentEcmaContext()->FindConstpool(jsPandaFile, data.constantPoolId_)); + uint32_t methodOffset = cp->GetEntityId(data.constantPoolIdx_).GetOffset(); + + uint32_t snapshotCpArrIdx = globalData.GetCpArrIdxByConstanPoolId(data.constantPoolId_); + JSHandle snapshotCpArr(thread, globalData.GetCurSnapshotCpArray()); + JSHandle snapshotCp(thread, snapshotCpArr->Get(snapshotCpArrIdx)); + if (skippedMethods.find(methodOffset) == skippedMethods.end()) { + globalData.RecordReviseData(SnapshotReviseInfo::Type::METHOD, + BaseReviseData::ItemData {globalData.GetCurDataIdx(), snapshotCpArrIdx, data.constantPoolIdx_}); + snapshotCp->SetObjectToCache(thread, data.constantPoolIdx_, JSTaggedValue(methodOffset)); + } + } +} + +void ClassLiteralSnapshotInfo::StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile, + SnapshotGlobalData &globalData, + const std::set &skippedMethods) +{ + JSThread *thread = vm->GetJSThread(); + PGOTypeManager *ptManager = thread->GetCurrentEcmaContext()->GetPTManager(); + for (auto item : info_) { + const ItemData &data = item.second; + JSHandle cp(thread, + thread->GetCurrentEcmaContext()->FindConstpool(jsPandaFile, data.constantPoolId_)); + auto literalObj = ConstantPool::GetClassLiteralFromCache(thread, cp, data.constantPoolIdx_, data.recordName_); + JSHandle classLiteral(thread, literalObj); + JSHandle arrayHandle(thread, classLiteral->GetArray()); + + uint32_t snapshotCpArrIdx = globalData.GetCpArrIdxByConstanPoolId(data.constantPoolId_); + JSHandle snapshotCpArr(thread, globalData.GetCurSnapshotCpArray()); + JSHandle snapshotCp(thread, snapshotCpArr->Get(snapshotCpArrIdx)); + + PGOTypeLocation loc(jsPandaFile, data.methodOffset_, data.bcIndex_); + PGOTypeLocation ctorLoc = loc.ChangeType(PGOTypeLocation::Type::CONSTRUCTOR); + ProfileType pt = ptManager->GetRootIdByLocation(loc); + ProfileType ctorPt = ptManager->GetRootIdByLocation(ctorLoc); + JSHandle ihc = JSHandle(thread, ptManager->QueryHClass(pt, pt)); + JSHandle chc = JSHandle(thread, ptManager->QueryHClass(ctorPt, ctorPt)); + + CollectLiteralInfo(vm, arrayHandle, data.constantPoolIdx_, snapshotCp, skippedMethods, ihc, chc); + globalData.RecordReviseData(SnapshotReviseInfo::Type::LITERAL, + BaseReviseData::ItemData {globalData.GetCurDataIdx(), snapshotCpArrIdx, data.constantPoolIdx_}); + } +} + + +void ObjectLiteralSnapshotInfo::StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile, + SnapshotGlobalData &globalData, + const std::set &skippedMethods) +{ + JSThread *thread = vm->GetJSThread(); + PGOTypeManager *ptManager = thread->GetCurrentEcmaContext()->GetPTManager(); + for (auto item : info_) { + const ItemData &data = item.second; + JSHandle cp(thread, + thread->GetCurrentEcmaContext()->FindConstpool(jsPandaFile, data.constantPoolId_)); + panda_file::File::EntityId id = cp->GetEntityId(data.constantPoolIdx_); + JSMutableHandle elements(thread, JSTaggedValue::Undefined()); + JSMutableHandle properties(thread, JSTaggedValue::Undefined()); + LiteralDataExtractor::ExtractObjectDatas(thread, jsPandaFile, id, elements, + properties, cp, data.recordName_); + + uint32_t snapshotCpArrIdx = globalData.GetCpArrIdxByConstanPoolId(data.constantPoolId_); + JSHandle snapshotCpArr(thread, globalData.GetCurSnapshotCpArray()); + JSHandle snapshotCp(thread, 
snapshotCpArr->Get(snapshotCpArrIdx)); + + PGOTypeLocation loc(jsPandaFile, data.methodOffset_, data.bcIndex_); + PGOTypeLocation ctorLoc = loc.ChangeType(PGOTypeLocation::Type::CONSTRUCTOR); + ProfileType pt = ptManager->GetRootIdByLocation(loc); + ProfileType ctorPt = ptManager->GetRootIdByLocation(ctorLoc); + JSHandle ihc = JSHandle(thread, ptManager->QueryHClass(pt, pt)); + JSHandle chc = JSHandle(thread, ptManager->QueryHClass(ctorPt, ctorPt)); + + CollectLiteralInfo(vm, properties, data.constantPoolIdx_, snapshotCp, skippedMethods, ihc, chc); + globalData.RecordReviseData(SnapshotReviseInfo::Type::LITERAL, + BaseReviseData::ItemData {globalData.GetCurDataIdx(), snapshotCpArrIdx, data.constantPoolIdx_}); + } +} + +void ArrayLiteralSnapshotInfo::StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile, + SnapshotGlobalData &globalData, + const std::set &skippedMethods) +{ + JSThread *thread = vm->GetJSThread(); + for (auto item : info_) { + const ItemData &data = item.second; + JSHandle cp(thread, + thread->GetCurrentEcmaContext()->FindConstpool(jsPandaFile, data.constantPoolId_)); + panda_file::File::EntityId id = cp->GetEntityId(data.constantPoolIdx_); + JSHandle literal = LiteralDataExtractor::GetDatasIgnoreType( + thread, jsPandaFile, id, cp, data.recordName_); + + uint32_t snapshotCpArrIdx = globalData.GetCpArrIdxByConstanPoolId(data.constantPoolId_); + JSHandle snapshotCpArr(thread, globalData.GetCurSnapshotCpArray()); + JSHandle snapshotCp(thread, snapshotCpArr->Get(snapshotCpArrIdx)); + JSHandle ihc = thread->GlobalConstants()->GetHandledUndefined(); + JSHandle chc = thread->GlobalConstants()->GetHandledUndefined(); + CollectLiteralInfo(vm, literal, data.constantPoolIdx_, snapshotCp, skippedMethods, ihc, chc); + globalData.RecordReviseData(SnapshotReviseInfo::Type::LITERAL, + BaseReviseData::ItemData {globalData.GetCurDataIdx(), snapshotCpArrIdx, data.constantPoolIdx_}); + } +} + +void SnapshotConstantPoolData::Record(const BytecodeInstruction &bcIns, int32_t bcIndex, + const CString &recordName, const MethodLiteral *method) +{ + BytecodeInstruction::Opcode opcode = static_cast(bcIns.GetOpcode()); + uint32_t methodOffset = method->GetMethodId().GetOffset(); + panda_file::IndexAccessor indexAccessor(*jsPandaFile_->GetPandaFile(), + panda_file::File::EntityId(methodOffset)); + uint32_t constantPoolId = static_cast(indexAccessor.GetHeaderIndex()); + + switch (opcode) { + case BytecodeInstruction::Opcode::LDA_STR_ID16: + case BytecodeInstruction::Opcode::STOWNBYNAME_IMM8_ID16_V8: + case BytecodeInstruction::Opcode::STOWNBYNAME_IMM16_ID16_V8: + case BytecodeInstruction::Opcode::CREATEREGEXPWITHLITERAL_IMM8_ID16_IMM8: + case BytecodeInstruction::Opcode::CREATEREGEXPWITHLITERAL_IMM16_ID16_IMM8: + case BytecodeInstruction::Opcode::STCONSTTOGLOBALRECORD_IMM16_ID16: + case BytecodeInstruction::Opcode::TRYLDGLOBALBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::TRYLDGLOBALBYNAME_IMM16_ID16: + case BytecodeInstruction::Opcode::TRYSTGLOBALBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::TRYSTGLOBALBYNAME_IMM16_ID16: + case BytecodeInstruction::Opcode::STTOGLOBALRECORD_IMM16_ID16: + case BytecodeInstruction::Opcode::STOWNBYNAMEWITHNAMESET_IMM8_ID16_V8: + case BytecodeInstruction::Opcode::STOWNBYNAMEWITHNAMESET_IMM16_ID16_V8: + case BytecodeInstruction::Opcode::LDTHISBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::LDTHISBYNAME_IMM16_ID16: + case BytecodeInstruction::Opcode::STTHISBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::STTHISBYNAME_IMM16_ID16: + case 
BytecodeInstruction::Opcode::LDGLOBALVAR_IMM16_ID16: + case BytecodeInstruction::Opcode::LDOBJBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::LDOBJBYNAME_IMM16_ID16: + case BytecodeInstruction::Opcode::STOBJBYNAME_IMM8_ID16_V8: + case BytecodeInstruction::Opcode::STOBJBYNAME_IMM16_ID16_V8: + case BytecodeInstruction::Opcode::LDSUPERBYNAME_IMM8_ID16: + case BytecodeInstruction::Opcode::LDSUPERBYNAME_IMM16_ID16: + case BytecodeInstruction::Opcode::STSUPERBYNAME_IMM8_ID16_V8: + case BytecodeInstruction::Opcode::STSUPERBYNAME_IMM16_ID16_V8: + case BytecodeInstruction::Opcode::STGLOBALVAR_IMM16_ID16: + case BytecodeInstruction::Opcode::LDBIGINT_ID16: { + auto constantPoolIdx = bcIns.GetId().AsRawValue(); + BaseSnapshotInfo::ItemData itemData = {recordName, constantPoolId, constantPoolIdx, methodOffset, bcIndex}; + RecordInfo(Type::STRING, itemData); + break; + } + case BytecodeInstruction::Opcode::DEFINEFUNC_IMM8_ID16_IMM8: + case BytecodeInstruction::Opcode::DEFINEFUNC_IMM16_ID16_IMM8: + case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM8_ID16_IMM8: + case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM16_ID16_IMM8: { + auto constantPoolIdx = bcIns.GetId().AsRawValue(); + BaseSnapshotInfo::ItemData itemData = {recordName, constantPoolId, constantPoolIdx, methodOffset, bcIndex}; + RecordInfo(Type::METHOD, itemData); + break; + } + case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM8_ID16: + case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: { + auto constantPoolIdx = bcIns.GetId().AsRawValue(); + BaseSnapshotInfo::ItemData itemData = {recordName, constantPoolId, constantPoolIdx, methodOffset, bcIndex}; + RecordInfo(Type::OBJECT_LITERAL, itemData); + break; + } + case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM16_ID16: { + auto constantPoolIdx = bcIns.GetId().AsRawValue(); + BaseSnapshotInfo::ItemData itemData = {recordName, constantPoolId, constantPoolIdx, methodOffset, bcIndex}; + RecordInfo(Type::ARRAY_LITERAL, itemData); + break; + } + case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: { + auto methodCPIdx = (bcIns.GetId ()).AsRawValue(); + BaseSnapshotInfo::ItemData methodItemData = {recordName, constantPoolId, + methodCPIdx, methodOffset, bcIndex}; + RecordInfo(Type::METHOD, methodItemData); + + auto literalCPIdx = (bcIns.GetId ()).AsRawValue(); + BaseSnapshotInfo::ItemData literalItemData = {recordName, constantPoolId, + literalCPIdx, methodOffset, bcIndex}; + RecordInfo(Type::CLASS_LITERAL, literalItemData); + break; + } + case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: { + auto methodCPIdx = (bcIns.GetId ()).AsRawValue(); + BaseSnapshotInfo::ItemData methodItemData = {recordName, constantPoolId, + methodCPIdx, methodOffset, bcIndex}; + RecordInfo(Type::METHOD, methodItemData); + + auto literalCPIdx = (bcIns.GetId ()).AsRawValue(); + BaseSnapshotInfo::ItemData literalItemData = {recordName, constantPoolId, + literalCPIdx, methodOffset, bcIndex}; + RecordInfo(Type::CLASS_LITERAL, literalItemData); + break; + } + default: + break; + } +} + +void SnapshotConstantPoolData::StoreDataToGlobalData(SnapshotGlobalData &snapshotData, + const std::set &skippedMethods) const +{ + for (auto &info : infos_) { + info->StoreDataToGlobalData(vm_, jsPandaFile_, snapshotData, skippedMethods); + } +} +} // namespace panda::ecmascript diff --git a/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.h 
b/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.h new file mode 100644 index 0000000000000000000000000000000000000000..67b46a5d28c06c9966d385bbc284342e4a87813c --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_DATA_H +#define ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_DATA_H + +#include "ecmascript/compiler/aot_snapshot/snapshot_global_data.h" +#include "ecmascript/jspandafile/js_pandafile.h" +#include "ecmascript/mem/c_containers.h" + +namespace panda::ecmascript::kungfu { +#define DATA_TYPE_LIST(V) \ + V(STRING, StringSnapshot) \ + V(METHOD, MethodSnapshot) \ + V(CLASS_LITERAL, ClassLiteralSnapshot) \ + V(OBJECT_LITERAL, ObjectLiteralSnapshot) \ + V(ARRAY_LITERAL, ArrayLiteralSnapshot) + +class BaseSnapshotInfo { +public: + struct ItemData { + CString recordName_; + int32_t constantPoolId_ {0}; + uint32_t constantPoolIdx_ {0}; + uint32_t methodOffset_ {0}; + uint32_t bcIndex_ {0}; + }; + + BaseSnapshotInfo() = default; + virtual ~BaseSnapshotInfo() = default; + + virtual void StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile, + SnapshotGlobalData &globalData, const std::set &skippedMethods) = 0; + + void Record(ItemData &data); + +protected: + using ItemKey = uint64_t; + + static constexpr uint32_t CONSTPOOL_MASK = 32; + + static ItemKey GetItemKey(uint32_t constantPoolId, uint32_t constantPoolIdx); + + void CollectLiteralInfo(EcmaVM *vm, JSHandle array, uint32_t constantPoolIndex, + JSHandle snapshotConstantPool, const std::set &skippedMethods, + JSHandle ihc, JSHandle chc); + + CUnorderedMap info_; +}; + +#define DEFINE_INFO_CLASS(V, name) \ + class name##Info final : public BaseSnapshotInfo { \ + public: \ + virtual void StoreDataToGlobalData(EcmaVM *vm, const JSPandaFile *jsPandaFile, \ + SnapshotGlobalData &globalData, const std::set &skippedMethods) override; \ + }; + + DATA_TYPE_LIST(DEFINE_INFO_CLASS) +#undef DEFINE_INFO_CLASS + +class SnapshotConstantPoolData { +public: + SnapshotConstantPoolData(EcmaVM *vm, const JSPandaFile *jsPandaFile) + : vm_(vm), jsPandaFile_(jsPandaFile) + { +#define ADD_INFO(V, name) \ + infos_.emplace_back(std::make_unique()); + DATA_TYPE_LIST(ADD_INFO) +#undef ADD_INFO + } + ~SnapshotConstantPoolData() = default; + + void Record(const BytecodeInstruction &bcIns, int32_t bcIndex, + const CString &recordName, const MethodLiteral *method); + + void StoreDataToGlobalData(SnapshotGlobalData &snapshotData, const std::set &skippedMethods) const; + +private: + enum class Type { +#define DEFINE_TYPE(type, ...) 
type, + DATA_TYPE_LIST(DEFINE_TYPE) +#undef DEFINE_TYPE + }; + + void RecordInfo(Type type, BaseSnapshotInfo::ItemData &itemData) + { + size_t infoIdx = static_cast(type); + infos_.at(infoIdx)->Record(itemData); + } + + EcmaVM *vm_; + const JSPandaFile *jsPandaFile_; + CVector> infos_ {}; +}; +#undef DATA_TYPE_LIST +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_AOT_SNAPSHOT_AOT_SNAPSHOT_DATA_H + diff --git a/ecmascript/compiler/aot_snapshot/snapshot_global_data.cpp b/ecmascript/compiler/aot_snapshot/snapshot_global_data.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b0b6206cfc521101065058c990640a10c0b0ce3f --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/snapshot_global_data.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/aot_snapshot/snapshot_global_data.h" + +#include "ecmascript/compiler/aot_snapshot/aot_snapshot_constants.h" +#include "ecmascript/jspandafile/program_object.h" +#include "ecmascript/tagged_array-inl.h" + +namespace panda::ecmascript::kungfu { +JSHandle BaseReviseData::GetConstantPoolFromSnapshotData(JSThread *thread, + const SnapshotGlobalData *globalData, + uint32_t dataIdx, uint32_t cpArrayIdx) +{ + JSHandle data(thread, globalData->GetData()); + JSHandle cpArr(thread, data->Get(dataIdx + SnapshotGlobalData::CP_ARRAY_OFFSET)); + return JSHandle(thread, cpArr->Get(cpArrayIdx)); +} + +void MethodReviseData::Resolve(JSThread *thread, const SnapshotGlobalData *globalData, + const CMap, uint32_t> &methodToEntryIndexMap) +{ + for (auto &item: data_) { + JSHandle newCP = GetConstantPoolFromSnapshotData(thread, globalData, + item.dataIdx_, item.cpArrayIdx_); + JSTaggedValue val = newCP->GetObjectFromCache(item.constpoolIdx_); + uint32_t methodOffset = static_cast(val.GetInt()); + if (thread->GetEcmaVM()->GetJSOptions().IsEnableCompilerLogSnapshot()) { + LOG_COMPILER(INFO) << "[aot-snapshot] store AOT entry index of method (offset: " << methodOffset << ") "; + } + std::string name = globalData->GetFileNameByDataIdx(item.dataIdx_).c_str(); + AnFileInfo::FuncEntryIndexKey key = std::make_pair(name, methodOffset); + if (methodToEntryIndexMap.find(key) != methodToEntryIndexMap.end()) { + uint32_t entryIndex = methodToEntryIndexMap.at(key); + newCP->SetObjectToCache(thread, item.constpoolIdx_, JSTaggedValue(entryIndex)); + } + } +} + +void LiteralReviseData::Resolve(JSThread *thread, const SnapshotGlobalData *globalData, + const CMap, uint32_t> &methodToEntryIndexMap) +{ + for (auto &item: data_) { + JSHandle newCP = GetConstantPoolFromSnapshotData(thread, globalData, + item.dataIdx_, item.cpArrayIdx_); + + JSTaggedValue val = newCP->GetObjectFromCache(item.constpoolIdx_); + AOTLiteralInfo *aotLiteralInfo = AOTLiteralInfo::Cast(val.GetTaggedObject()); + uint32_t aotLiteralInfoLen = aotLiteralInfo->GetCacheLength(); + std::string name = globalData->GetFileNameByDataIdx(item.dataIdx_).c_str(); + for (uint32_t i = 0; i < 
aotLiteralInfoLen; ++i) { + JSTaggedValue methodOffsetVal = aotLiteralInfo->GetObjectFromCache(i); + if (methodOffsetVal.GetInt() == -1) { + continue; + } + uint32_t methodOffset = static_cast(methodOffsetVal.GetInt()); + if (thread->GetEcmaVM()->GetJSOptions().IsEnableCompilerLogSnapshot()) { + LOG_COMPILER(INFO) << "[aot-snapshot] store AOT entry index of method (offset: " + << methodOffset << ") "; + } + AnFileInfo::FuncEntryIndexKey key = std::make_pair(name, methodOffset); + uint32_t entryIndex = methodToEntryIndexMap.at(key); + aotLiteralInfo->SetObjectToCache(thread, i, JSTaggedValue(entryIndex)); + } + } +} + +void SnapshotGlobalData::AddSnapshotCpArrayToData(JSThread *thread, CString fileName, + JSHandle snapshotCpArray) +{ + if (isFirstData_) { + isFirstData_ = false; + } else { + curDataIdx_ += AOTSnapshotConstants::SNAPSHOT_DATA_ITEM_SIZE; + } + JSHandle nameStr = thread->GetEcmaVM()->GetFactory()->NewFromStdString(fileName.c_str()); + JSHandle dataHandle(thread, data_); + dataHandle->Set(thread, curDataIdx_, nameStr); + curSnapshotCpArray_ = snapshotCpArray.GetTaggedValue(); + dataHandle->Set(thread, curDataIdx_ + CP_ARRAY_OFFSET, curSnapshotCpArray_); + dataIdxToFileNameMap_[curDataIdx_] = fileName; +} + +CString SnapshotGlobalData::GetFileNameByDataIdx(uint32_t dataIdx) const +{ + auto it = dataIdxToFileNameMap_.find(dataIdx); + if (it != dataIdxToFileNameMap_.end()) { + return it->second; + } + LOG_COMPILER(FATAL) << "Can't find snapshot data by index '" << dataIdx << "'"; + UNREACHABLE(); +} +} // namespace panda::ecmascript diff --git a/ecmascript/compiler/aot_snapshot/snapshot_global_data.h b/ecmascript/compiler/aot_snapshot/snapshot_global_data.h new file mode 100644 index 0000000000000000000000000000000000000000..48df77576bf3a114fe834f36909cf9ee9877d3ec --- /dev/null +++ b/ecmascript/compiler/aot_snapshot/snapshot_global_data.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef ECMASCRIPT_COMPILER_AOT_SNAPSHOT_SNAPSHOT_GLOBAL_DATA_H +#define ECMASCRIPT_COMPILER_AOT_SNAPSHOT_SNAPSHOT_GLOBAL_DATA_H + +#include "ecmascript/ecma_vm.h" +#include "ecmascript/object_factory.h" +#include "ecmascript/compiler/aot_snapshot/aot_snapshot_constants.h" + +namespace panda::ecmascript::kungfu { +class SnapshotGlobalData; +/* + * The information that needs to be revised before saving the 'ai' file is recorded in SnapshotReviseData. + * Currently, the revised information includes the entry index of each method in the 'an' file. 
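+ * For example, a METHOD revise item initially leaves the raw method offset in its
+ * snapshot constant pool slot; once the 'an' layout is known, Resolve() looks the
+ * (fileName, methodOffset) pair up in methodToEntryIndexMap and rewrites the slot
+ * to the method's final entry index.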
+ */ +#define REVISE_DATA_TYPE_LIST(V) \ + V(METHOD, Method) \ + V(LITERAL, Literal) + +class BaseReviseData { +public: + struct ItemData { + uint32_t dataIdx_; + uint32_t cpArrayIdx_; + int32_t constpoolIdx_; + }; + + BaseReviseData() = default; + virtual ~BaseReviseData() = default; + + void Record(ItemData data) + { + data_.emplace_back(data); + } + + virtual void Resolve(JSThread *thread, const SnapshotGlobalData *globalData, + const CMap, uint32_t> &methodToEntryIndexMap) = 0; + +protected: + JSHandle GetConstantPoolFromSnapshotData(JSThread *thread, const SnapshotGlobalData *globalData, + uint32_t dataIdx, uint32_t cpArrayIdx); + + using ReviseData = std::vector; + ReviseData data_ {}; +}; + +#define DEFINE_REVISE_CLASS(V, name) \ + class name##ReviseData final : public BaseReviseData { \ + public: \ + virtual void Resolve(JSThread *thread, const SnapshotGlobalData *globalData, \ + const CMap, uint32_t> &methodToEntryIndexMap) override; \ + }; + + REVISE_DATA_TYPE_LIST(DEFINE_REVISE_CLASS) +#undef DEFINE_REVISE_CLASS + +class SnapshotReviseInfo { +public: + enum class Type { +#define DEFINE_TYPE(type, ...) type, + REVISE_DATA_TYPE_LIST(DEFINE_TYPE) +#undef DEFINE_TYPE + }; + + SnapshotReviseInfo() + { +#define ADD_REVISE_DATA(V, name) \ + reviseData_.emplace_back(std::make_unique()); + REVISE_DATA_TYPE_LIST(ADD_REVISE_DATA) +#undef ADD_REVISE_DATA + } + ~SnapshotReviseInfo() = default; + + void Record(Type type, BaseReviseData::ItemData data) + { + size_t reviseDataIdx = static_cast(type); + reviseData_.at(reviseDataIdx)->Record(data); + } + + void ResolveData(JSThread *thread, const SnapshotGlobalData *globalData, + const CMap, uint32_t> &methodToEntryIndexMap) + { + for (auto &data : reviseData_) { + data->Resolve(thread, globalData, methodToEntryIndexMap); + } + } +private: + CVector> reviseData_ {}; +}; +#undef REVISE_DATA_TYPE_LIST + +class SnapshotGlobalData { +public: + static constexpr uint32_t CP_ARRAY_OFFSET = 1; + + SnapshotGlobalData() = default; + ~SnapshotGlobalData() = default; + + void Iterate(const RootVisitor &v) + { + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&data_))); + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&curSnapshotCpArray_))); + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&hclassInfo_))); + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&arrayInfo_))); + v(Root::ROOT_VM, ObjectSlot(reinterpret_cast(&constantIndexInfo_))); + } + + void SetData(JSTaggedValue data) + { + data_ = data; + } + + JSTaggedValue GetData() const + { + return data_; + } + + uint32_t GetCurDataIdx() const + { + return curDataIdx_; + } + + JSTaggedValue GetCurSnapshotCpArray() const + { + return curSnapshotCpArray_; + } + + void AddSnapshotCpArrayToData(JSThread *thread, CString fileName, JSHandle snapshotCpArray); + + CString GetFileNameByDataIdx(uint32_t dataIdx) const; + + void RecordReviseData(SnapshotReviseInfo::Type type, BaseReviseData::ItemData data) + { + reviseInfo_.Record(type, data); + } + + void ResolveSnapshotData(JSThread *thread, + const CMap, uint32_t> &methodToEntryIndexMap) + { + reviseInfo_.ResolveData(thread, this, methodToEntryIndexMap); + } + + void RecordCpArrIdx(int32_t constantPoolId, uint32_t cpArrIdx) + { + dataIdxToCpArrIdxMap_[curDataIdx_][constantPoolId] = cpArrIdx; + } + + uint32_t GetCpArrIdxByConstanPoolId(int32_t constantPoolId) + { + return GetCpIdToCpArrIdxMap().at(constantPoolId); + } + + const CUnorderedMap& GetCpIdToCpArrIdxMap() + { + return dataIdxToCpArrIdxMap_.at(curDataIdx_); + } + + JSTaggedValue GetHClassInfo() + { + return 
hclassInfo_; + } + + JSTaggedValue GetArrayInfo() + { + return arrayInfo_; + } + + JSTaggedValue GetConstantIndexInfo() + { + return constantIndexInfo_; + } + + void StoreHClassInfo(JSHandle info) + { + hclassInfo_ = info.GetTaggedValue(); + } + + void StoreArrayInfo(JSHandle info) + { + arrayInfo_ = info.GetTaggedValue(); + } + + void StoreConstantIndexInfo(JSHandle info) + { + constantIndexInfo_ = info.GetTaggedValue(); + } + +private: + using CpIdToCpArrIdxMap = CUnorderedMap; + + bool isFirstData_ {true}; + uint32_t curDataIdx_ {0}; + JSTaggedValue data_ {JSTaggedValue::Hole()}; + JSTaggedValue curSnapshotCpArray_ {JSTaggedValue::Hole()}; + CUnorderedMap dataIdxToCpArrIdxMap_; + CUnorderedMap dataIdxToFileNameMap_ {}; + + SnapshotReviseInfo reviseInfo_; + JSTaggedValue hclassInfo_ {JSTaggedValue::Hole()}; + JSTaggedValue arrayInfo_ {JSTaggedValue::Hole()}; + JSTaggedValue constantIndexInfo_ {JSTaggedValue::Hole()}; +}; +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_AOT_SNAPSHOT_SNAPSHOT_GLOBAL_DATA_H diff --git a/ecmascript/compiler/argument_accessor.cpp b/ecmascript/compiler/argument_accessor.cpp index 9835e50b47c0bb43f01decef4f8e26b78ee38eb4..d1544279a42930e126b5faa71c3d00d605afe35e 100644 --- a/ecmascript/compiler/argument_accessor.cpp +++ b/ecmascript/compiler/argument_accessor.cpp @@ -156,8 +156,14 @@ GateRef ArgumentAccessor::GetFrameArgsIn(GateRef gate, FrameArgIdx idx) ASSERT(gateAcc.GetOpCode(gate) == OpCode::JS_BYTECODE || gateAcc.GetOpCode(gate) == OpCode::FRAME_STATE); GateRef frameArgs = Circuit::NullGate(); if (gateAcc.GetOpCode(gate) == OpCode::JS_BYTECODE) { - frameArgs = gateAcc.GetFrameState(gate); - ASSERT(gateAcc.GetOpCode(frameArgs) == OpCode::FRAME_ARGS); + GateRef frameState = gateAcc.GetFrameState(gate); + OpCode op = gateAcc.GetOpCode(frameState); + if (op == OpCode::FRAME_STATE) { + frameArgs = gateAcc.GetValueIn(frameState, 0); // 0: frame args + } else { + ASSERT(op == OpCode::FRAME_ARGS); + frameArgs = frameState; + } } else { frameArgs = gateAcc.GetValueIn(gate, 0); // 0: frame args } diff --git a/ecmascript/compiler/array_bounds_check_elimination.cpp b/ecmascript/compiler/array_bounds_check_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b33611dd4875e861be79b5cf84dfb32ad72d519 --- /dev/null +++ b/ecmascript/compiler/array_bounds_check_elimination.cpp @@ -0,0 +1,940 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ecmascript/compiler/array_bounds_check_elimination.h" + +namespace panda::ecmascript::kungfu { +void ArrayBoundsCheckElimination::Run() +{ + bounds_.resize(circuit_->GetMaxGateId() + 1, nullptr); // 1: +1 for size + indexCheckInfo_.resize(circuit_->GetMaxGateId() + 1, nullptr); + graphLinearizer_.SetScheduleJSOpcode(); + graphLinearizer_.LinearizeGraph(); + + CalcBounds(graphLinearizer_.GetEntryRegion(), nullptr); + + if (IsLogEnabled()) { + LOG_COMPILER(INFO) << ""; + LOG_COMPILER(INFO) << "\033[34m" + << "====================" + << " After array bounds check elimination " + << "[" << GetMethodName() << "]" + << "====================" + << "\033[0m"; + circuit_->PrintAllGatesWithBytecode(); + LOG_COMPILER(INFO) << "\033[34m" << "========================= End ==========================" << "\033[0m"; + } +} + +/* + i_lower + c_lower <= x <= i_upper + c_upper + Initially, when nothing about the bounds is known yet, every instrution has the bounds: + MIN <= x <= MAX +*/ +ArrayBoundsCheckElimination::Bound::Bound() +{ + lower_ = INT_MIN; + upper_ = INT_MAX; + lowerGate_ = Circuit::NullGate(); + upperGate_ = Circuit::NullGate(); +} + +ArrayBoundsCheckElimination::Bound::Bound(int lower, GateRef lowerGate, int upper, GateRef upperGate) +{ + lower_ = lower; + upper_ = upper; + lowerGate_ = lowerGate; + upperGate_ = upperGate; +} + +ArrayBoundsCheckElimination::Bound::Bound(TypedBinOp op, GateRef gate, int constant) +{ + switch (op) { + case TypedBinOp::TYPED_EQ: + lower_ = constant; + lowerGate_ = gate; + upper_ = constant; + upperGate_ = gate; + break; + case TypedBinOp::TYPED_NOTEQ: + lower_ = INT_MIN; + lowerGate_ = Circuit::NullGate(); + upper_ = INT_MAX; + upperGate_ = Circuit::NullGate(); + if (gate == Circuit::NullGate()) { + if (constant == INT_MIN) { + lower_++; + } + if (constant == INT_MAX) { + upper_--; + } + } + break; + case TypedBinOp::TYPED_GREATEREQ: + lower_ = constant; + lowerGate_ = gate; + upper_ = INT_MAX; + upperGate_ = Circuit::NullGate(); + break; + case TypedBinOp::TYPED_LESSEQ: + lower_ = INT_MIN; + lowerGate_ = Circuit::NullGate(); + upper_ = constant; + upperGate_ = gate; + break; + default: + UNREACHABLE(); + } +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::AndOp(Bound *bound, Bound *b) +{ + // Update lower bound + if (bound->lowerGate_ == b->lowerGate_) { + bound->lower_ = std::max(bound->lower_, b->lower_); + } + if (b->HasLower()) { + bool set = true; + if (bound->lowerGate_ != Circuit::NullGate() && b->lowerGate_ != Circuit::NullGate()) { + auto boundLowerGateRegion = graphLinearizer_.GateToRegion(bound->lowerGate_); + auto bLowerGateRegion = graphLinearizer_.GateToRegion(b->lowerGate_); + int32_t boundLowerDominatorDepth = -1; + if (boundLowerGateRegion) { + boundLowerDominatorDepth = boundLowerGateRegion->GetDepth(); + } + int32_t bLowerDominatorDepth = -1; + if (bLowerGateRegion) { + bLowerDominatorDepth = bLowerGateRegion->GetDepth(); + } + set = (boundLowerDominatorDepth > bLowerDominatorDepth); + } + if (set) { + bound->lower_ = b->lower_; + bound->lowerGate_ = b->lowerGate_; + } + } + + // Update upper bound + if (bound->upperGate_ == b->upperGate_) { + bound->upper_ = std::min(bound->upper_, b->upper_); + } + if (b->HasUpper()) { + bool set = true; + if (bound->upperGate_ != Circuit::NullGate() && b->upperGate_ != Circuit::NullGate()) { + auto boundUpperGateRegion = graphLinearizer_.GateToRegion(bound->upperGate_); + auto bUpperGateRegion = graphLinearizer_.GateToRegion(b->upperGate_); + int32_t 
boundUpperDominatorDepth = -1; + if (boundUpperGateRegion) { + boundUpperDominatorDepth = boundUpperGateRegion->GetDepth(); + } + int32_t bUpperDominatorDepth = -1; + if (bUpperGateRegion) { + bUpperDominatorDepth = bUpperGateRegion->GetDepth(); + } + set = (boundUpperDominatorDepth > bUpperDominatorDepth); + } + if (set) { + bound->upper_ = b->upper_; + bound->upperGate_ = b->upperGate_; + } + } + + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::OrOp(Bound *bound, Bound *b) +{ + // Update lower bound + if (bound->lowerGate_ != b->lowerGate_) { + bound->lowerGate_ = Circuit::NullGate(); + bound->lower_ = INT_MIN; + } else { + bound->lower_ = std::min(bound->lower_, b->lower_); + } + // Update upper bound + if (bound->upperGate_ != b->upperGate_) { + bound->upperGate_ = Circuit::NullGate(); + bound->upper_ = INT_MAX; + } else { + bound->upper_ = std::max(bound->upper_, b->upper_); + } + + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoConstant(GateRef gate) +{ + int constValue = static_cast(acc_.GetConstantValue(gate)); + return new Bound(constValue, Circuit::NullGate(), constValue, Circuit::NullGate()); +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoArithmeticOp(GateRef gate) +{ + auto op = acc_.GetTypedBinaryOp(gate); + auto x = acc_.GetValueIn(gate, 0); + auto y = acc_.GetValueIn(gate, 1); + if (!acc_.IsConstant(x) || !acc_.IsConstant(y)) { // One of the operands must be non-constant! + if (op == TypedBinOp::TYPED_AND && (acc_.IsConstant(x) || acc_.IsConstant(y))) { + int constValue = 0; + if (acc_.IsConstant(x)) { + constValue = static_cast(acc_.GetConstantValue(x)); + } else { + constValue = static_cast(acc_.GetConstantValue(y)); + } + if (constValue >= 0) { + return new Bound(0, Circuit::NullGate(), constValue, Circuit::NullGate()); + } + } else if (op == TypedBinOp::TYPED_MOD) { + Bound *xBound = GetBound(x); + if (xBound->Lower() >= 0 && xBound->LowerGate() == Circuit::NullGate() && IsArrayLength(y)) { + return new Bound(0, Circuit::NullGate(), -1, y); + } else if (xBound->HasLower() && xBound->Lower() >= 0 && acc_.IsConstant(y) + && acc_.GetConstantValue(y) != 0) { + int constValue = static_cast(acc_.GetConstantValue(y)); + if (constValue != INT_MIN) { + return new Bound(0, Circuit::NullGate(), abs(constValue) - 1, Circuit::NullGate()); + } else { + return new Bound(); + } + } else { + return new Bound(); + } + } else if (((acc_.IsConstant(x) || acc_.IsConstant(y)) && op == TypedBinOp::TYPED_ADD) || + (acc_.IsConstant(y) && op == TypedBinOp::TYPED_SUB)) { + // x is constant, y is variable. 
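+            // Sketch of the fold below: with the constant c moved into x, a bound
+            // [lower, upper] on y yields [lower + c, upper + c] for the result, and
+            // is discarded whenever either endpoint would overflow.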
+ if (acc_.IsConstant(y)) { + std::swap(x, y); + } + + // Add, Constant now in x + int constValue = static_cast(acc_.GetConstantValue(x)); + if (op == TypedBinOp::TYPED_SUB) { + constValue = -constValue; + } + + Bound *bound = GetBound(y); + if (!bound->HasUpper() || !bound->HasLower()) { + return new Bound(); + } + + int lower = bound->Lower(); + int upper = bound->Upper(); + int newLower = lower + constValue; + int newUpper = upper + constValue; + bool overflow = ((constValue < 0 && (newLower > lower)) || + (constValue > 0 && (newUpper < upper))); + if (overflow) { + return new Bound(); + } else { + return new Bound(newLower, bound->LowerGate(), newUpper, bound->UpperGate()); + } + } else if (op == TypedBinOp::TYPED_SUB) { + Bound *bound = GetBound(x); + if (bound->LowerGate() == y) { + return new Bound(TypedBinOp::TYPED_GREATEREQ, Circuit::NullGate(), bound->Lower()); + } else { + return new Bound(); + } + } else { + return new Bound(); + } + } + return nullptr; +} + +bool ArrayBoundsCheckElimination::InLoop(GateRef loopHeader, GateRef gate) +{ + while (gate != acc_.GetStateRoot()) { + if (gate == loopHeader) { + return true; + } else { + gate = acc_.GetState(gate, 0); + } + } + return false; +} + +/* +Do phi +*/ +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::DoPhi(GateRef gate) +{ + Bound *bound = nullptr; + size_t valueSize = acc_.GetInValueCount(gate); + GateRef stateIn = acc_.GetState(gate); + bool isLoopHead = acc_.IsLoopHead(stateIn); + bool hasUpper = true; + bool hasLower = true; + for (size_t i = 0; i < valueSize; i++) { + GateRef value = acc_.GetValueIn(gate, i); + // Check if instruction is connected with phi itself + if (isLoopHead && acc_.GetOpCode(value) == OpCode::TYPED_UNARY_OP + && InLoop(stateIn, value)) { + auto unOp = acc_.GetTypedUnAccessor(value).GetTypedUnOp(); + switch (unOp) { + case TypedUnOp::TYPED_INC: + hasUpper = false; + break; + case TypedUnOp::TYPED_DEC: + hasLower = false; + break; + default: + break; + } + continue; + } + + Bound *vBound = GetBound(value); + Bound *curBound; + GateRef curGate; + int curConstant; + GetInstrAndConstValueFromOp(value, curGate, curConstant); + if (!vBound->HasUpper() || !vBound->HasLower()) { + curBound = new Bound(curConstant, curGate, curConstant, curGate); + } else { + curBound = vBound; + } + + if (curBound) { + if (!bound) { + bound = curBound->Copy(); + } else { + bound = OrOp(bound, curBound); + } + } else { + bound = new Bound(); + break; + } + } + + if (!hasUpper) { + bound->RemoveUpper(); + } + if (!hasLower) { + bound->RemoveLower(); + } + return bound; +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::VisitGate(GateRef gate) +{ + OpCode op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::CONSTANT: + return DoConstant(gate); + case OpCode::TYPED_BINARY_OP: + return DoArithmeticOp(gate); + case OpCode::VALUE_SELECTOR: + return DoPhi(gate); + default: + return nullptr; + } + return nullptr; +} + +// y = a + b - c ..... 
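+// Peels constant add/sub chains off a gate so that gate == instrValue + constValue on
+// return; e.g. gate = (v + 2) - 5 yields instrValue = v, constValue = -3, and a fully
+// constant chain folds to instrValue = Circuit::NullGate() with the value in constValue.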
+void ArrayBoundsCheckElimination::GetInstrAndConstValueFromOp(GateRef gate, GateRef& instrValue, int& constValue) +{ + int base = 0; + constValue = 0; + instrValue = gate; + if (acc_.IsConstant(gate)) { + constValue = static_cast(acc_.GetConstantValue(gate)); + instrValue = Circuit::NullGate(); + } else { + while (acc_.GetOpCode(gate) == OpCode::TYPED_BINARY_OP) { + auto op = acc_.GetTypedBinaryOp(gate); + auto x = acc_.GetValueIn(gate, 0); + auto y = acc_.GetValueIn(gate, 1); + GateRef other = x; + if ((op == TypedBinOp::TYPED_ADD && (acc_.IsConstant(x) || acc_.IsConstant(y))) + || (op == TypedBinOp::TYPED_SUB && acc_.IsConstant(y))) { + int value = 0; + if (acc_.IsConstant(x)) { + value = static_cast(acc_.GetConstantValue(x)); + other = y; + } else { + value = static_cast(acc_.GetConstantValue(y)); + other = x; + } + + while (acc_.GetOpCode(other) == OpCode::INDEX_CHECK) { // Get IndexCheck Index + other = acc_.GetValueIn(other, 1); + } + + if (op == TypedBinOp::TYPED_SUB) { + value = -value; + } + + if (acc_.IsConstant(other)) { + base += value + static_cast(acc_.GetConstantValue(other)); + constValue = base; + instrValue = Circuit::NullGate(); + break ; + } else { + base += value; + constValue = base; + instrValue = other; + gate = other; + } + } else { + break; + } + } + } +} + +ArrayBoundsCheckElimination::Bound *ArrayBoundsCheckElimination::GetBound(GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return nullptr; + } + if (!bounds_[acc_.GetId(gate)]) { + bounds_[acc_.GetId(gate)] = new BoundStack(chunk_); + Bound *bound = VisitGate(gate); + if (bound) { + bounds_[acc_.GetId(gate)]->push_back(bound); + } + if (bounds_[acc_.GetId(gate)]->size() == 0) { + bounds_[acc_.GetId(gate)]->push_back(new Bound()); + } + } else if (bounds_[acc_.GetId(gate)]->size() == 0) { + return new Bound(); + } + return bounds_[acc_.GetId(gate)]->back(); +} + +void ArrayBoundsCheckElimination::UpdateBound(IntegerStack &pushed, GateRef gate, Bound *bound) +{ + if (acc_.IsConstant(gate)) { + // No bound update for constants + return; + } + if (!bounds_[acc_.GetId(gate)]) { + GetBound(gate); + } + Bound* top = nullptr; + if (bounds_[acc_.GetId(gate)]->size() > 0) { + top = bounds_[acc_.GetId(gate)]->back(); + } + if (top) { + bound = AndOp(bound, top); + } + bounds_[acc_.GetId(gate)]->push_back(bound); + pushed.push_back(acc_.GetId(gate)); +} + +/* +x op y + constValue +for example: + x >= Circuit::NullGate() + 0 + x < Length + 0 +*/ +void ArrayBoundsCheckElimination::UpdateBound(IntegerStack &pushed, GateRef x, TypedBinOp op, + GateRef instrValue, int constValue) +{ + if (op == TypedBinOp::TYPED_GREATER) { // x < 3 -> x <= 4 + op = TypedBinOp::TYPED_GREATEREQ; + // Cannot Represent c > INT_MAX, do not update bounds + if (constValue == INT_MAX && instrValue == Circuit::NullGate()) { + return; + } else { + constValue++; + } + } else if (op == TypedBinOp::TYPED_LESS) { // x > 3 -> x >= 2 + op = TypedBinOp::TYPED_LESSEQ; + // Cannot Represent c < INT_MIN, do not update bounds + if (constValue == INT_MIN && instrValue == Circuit::NullGate()) { + return; + } else { + constValue--; + } + } + Bound *bound = new Bound(op, instrValue, constValue); + UpdateBound(pushed, x, bound); +} + +// Add if condition when x is a variable, x op y +void ArrayBoundsCheckElimination::AddIfCondition(IntegerStack &pushed, GateRef x, GateRef y, TypedBinOp op) +{ + if (acc_.IsConstant(x)) { // x must be non-constant! 
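+        // Bounds are tracked per variable gate only (UpdateBound likewise skips
+        // constants), so a constant x carries nothing worth recording.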
+ return; + } + int constValue; + GateRef instrValue; + GetInstrAndConstValueFromOp(y, instrValue, constValue); + UpdateBound(pushed, x, op, instrValue, constValue); +} + +bool ArrayBoundsCheckElimination::IsArrayLength(GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return false; + } + OpCode op = acc_.GetOpCode(gate); + switch (op) { + case OpCode::LOAD_ARRAY_LENGTH: + case OpCode::LOAD_TYPED_ARRAY_LENGTH: + return true; + default: + return false; + } + UNREACHABLE(); + return false; +} + +bool ArrayBoundsCheckElimination::InArrayBound(Bound *bound, GateRef length, GateRef array) +{ + if (!bound || array == Circuit::NullGate()) { + return false; + } + + if (bound->Lower() >= 0 && bound->LowerGate() == Circuit::NullGate() && + bound->Upper() < 0 && bound->UpperGate() != Circuit::NullGate()) { + if (length != Circuit::NullGate() && bound->UpperGate() == length) { + return true; + } + } + + return false; +} + +void ArrayBoundsCheckElimination::RemoveIndexCheck(GateRef gate) +{ + ASSERT(acc_.GetDependCount(gate) == 1); + ASSERT(acc_.GetStateCount(gate) == 1); + ASSERT(acc_.GetInValueCount(gate) == 2); // 2: ValueCount + + GateRef depend = acc_.GetDep(gate); + GateRef state = acc_.GetState(gate); + GateRef value = acc_.GetValueIn(gate, 1); // Index + + acc_.ReplaceGate(gate, state, depend, value); +} + +bool ArrayBoundsCheckElimination::CheckLoop(GateRef array, GateRef lowerGate, int lower, GateRef upperGate, int upper) +{ + if (IsArrayLength(upperGate) && acc_.GetValueIn(upperGate, 0) == array) { + if (upper >= 0) { + return false; + } + } + if (IsArrayLength(lowerGate) && acc_.GetValueIn(lowerGate, 0) == array) { + if (lower >= 0) { + return false; + } + } + return true; +} + +bool ArrayBoundsCheckElimination::LoopInvariant(GateRegion *loopHeader, GateRef gate) +{ + if (gate == Circuit::NullGate()) { + return true; + } + auto gateRegion = graphLinearizer_.GateToRegion(gate); + GateRegion* g = loopHeader->GetDominator(); + while (g != nullptr) { + if (g == gateRegion) { + return true; + } + if (g == g->GetDominator()) { // entry + break ; + } + g = g->GetDominator(); + } + return false; +} + +GateRef ArrayBoundsCheckElimination::Predicate(GateRef left, TypedBinOp cond, GateRef right) +{ + return builder_.InsertRangeCheckPredicate(left, cond, right); +} + +GateRef ArrayBoundsCheckElimination::PredicateCmpWithConst(GateRef left, TypedBinOp cond, int32_t right) +{ + GateRef constGate = builder_.Int32(right); + return Predicate(left, cond, constGate); +} + +GateRef ArrayBoundsCheckElimination::PredicateAdd(GateRef left, int32_t leftConst, TypedBinOp cond, GateRef right) +{ + GateRef constGate = builder_.Int32(leftConst); + GateRef binaryOpGate = builder_.InsertTypedBinaryop(left, constGate, GateType::NumberType(), + GateType::NumberType(), GateType::AnyType(), + PGOTypeRef::NoneType(), TypedBinOp::TYPED_ADD); + return Predicate(binaryOpGate, cond, right); +} + +GateRef ArrayBoundsCheckElimination::PredicateAddCmpWithConst(GateRef left, int32_t leftConst, + TypedBinOp cond, int32_t right) +{ + GateRef constGate = builder_.Int32(right); + return PredicateAdd(left, leftConst, cond, constGate); +} + +void ArrayBoundsCheckElimination::LoopInvariantMotionForIndexCheck(GateRef array, GateRef length, + GateRef lowerGate, int lower, + GateRef upperGate, int upper, + bool isTypedArray) +{ + // lower > 0 + if (lowerGate != Circuit::NullGate()) { + if (lower == 0) { + // lowerGate >= 0 + PredicateCmpWithConst(lowerGate, TypedBinOp::TYPED_GREATEREQ, 0); + } else if (lower > 0) { + // lowerGate + 
lower >= 0 + PredicateAddCmpWithConst(lowerGate, lower, TypedBinOp::TYPED_GREATEREQ, 0); + } else { + // lowerGate + lower < 0 + // lower < 0 + // lowerGate < -lower + lower++; + lower = -lower; + PredicateCmpWithConst(lowerGate, TypedBinOp::TYPED_GREATER, lower); + } + } + + // LOAD LENGTH if necessary + if (length == Circuit::NullGate()) { + length = builder_.InsertLoadArrayLength(array, isTypedArray); + } + + if (upperGate == Circuit::NullGate()) { + ASSERT(upper >= 0); + PredicateCmpWithConst(length, TypedBinOp::TYPED_GREATER, upper); + } else { + if (upper == 0) { + Predicate(upperGate, TypedBinOp::TYPED_LESS, length); + } else if (upper > 0) { + // upperGate + upper < length + PredicateAdd(upperGate, upper, TypedBinOp::TYPED_LESS, length); + } else { + // upperGate + upper < length + // upper < 0 + // upperGate < length + (-upper) + PredicateAdd(length, -upper, TypedBinOp::TYPED_GREATER, upperGate); + } + } +} + +void ArrayBoundsCheckElimination::ProcessIndexCheck(GateRegion *loopHeader, GateRef gate) +{ + auto length = acc_.GetValueIn(gate, 0); + auto array = acc_.GetValueIn(length, 0); + auto index = acc_.GetValueIn(gate, 1); + Bound *indexBound = GetBound(index); + if (!indexBound->HasLower() || !indexBound->HasUpper()) { + return; + } + + if (InArrayBound(indexBound, length, array)) { + RemoveIndexCheck(gate); + } else if (loopHeader) { + if (!LoopInvariant(loopHeader, array) + || !LoopInvariant(loopHeader, indexBound->LowerGate()) + || !LoopInvariant(loopHeader, indexBound->UpperGate()) + || (indexBound->LowerGate() == Circuit::NullGate() && indexBound->Lower() < 0) + || (indexBound->UpperGate() == Circuit::NullGate() && indexBound->Upper() < 0)) { + return; + } + + ASSERT(length != Circuit::NullGate()); + bool isTypedArray = false; + if (acc_.GetOpCode(length) == OpCode::LOAD_TYPED_ARRAY_LENGTH) { + isTypedArray = true; + } + + // Length instrution + if (!LoopInvariant(loopHeader, length)) { + // Generate length instruction yourself + length = Circuit::NullGate(); + } + + // Insert Before loopHeader State, and if find IF_TRUE and IF_FALSE, insert after the DEPEND_RELAY + // if find MERGE, insert after DEPEND_SELECTOR + GateRef insertAfter = acc_.GetState(loopHeader->GetState(), 0); // after end + GateRef stateIn = insertAfter; + GateRef dependIn = insertAfter; + acc_.GetStateInAndDependIn(insertAfter, stateIn, dependIn); + + if (!CheckLoop(array, indexBound->LowerGate(), indexBound->Lower(), + indexBound->UpperGate(), indexBound->Upper())) { + return; + } + + Environment env(stateIn, dependIn, {}, circuit_, &builder_); + LoopInvariantMotionForIndexCheck(array, length, indexBound->LowerGate(), indexBound->Lower(), + indexBound->UpperGate(), indexBound->Upper(), isTypedArray); + RemoveIndexCheck(gate); + } +} + +void ArrayBoundsCheckElimination::ProcessIf(IntegerStack &pushed, GateRegion *parent, OpCode cond) +{ + auto& gateLists = parent->GetGates(); + for (int i = static_cast(gateLists.size()) - 1; i >= 0; i--) { // Found the last BinaryOp + GateRef gate = gateLists[i]; + if (gate == Circuit::NullGate()) continue; + OpCode opGate = acc_.GetOpCode(gate); + if (opGate != OpCode::TYPED_BINARY_OP) { + continue ; + } + + TypedBinOp op = acc_.GetTypedBinaryOp(gate); + GateRef x = acc_.GetValueIn(gate, 0); + GateRef y = acc_.GetValueIn(gate, 1); + + switch (op) { + case TypedBinOp::TYPED_LESS: + case TypedBinOp::TYPED_LESSEQ: + case TypedBinOp::TYPED_GREATER: + case TypedBinOp::TYPED_GREATEREQ: + case TypedBinOp::TYPED_EQ: + case TypedBinOp::TYPED_NOTEQ: + if (cond == OpCode::IF_TRUE) 
{ + op = TypedBinaryMetaData::GetRevCompareOp(op); + } + AddIfCondition(pushed, x, y, op); + AddIfCondition(pushed, y, x, TypedBinaryMetaData::GetSwapCompareOp(op)); + break; + default: + break; + } + break; + } +} + +bool ArrayBoundsCheckElimination::Contain(GateLists &gateLists, GateRef gate) +{ + for (size_t i = 0; i < gateLists.size(); i++) { + if (gateLists[i] == gate) { + return true; + } + } + return false; +} + +void ArrayBoundsCheckElimination::AddAccessIndexedInfo(GateLists &indices, GateRef gate, int idx, GateRef indexCheck) +{ + IndexCheckInfo *indexCheckInfo = indexCheckInfo_[acc_.GetId(gate)]; + if (indexCheckInfo == nullptr) { + indexCheckInfo = new IndexCheckInfo(chunk_); + indexCheckInfo_[acc_.GetId(gate)] = indexCheckInfo; + indices.push_back(gate); + indexCheckInfo->min_ = idx; + indexCheckInfo->max_ = idx; + } else if (idx >= indexCheckInfo->min_ && idx <= indexCheckInfo->max_) { + RemoveIndexCheck(indexCheck); + return; + } + indexCheckInfo->min_ = std::min(indexCheckInfo->min_, idx); + indexCheckInfo->max_ = std::max(indexCheckInfo->max_, idx); + indexCheckInfo->list_.push_back(indexCheck); +} + +void ArrayBoundsCheckElimination::InBlockMotion(GateLists &indexChecked, GateLists &arrays) +{ + GateLists indices(chunk_); + for (size_t i = 0; i < arrays.size(); i++) { + int maxConstant = -1; + GateLists listConstant(chunk_); + GateRef arrayGate = arrays[i]; + for (size_t j = 0; j < indexChecked.size(); j++) { + GateRef indexCheck = indexChecked[j]; + // INDEX_CHECK may be dead + if (acc_.GetOpCode(indexCheck) != OpCode::INDEX_CHECK) { + continue; + } + GateRef length = acc_.GetValueIn(indexCheck, 0); + GateRef index = acc_.GetValueIn(indexCheck, 1); + GateRef array = acc_.GetValueIn(length, 0); + if (array != arrayGate) { + continue; + } + if (acc_.IsConstant(index)) { + int constValue = static_cast(acc_.GetConstantValue(index)); + if (constValue >= 0 && constValue <= maxConstant) { + RemoveIndexCheck(indexCheck); + } else if (constValue >= 0 && constValue > maxConstant) { + maxConstant = constValue; + listConstant.push_back(indexCheck); + } + } else { + int lastInteger; + GateRef lastGate; + GetInstrAndConstValueFromOp(index, lastGate, lastInteger); + if (lastInteger >= 0 && lastGate == Circuit::NullGate()) { // IsConstant + if (lastInteger <= maxConstant) { + RemoveIndexCheck(indexCheck); + } else { + maxConstant = lastInteger; + listConstant.push_back(indexCheck); + } + } else if (lastGate != Circuit::NullGate()) { + AddAccessIndexedInfo(indices, lastGate, lastInteger, indexCheck); + } // when lastInteger < 0, dont remove IndexCheck + } + } + + // Iterate over all different indices + for (size_t j = 0; j < indices.size(); j++) { + GateRef index = indices[j]; + + IndexCheckInfo *info = indexCheckInfo_[acc_.GetId(index)]; + ASSERT(info != nullptr); + + // maybe index < 0, max > 0 + // max + index in [0, a.length) + // min + index overflow !!!, min + index > 0 + // so, min + index >= INT_MIN, min >= INT_MIN - index + // max in [-index, a.length - index) + // min >= INT_MIN + max + bool rangeCond = (info->max_ < 0 || info->max_ + INT_MIN <= info->min_); + if (info->list_.size() > 2 && rangeCond) { // 2: size + GateRef insertAfter = info->list_.front(); + GateRef length = acc_.GetValueIn(insertAfter, 0); + ASSERT(length != Circuit::NullGate()); + + Environment env(insertAfter, circuit_, &builder_); + + // Calculate lower bound + GateRef lowerCompare = index; + if (info->min_ > 0) { + GateRef minGate = builder_.Int32(info->min_); + lowerCompare = 
builder_.InsertTypedBinaryop(lowerCompare, minGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOTypeRef::NoneType(), + TypedBinOp::TYPED_ADD); + } else if (info->min_ < 0) { + GateRef minGate = builder_.Int32(-info->min_); + lowerCompare = builder_.InsertTypedBinaryop(lowerCompare, minGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOTypeRef::NoneType(), + TypedBinOp::TYPED_SUB); + } + + PredicateCmpWithConst(lowerCompare, TypedBinOp::TYPED_GREATEREQ, 0); + + // Calculate upper bound + GateRef upperCompare = index; + if (info->max_ != 0) { + if (info->max_ > 0) { + GateRef maxGate = builder_.Int32(info->max_); + upperCompare = builder_.InsertTypedBinaryop(upperCompare, maxGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOTypeRef::NoneType(), + TypedBinOp::TYPED_ADD); + } else if (info->max_ < 0) { + GateRef maxGate = builder_.Int32(-info->max_); + upperCompare = builder_.InsertTypedBinaryop(upperCompare, maxGate, + GateType::NumberType(), GateType::NumberType(), + GateType::AnyType(), PGOTypeRef::NoneType(), + TypedBinOp::TYPED_SUB); + } + } + + Predicate(upperCompare, TypedBinOp::TYPED_LESS, length); + for (auto& indexCheck: (info->list_)) { + RemoveIndexCheck(indexCheck); + } + } + } + + // index only constant + if (listConstant.size() > 1) { + GateRef firIndexCheckGate = listConstant.front(); + Environment env(firIndexCheckGate, circuit_, &builder_); + GateRef length = acc_.GetValueIn(firIndexCheckGate, 0); + ASSERT(length != Circuit::NullGate()); + ASSERT(maxConstant >= 0); + PredicateCmpWithConst(length, TypedBinOp::TYPED_GREATER, maxConstant); // length > index + for (size_t j = 0; j < listConstant.size(); j++) { + GateRef indexCheck = listConstant[j]; + RemoveIndexCheck(indexCheck); + } + } + + for (size_t j = 0; j < indices.size(); j++) { + indexCheckInfo_[acc_.GetId(indices[j])] = nullptr; + } + indices.clear(); + } +} + +void ArrayBoundsCheckElimination::CalcBounds(GateRegion *block, GateRegion *loopHeader) +{ + // Pushed stack for condition + IntegerStack pushed(chunk_); + + // Process If + GateRegion *parent = block->GetDominator(); + if (parent != nullptr) { + auto gate = block->GetGates().front(); + auto op = acc_.GetOpCode(gate); + if (op == OpCode::IF_TRUE || op == OpCode::IF_FALSE) { // Recognize If (including the condition in forloop) + ProcessIf(pushed, parent, op); + } + } + + GateLists indexChecked(chunk_); + GateLists arrays(chunk_); + + auto& gateList_ = block->GetGates(); + for (size_t i = 0; i < gateList_.size(); i++) { // Visit GateUnion + GateRef gate = gateList_[i]; + auto op = acc_.GetOpCode(gate); + if (op == OpCode::INDEX_CHECK) { + auto length = acc_.GetValueIn(gate, 0); + auto index = acc_.GetValueIn(gate, 1); + auto array = acc_.GetValueIn(length, 0); + + ProcessIndexCheck(loopHeader, gate); + indexChecked.push_back(gate); + + if (!Contain(arrays, array)) { + arrays.push_back(array); + } + + // Give IndexCheck a bound [0, Length - 1] + Bound *b = GetBound(index); + if (b->LowerGate() == Circuit::NullGate()) { // LowerBound is the Constant !!! 
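+                // A surviving INDEX_CHECK lets all dominated uses assume
+                // 0 <= index < length, so push both halves of that range.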
+ UpdateBound(pushed, index, TypedBinOp::TYPED_GREATEREQ, Circuit::NullGate(), 0); + } + if (!b->HasUpper() && length != Circuit::NullGate()) { // default dont know the Length + UpdateBound(pushed, index, TypedBinOp::TYPED_LESS, length, 0); + } + } + } + + InBlockMotion(indexChecked, arrays); + + auto& dominatedRegions_ = block->GetDominatedRegions(); + for (size_t i = 0; i < dominatedRegions_.size(); i++) { + GateRegion *nex = dominatedRegions_[i]; + if (block->IsLoopHead() && (block->GetInnerLoopIndex() == nex->GetInnerLoopIndex() + || nex->GetLoopDepth() > block->GetLoopDepth())) { + CalcBounds(nex, block); + } else { + CalcBounds(nex, loopHeader); + } + } + + for (size_t i = 0; i < pushed.size(); i++) { + bounds_[pushed[i]]->pop_back(); + } +} +} diff --git a/ecmascript/compiler/array_bounds_check_elimination.h b/ecmascript/compiler/array_bounds_check_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..54b0c5a50d8f7bd7c85c8c8c8c8c22ffaff0f848 --- /dev/null +++ b/ecmascript/compiler/array_bounds_check_elimination.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_ARRAY_BOUNDS_CHECK_ELIMINATION_H +#define ECMASCRIPT_COMPILER_ARRAY_BOUNDS_CHECK_ELIMINATION_H + +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/mcr_gate_meta_data.h" +#include "ecmascript/compiler/gate_accessor.h" +#include "ecmascript/compiler/graph_linearizer.h" +#include "ecmascript/compiler/pass_manager.h" +#include "ecmascript/mem/chunk_containers.h" + +namespace panda::ecmascript::kungfu { +class ArrayBoundsCheckElimination { +public: + ArrayBoundsCheckElimination(Circuit *circuit, bool enableLog, const std::string& name, Chunk* chunk) + : acc_(circuit), bounds_(chunk), circuit_(circuit), builder_(circuit), chunk_(chunk), enableLog_(enableLog), + graphLinearizer_(circuit, enableLog, name, chunk, true, true), methodName_(name), indexCheckInfo_(chunk) {} + + ~ArrayBoundsCheckElimination() = default; + void Run(); + +private: + class Bound { + public: + Bound(); + Bound(GateRef v); + Bound(int lower, GateRef lowerGate, int upper, GateRef upperGate); + Bound(TypedBinOp op, GateRef gate, int constant); + ~Bound(){}; + int Upper() + { + return upper_; + } + GateRef UpperGate() + { + return upperGate_; + } + int Lower() + { + return lower_; + } + GateRef LowerGate() + { + return lowerGate_; + } + bool HasUpper() + { + return upperGate_ != Circuit::NullGate() || upper_ < INT_MAX; + } + bool HasLower() + { + return lowerGate_ != Circuit::NullGate() || lower_ > INT_MIN; + } + void RemoveUpper() + { + upperGate_ = Circuit::NullGate(); + upper_ = INT_MAX; + } + void RemoveLower() + { + lowerGate_ = Circuit::NullGate(); + lower_ = INT_MIN; + } + bool IsSmaller(Bound *b) + { + if (b->LowerGate() != upperGate_) { + return false; + } + return upper_ < b->Lower(); + } + Bound* Copy() + { + return new Bound(lower_, lowerGate_, upper_, upperGate_); + } + + private: + int 
upper_; + GateRef upperGate_; + int lower_; + GateRef lowerGate_; + + friend ArrayBoundsCheckElimination; + }; + + bool IsLogEnabled() const + { + return enableLog_; + } + + const std::string& GetMethodName() const + { + return methodName_; + } + + typedef ChunkVector<Bound*> BoundStack; + typedef ChunkVector<BoundStack*> BoundMap; + typedef ChunkVector<int> IntegerStack; + typedef ChunkVector<GateRef> GateLists; + + void AddAccessIndexedInfo(GateLists &indices, GateRef gate, int idx, GateRef indexCheck); + void AddIfCondition(IntegerStack &pushed, GateRef x, GateRef y, TypedBinOp op); + Bound *AndOp(Bound *bound, Bound *b); + Bound *OrOp(Bound *bound, Bound *b); + bool Contain(GateLists& gateLists, GateRef gate); + void CalcBounds(GateRegion *block, GateRegion *loopHeader); + bool CheckLoop(GateRef array, GateRef lowerGate, int lower, GateRef upperGate, int upper); + void InBlockMotion(GateLists &indexChecked, GateLists &arrays); + bool InLoop(GateRef loopHeader, GateRef gate); + bool IsArrayLength(GateRef gate); + bool LoopInvariant(GateRegion *loopHeader, GateRef gate); + void UpdateBound(IntegerStack &pushed, GateRef gate, Bound *bound); + void UpdateBound(IntegerStack &pushed, GateRef x, TypedBinOp op, GateRef y, int constValue); + void ProcessIndexCheck(GateRegion *loopHeader, GateRef gate); + void RemoveIndexCheck(GateRef gate); + void CopyStateInAndDependIn(GateRef &stateIn, GateRef &dependIn, GateRef insertAfter); + void LoopInvariantMotionForIndexCheck(GateRef array, GateRef length, GateRef lowerGate, int lower, + GateRef upperGate, int upper, bool isTypedArray); + void GetInstrAndConstValueFromOp(GateRef gate, GateRef &instrValue, int& constValue); + Bound *GetBound(GateRef gate); + Bound *DoConstant(GateRef gate); + Bound *DoArithmeticOp(GateRef gate); + Bound *DoPhi(GateRef gate); + void SetBound(GateRef gate, Bound *bound); + void ProcessIf(IntegerStack &pushed, GateRegion *parent, OpCode cond); + bool InArrayBound(Bound *bound, GateRef length, GateRef array); + Bound *VisitGate(GateRef gate); + + void ReplaceIn(GateRef stateIn, GateRef dependIn, GateRef newGate); + + GateRef Predicate(GateRef left, TypedBinOp cond, GateRef right); + GateRef PredicateCmpWithConst(GateRef left, TypedBinOp cond, int right); + GateRef PredicateAdd(GateRef left, int leftConst, TypedBinOp cond, GateRef right); + GateRef PredicateAddCmpWithConst(GateRef left, int leftConst, TypedBinOp cond, int right); + + GateAccessor acc_; + BoundMap bounds_; + Circuit *circuit_ {nullptr}; + CircuitBuilder builder_; + Chunk *chunk_ {nullptr}; + bool enableLog_ {false}; + GraphLinearizer graphLinearizer_; + std::string methodName_; + + class IndexCheckInfo { + public: + IndexCheckInfo(Chunk* chunk): list_(chunk) {} + GateLists list_; + int min_; + int max_; + }; + typedef ChunkVector<IndexCheckInfo*> IndexCheckInfoList; + IndexCheckInfoList indexCheckInfo_; +}; +} +#endif \ No newline at end of file diff --git a/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp b/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp index 05bb40ffe5de18d2726e4479d544b34513ba5434..1a08bbbc98f4b3a09897ecc392f7da21d9cee3ee 100644 --- a/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp +++ b/ecmascript/compiler/assembler/tests/assembler_aarch64_test.cpp @@ -80,13 +80,6 @@ public: LLVMInitializeAArch64AsmPrinter(); LLVMInitializeAArch64AsmParser(); LLVMInitializeAArch64Target(); - } else if (triple.compare(TARGET_ARM32) == 0) { - LLVMInitializeARMTargetInfo(); - LLVMInitializeARMTargetMC(); - LLVMInitializeARMDisassembler(); - 
LLVMInitializeARMAsmPrinter(); - LLVMInitializeARMAsmParser(); - LLVMInitializeARMTarget(); } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); diff --git a/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp b/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp index 5afa7050f2dde919138b84e5287b99ac4f7b1174..56820bc6a739dbbaeb8b840adae24a12c4dca15e 100644 --- a/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp +++ b/ecmascript/compiler/assembler/tests/assembler_x64_test.cpp @@ -85,13 +85,6 @@ public: LLVMInitializeAArch64AsmPrinter(); LLVMInitializeAArch64AsmParser(); LLVMInitializeAArch64Target(); - } else if (triple.compare(TARGET_ARM32) == 0) { - LLVMInitializeARMTargetInfo(); - LLVMInitializeARMTargetMC(); - LLVMInitializeARMDisassembler(); - LLVMInitializeARMAsmPrinter(); - LLVMInitializeARMAsmParser(); - LLVMInitializeARMTarget(); } else { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); diff --git a/ecmascript/compiler/assembler_module.cpp b/ecmascript/compiler/assembler_module.cpp index 29a28e6e5af1876b62243bd459188284f703167a..bb6cfe72318c648e6f857f84585c916e22178ea1 100644 --- a/ecmascript/compiler/assembler_module.cpp +++ b/ecmascript/compiler/assembler_module.cpp @@ -19,6 +19,7 @@ #include "ecmascript/compiler/assembler/x64/assembler_x64.h" #include "ecmascript/compiler/call_signature.h" #include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/circuit_builder_helper.h" #include "ecmascript/compiler/trampoline/aarch64/common_call.h" #include "ecmascript/compiler/trampoline/x64/common_call.h" #include "ecmascript/compiler/rt_call_signature.h" diff --git a/ecmascript/compiler/async_function_lowering.cpp b/ecmascript/compiler/async_function_lowering.cpp index 647a0b47a87e46f771ea5a46f2883198896ee943..e0e267c2e6b29205b6e2c0fc5fc9494d3241f8d4 100644 --- a/ecmascript/compiler/async_function_lowering.cpp +++ b/ecmascript/compiler/async_function_lowering.cpp @@ -41,7 +41,7 @@ void AsyncFunctionLowering::ProcessJumpTable() GateRef ifBranchCondition = builder_.Branch(stateEntry_, isEqual); GateRef ifTrueCondition = builder_.IfTrue(ifBranchCondition); GateRef ifFalseCondition = builder_.IfFalse(ifBranchCondition); - if (accessor_.GetOpCode(*firstUse) == OpCode::STATE_SPLIT) { + while (accessor_.GetOpCode(*firstUse) == OpCode::STATE_SPLIT) { firstUse++; } accessor_.ReplaceStateIn(*firstUse, ifTrueCondition); @@ -49,11 +49,12 @@ void AsyncFunctionLowering::ProcessJumpTable() GateRef contextOffset = builder_.IntPtr(JSGeneratorObject::GENERATOR_CONTEXT_OFFSET); GateRef val = builder_.PtrAdd(newTarget, contextOffset); GateRef dependStart = builder_.DependRelay(ifFalseCondition, dependEntry_); - GateRef contextGate = circuit_->NewGate(circuit_->Load(), MachineType::I64, {dependStart, val}, + auto bit = LoadStoreAccessor::ToValue(MemoryOrder::NOT_ATOMIC); + GateRef contextGate = circuit_->NewGate(circuit_->Load(bit), MachineType::I64, {dependStart, val}, GateType::TaggedPointer()); GateRef bcOffset = builder_.IntPtr(GeneratorContext::GENERATOR_BC_OFFSET_OFFSET); val = builder_.PtrAdd(contextGate, bcOffset); - GateRef restoreOffsetGate = circuit_->NewGate(circuit_->Load(), MachineType::I32, {contextGate, val}, + GateRef restoreOffsetGate = circuit_->NewGate(circuit_->Load(bit), MachineType::I32, {contextGate, val}, GateType::NJSValue()); GateRef firstState = Circuit::NullGate(); const auto &suspendAndResumeGates = bcBuilder_->GetAsyncRelatedGates(); @@ -81,10 +82,9 @@ void 
AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest GateRef loopBeginStateIn = Circuit::NullGate(); GateRef prevBcOffsetPhiGate = Circuit::NullGate(); while (true) { - auto opcode = accessor_.GetOpCode(stateInGate); - if (opcode == OpCode::STATE_ENTRY) { + if (stateInGate == GetEntryBBStateOut()) { // from state entry GateRef condition = builder_.Equal(offsetConstantGate, restoreOffsetGate); - GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(), { ifFalseCondition, condition }); + GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(0), { ifFalseCondition, condition }); GateRef ifTrue = circuit_->NewGate(circuit_->IfTrue(), {ifBranch}); GateRef ifFalse = circuit_->NewGate(circuit_->IfFalse(), {ifBranch}); GateRef ifTrueDepend = builder_.DependRelay(ifTrue, restoreOffsetGate); @@ -110,7 +110,7 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest } firstState = ifBranch; } - + auto opcode = accessor_.GetOpCode(stateInGate); if (opcode == OpCode::LOOP_BEGIN) { bool resumeInLoopBody = false; CheckResumeInLoopBody(stateInGate, resumeInLoopBody); @@ -119,13 +119,16 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest // loop needs to modify the phi node or not. GateRef emptyOffsetGate = circuit_->NewGate(circuit_->GetMetaBuilder()->Constant(-1), MachineType::I32, GateType::NJSValue()); - // 2: valuesIn - GateRef bcOffsetPhiGate = circuit_->NewGate(circuit_->ValueSelector(2), MachineType::I32, - {stateInGate, restoreOffsetGate, emptyOffsetGate}, - GateType::NJSValue()); + + auto numIn = accessor_.GetNumIns(stateInGate); + std::vector<GateRef> inList(numIn + 1, emptyOffsetGate); + inList[0] = stateInGate; // 0 : state in + inList[1] = restoreOffsetGate; // 1 : outloop value in + GateRef bcOffsetPhiGate = circuit_->NewGate(circuit_->ValueSelector(numIn), MachineType::I32, + inList, GateType::NJSValue()); GateRef condition = builder_.Equal(offsetConstantGate, bcOffsetPhiGate); - GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(), {stateInGate, condition}); + GateRef ifBranch = circuit_->NewGate(circuit_->IfBranch(0), {stateInGate, condition}); GateRef ifTrue = circuit_->NewGate(circuit_->IfTrue(), {ifBranch}); GateRef ifFalse = circuit_->NewGate(circuit_->IfFalse(), {ifBranch}); @@ -158,7 +161,7 @@ void AsyncFunctionLowering::RebuildGeneratorCfg(GateRef resumeGate, GateRef rest UpdateValueSelector(prevLoopBeginGate, loopBeginStateIn, prevBcOffsetPhiGate); break; } - if (accessor_.GetOpCode(stateInGate) == OpCode::STATE_ENTRY) { + if (stateInGate == GetEntryBBStateOut()) { break; } stateInGate = accessor_.GetState(stateInGate); @@ -192,9 +195,16 @@ void AsyncFunctionLowering::UpdateValueSelector(GateRef prevLoopBeginGate, if (accessor_.GetOpCode(use) == OpCode::VALUE_SELECTOR && use != prevBcOffsetPhiGate) { auto machineType = accessor_.GetMachineType(use); auto gateType = accessor_.GetGateType(use); - GateRef undefinedGate = + GateRef undefinedGate = Circuit::NullGate(); + if (gateType.IsNumberType()) { + undefinedGate = + circuit_->NewGate(circuit_->GetMetaBuilder()->Constant(JSTaggedValue::VALUE_ZERO), + machineType, GateType::IntType()); + } else { + undefinedGate = circuit_->NewGate(circuit_->GetMetaBuilder()->Constant(JSTaggedValue::VALUE_UNDEFINED), machineType, gateType); + } auto firstValueGate = accessor_.GetValueIn(use, 0); auto newValueSelector = circuit_->NewGate(circuit_->ValueSelector(2), machineType, // 2: valuesIn {newGate, undefinedGate, firstValueGate}, @@ -236,25 +246,25 @@ void 
AsyncFunctionLowering::ModifyStateInput(GateRef stateInGate, GateRef ifBran void AsyncFunctionLowering::CheckResumeInLoopBody(GateRef stateInGate, bool &resumeInLoopBody) { ASSERT(accessor_.GetOpCode(stateInGate) == OpCode::LOOP_BEGIN); - GateRef loopBack = accessor_.GetIn(stateInGate, 0); - if (accessor_.GetOpCode(loopBack) != OpCode::LOOP_BACK) { - loopBack = accessor_.GetIn(stateInGate, 1); - } - ChunkQueue<GateRef> resuemList(circuit_->chunk()); - resuemList.push(loopBack); + ChunkQueue<GateRef> resumeList(circuit_->chunk()); ChunkVector<VisitState> visited(circuit_->GetMaxGateId() + 1, VisitState::UNVISITED, circuit_->chunk()); + for (size_t i = 0; i < accessor_.GetNumIns(stateInGate); i++) { + GateRef inGate = accessor_.GetIn(stateInGate, i); + if (accessor_.GetOpCode(inGate) == OpCode::LOOP_BACK) { + resumeList.push(inGate); + visited[accessor_.GetId(inGate)] = VisitState::VISITED; + } + } auto loopBeginId = accessor_.GetId(stateInGate); visited[loopBeginId] = VisitState::VISITED; - auto loopBackId = accessor_.GetId(loopBack); - visited[loopBackId] = VisitState::VISITED; - while (!resuemList.empty()) { - GateRef curGate = resuemList.front(); + while (!resumeList.empty()) { + GateRef curGate = resumeList.front(); if (accessor_.GetOpCode(curGate) == OpCode::JS_BYTECODE && accessor_.GetByteCodeOpcode(curGate) == EcmaOpcode::RESUMEGENERATOR) { resumeInLoopBody = true; break; } - resuemList.pop(); + resumeList.pop(); size_t stateStart = 0; size_t stateEnd = accessor_.GetStateCount(curGate); for (size_t idx = stateStart; idx < stateEnd; idx++) { @@ -262,7 +272,7 @@ void AsyncFunctionLowering::CheckResumeInLoopBody(GateRef stateInGate, bool &res auto id = accessor_.GetId(gate); if (visited[id] == VisitState::UNVISITED) { visited[id] = VisitState::VISITED; - resuemList.push(gate); + resumeList.push(gate); } } } @@ -279,5 +289,28 @@ GateRef AsyncFunctionLowering::GetDependPhiFromLoopBegin(GateRef gate) const LOG_COMPILER(FATAL) << "Can not find depend-selector from loopbegin"; return Circuit::NullGate(); } + +GateRef AsyncFunctionLowering::GetEntryBBStateOut() const +{ + auto& bb = bcBuilder_->GetBasicBlockById(0); // 0 : Entry Block Id + // state may CheckSafePointAndStackOver + auto state = bb.dependCache; + if (state == Circuit::NullGate()) { + return circuit_->GetStateRoot(); + } else { + return state; + } +} + +GateRef AsyncFunctionLowering::GetEntryBBDependOut() const +{ + auto& bb = bcBuilder_->GetBasicBlockById(0); // 0 : Entry Block Id + auto depend = bb.dependCache; + if (depend == Circuit::NullGate()) { + return circuit_->GetDependRoot(); + } else { + return depend; + } +} } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/async_function_lowering.h b/ecmascript/compiler/async_function_lowering.h index 35c19cac298282399df7506ea26bf54c193457c8..adca55226c7621a35c709e74656297123d0ef8a2 100644 --- a/ecmascript/compiler/async_function_lowering.h +++ b/ecmascript/compiler/async_function_lowering.h @@ -20,7 +20,6 @@ #include "ecmascript/compiler/circuit.h" #include "ecmascript/compiler/circuit_builder-inl.h" #include "ecmascript/compiler/circuit_builder.h" -#include "ecmascript/compiler/graph_visitor.h" #include "ecmascript/mem/chunk_containers.h" namespace panda::ecmascript::kungfu { @@ -29,9 +28,8 @@ public: AsyncFunctionLowering(BytecodeCircuitBuilder *bcBuilder, Circuit *circuit, CompilationConfig *cmpCfg, bool enableLog, const std::string& name) : bcBuilder_(bcBuilder), circuit_(circuit), builder_(circuit, cmpCfg), enableLog_(enableLog), - stateEntry_(circuit->GetStateRoot()), - 
dependEntry_(circuit->GetDependRoot()), - accessor_(circuit), argAccessor_(circuit), methodName_(name) + accessor_(circuit), argAccessor_(circuit), stateEntry_(GetEntryBBStateOut()), + dependEntry_(GetEntryBBDependOut()), methodName_(name) { } @@ -66,14 +64,18 @@ private: GateRef GetDependPhiFromLoopBegin(GateRef loopbegin) const; + GateRef GetEntryBBStateOut() const; + + GateRef GetEntryBBDependOut() const; + BytecodeCircuitBuilder *bcBuilder_; Circuit *circuit_; CircuitBuilder builder_; bool enableLog_ {false}; - GateRef stateEntry_ {Circuit::NullGate()}; - GateRef dependEntry_ {Circuit::NullGate()}; GateAccessor accessor_; ArgumentAccessor argAccessor_; + GateRef stateEntry_ {Circuit::NullGate()}; + GateRef dependEntry_ {Circuit::NullGate()}; std::string methodName_; }; } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/base/bit_set.h b/ecmascript/compiler/base/bit_set.h index 5eb0c40878ee4b9c578de5df48dbad4dd1edd682..02b6ddcb741678ba734608ce385f41303d413cf0 100644 --- a/ecmascript/compiler/base/bit_set.h +++ b/ecmascript/compiler/base/bit_set.h @@ -115,6 +115,17 @@ public: } } + void Intersect(const BitSet &bitset) + { + if (!UseWords()) { + data_.inlineWord_ &= bitset.data_.inlineWord_; + } else { + for (size_t i = 0; i < wordCount_; i++) { + data_.words_[i] &= bitset.data_.words_[i]; + } + } + } + void CopyFrom(const BitSet &other) { ASSERT(wordCount_ == other.wordCount_); diff --git a/ecmascript/compiler/base/depend_chain_helper.cpp b/ecmascript/compiler/base/depend_chain_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3dc92566e65b8e7e2b36a591c74412098768e1b2 --- /dev/null +++ b/ecmascript/compiler/base/depend_chain_helper.cpp @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/base/depend_chain_helper.h" + +namespace panda::ecmascript::kungfu { + +void DependChains::Merge(DependChains* that) +{ + // find common sub list + while (size_ > that->size_) { + head_ = head_->next; + size_--; + } + + auto lhs = this->head_; + auto rhs = that->head_; + size_t rhsSize = that->size_; + while (rhsSize > size_) { + rhs = rhs->next; + rhsSize--; + } + while (lhs != rhs) { + ASSERT(lhs != nullptr); + lhs = lhs->next; + rhs = rhs->next; + size_--; + } + head_ = lhs; +} + +bool DependChains::Equals(DependChains* that) +{ + if (that == nullptr) { + return false; + } + if (size_ != that->size_) { + return false; + } + auto lhs = this->head_; + auto rhs = that->head_; + while (lhs != rhs) { + if (lhs->gate != rhs->gate) { + return false; + } + lhs = lhs->next; + rhs = rhs->next; + } + return true; +} + +uint32_t DependChains::FoundIndexCheckedForLength(RangeGuard* rangeGuard, GateRef input) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + uint32_t length = rangeGuard->CheckIndexCheckLengthInput(node->gate, input); + if (length > 0) { // found !!! 
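+            // Walk the depend chain and return the first checked length recorded for this input.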
+ return length; + } + } + return 0; +} + +uint32_t DependChains::FoundIndexCheckedForIndex(RangeGuard* rangeGuard, GateRef input) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + uint32_t length = rangeGuard->CheckIndexCheckIndexInput(node->gate, input); + if (length > 0) { // found !!! + return length; + } + } + return 0; +} + +GateRef DependChains::LookupNode(LaterElimination* elimination, GateRef gate) +{ + for (Node* node = head_; node != nullptr; node = node->next) { + if (elimination->CheckReplacement(node->gate, gate)) { + return node->gate; + } + } + return Circuit::NullGate(); +} + +DependChains* DependChains::UpdateNode(GateRef gate) +{ + // assign node->next to head + Node* node = chunk_->New<Node>(gate, head_); + DependChains* that = new (chunk_) DependChains(chunk_); + // assign head to node + that->head_ = node; + that->size_ = size_ + 1; + return that; +} + +GateRef DependChains::LookupStLexvarNode(LexicalEnvSpecialization* lexicalEnvSpecialization, GateRef gate) +{ + GateRef result = Circuit::NullGate(); + for (Node* node = head_; node != nullptr; node = node->next) { + if (lexicalEnvSpecialization->SearchStLexVar(node->gate, gate, result)) { + return node->gate; + } else { + if (result == gate) { + return Circuit::NullGate(); + } + } + } + return Circuit::NullGate(); +} +} // namespace panda::ecmascript::kungfu \ No newline at end of file diff --git a/ecmascript/compiler/base/depend_chain_helper.h b/ecmascript/compiler/base/depend_chain_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..f6fb7fb2b972ca6aa2d157a14b90abe4d75ca1aa --- /dev/null +++ b/ecmascript/compiler/base/depend_chain_helper.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H +#define ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H + +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate_accessor.h" +#include "ecmascript/compiler/later_elimination.h" +#include "ecmascript/compiler/lexical_env_specialization.h" +#include "ecmascript/compiler/range_guard.h" +#include "ecmascript/mem/chunk_containers.h" + +namespace panda::ecmascript::kungfu { +class LaterElimination; +class RangeGuard; +class LexicalEnvSpecialization; +class DependChains : public ChunkObject { +public: + DependChains(Chunk* chunk) : chunk_(chunk) {} + ~DependChains() = default; + + DependChains* UpdateNode(GateRef gate); + bool Equals(DependChains* that); + void Merge(DependChains* that); + void CopyFrom(DependChains *other) + { + head_ = other->head_; + size_ = other->size_; + } + uint32_t FoundIndexCheckedForLength(RangeGuard* rangeGuard, GateRef input); + uint32_t FoundIndexCheckedForIndex(RangeGuard* rangeGuard, GateRef input); + GateRef LookupNode(LaterElimination* elimination, GateRef gate); + GateRef LookupStLexvarNode(LexicalEnvSpecialization* lexicalEnvSpecialization, GateRef gate); + GateRef GetHeadGate() + { + return head_->gate; + } +private: + struct Node { + Node(GateRef gate, Node* next) : gate(gate), next(next) {} + GateRef gate; + Node *next; + }; + + Node *head_{nullptr}; + size_t size_ {0}; + Chunk* chunk_; +}; +} // panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_DEPEND_CHAIN_HELPER_H \ No newline at end of file diff --git a/ecmascript/compiler/bc_call_signature.h b/ecmascript/compiler/bc_call_signature.h index 08aceb8cd7207bc2c839d6e5da16f512fb9da6f1..b42421909bb4c7a9e0da18988e93f3abf3cb1b49 100644 --- a/ecmascript/compiler/bc_call_signature.h +++ b/ecmascript/compiler/bc_call_signature.h @@ -382,7 +382,10 @@ namespace panda::ecmascript::kungfu { V(ThrowStackOverflowException) #define APPEND_SUFFIX(name, V) \ - V(name##WithProf, name) + V(name##WithProf, name, SlotIDFormat::IMM8) + +#define APPEND_SUFFIX_IMM16(name, V) \ + V(name##WithProf, name, SlotIDFormat::IMM16) #define ASM_INTERPRETER_BC_PROFILER_STUB_LIST(V) \ ASM_INTERPRETER_BC_TYPE_PROFILER_STUB_LIST(V) \ @@ -416,70 +419,89 @@ namespace panda::ecmascript::kungfu { APPEND_SUFFIX(HandleStrictnoteqImm8V8, V) \ APPEND_SUFFIX(HandleStricteqImm8V8, V) -#define ASM_INTERPRETER_BC_FUNC_HOT_PROFILER_STUB_LIST(V) \ - APPEND_SUFFIX(HandleJmpImm8, V) \ - APPEND_SUFFIX(HandleJmpImm16, V) \ - APPEND_SUFFIX(HandleJmpImm32, V) \ - APPEND_SUFFIX(HandleJeqzImm8, V) \ - APPEND_SUFFIX(HandleJeqzImm16, V) \ - APPEND_SUFFIX(HandleJeqzImm32, V) \ - APPEND_SUFFIX(HandleJnezImm8, V) \ - APPEND_SUFFIX(HandleJnezImm16, V) \ - APPEND_SUFFIX(HandleJnezImm32, V) \ - APPEND_SUFFIX(HandleReturn, V) \ - APPEND_SUFFIX(HandleReturnundefined, V) \ - APPEND_SUFFIX(HandleSuspendgeneratorV8, V) \ - APPEND_SUFFIX(HandleDeprecatedSuspendgeneratorPrefV8V8, V) \ +#define ASM_INTERPRETER_BC_FUNC_HOT_PROFILER_STUB_LIST(V) \ + APPEND_SUFFIX(HandleJmpImm8, V) \ + APPEND_SUFFIX_IMM16(HandleJmpImm16, V) \ + APPEND_SUFFIX(HandleJmpImm32, V) \ + APPEND_SUFFIX(HandleJeqzImm8, V) \ + APPEND_SUFFIX_IMM16(HandleJeqzImm16, V) \ + APPEND_SUFFIX(HandleJeqzImm32, V) \ + APPEND_SUFFIX(HandleJnezImm8, V) \ + APPEND_SUFFIX_IMM16(HandleJnezImm16, V) \ + APPEND_SUFFIX(HandleJnezImm32, V) \ + APPEND_SUFFIX(HandleReturn, V) \ + APPEND_SUFFIX(HandleReturnundefined, V) \ + APPEND_SUFFIX(HandleSuspendgeneratorV8, V) \ APPEND_SUFFIX(HandleAsyncgeneratorresolveV8V8V8, V) #define 
ASM_INTERPRETER_BC_FUNC_COUNT_PROFILER_STUB_LIST(V) \ APPEND_SUFFIX(HandleCallarg0Imm8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallarg0PrefV8, V) \ APPEND_SUFFIX(HandleCallarg1Imm8V8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallarg1PrefV8V8, V) \ APPEND_SUFFIX(HandleCallargs2Imm8V8V8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallargs2PrefV8V8V8, V) \ APPEND_SUFFIX(HandleCallargs3Imm8V8V8V8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallargs3PrefV8V8V8V8, V) \ APPEND_SUFFIX(HandleCallrangeImm8Imm8V8, V) \ - APPEND_SUFFIX(HandleWideCallrangePrefImm16V8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallrangePrefImm16V8, V) \ + APPEND_SUFFIX_IMM16(HandleWideCallrangePrefImm16V8, V) \ APPEND_SUFFIX(HandleCallthisrangeImm8Imm8V8, V) \ - APPEND_SUFFIX(HandleWideCallthisrangePrefImm16V8, V) \ - APPEND_SUFFIX(HandleDeprecatedCallthisrangePrefImm16V8, V) \ + APPEND_SUFFIX_IMM16(HandleWideCallthisrangePrefImm16V8, V) \ APPEND_SUFFIX(HandleCallthis0Imm8V8, V) \ APPEND_SUFFIX(HandleCallthis1Imm8V8V8, V) \ APPEND_SUFFIX(HandleCallthis2Imm8V8V8V8, V) \ APPEND_SUFFIX(HandleCallthis3Imm8V8V8V8V8, V) \ APPEND_SUFFIX(HandleNewobjrangeImm8Imm8V8, V) \ - APPEND_SUFFIX(HandleNewobjrangeImm16Imm8V8, V) \ - APPEND_SUFFIX(HandleWideNewobjrangePrefImm16V8, V) - -#define ASM_INTERPRETER_BC_LAYOUT_PROFILER_STUB_LIST(V) \ - APPEND_SUFFIX(HandleDefineclasswithbufferImm8Id16Id16Imm16V8, V) \ - APPEND_SUFFIX(HandleDefineclasswithbufferImm16Id16Id16Imm16V8, V) \ - APPEND_SUFFIX(HandleDefinegettersetterbyvalueV8V8V8V8, V) \ - APPEND_SUFFIX(HandleCreateobjectwithbufferImm8Id16, V) \ - APPEND_SUFFIX(HandleCreateobjectwithbufferImm16Id16, V) \ - APPEND_SUFFIX(HandleLdobjbynameImm8Id16, V) \ - APPEND_SUFFIX(HandleLdobjbynameImm16Id16, V) \ - APPEND_SUFFIX(HandleLdthisbynameImm16Id16, V) \ - APPEND_SUFFIX(HandleLdthisbynameImm8Id16, V) \ - APPEND_SUFFIX(HandleStthisbynameImm8Id16, V) \ - APPEND_SUFFIX(HandleStthisbynameImm16Id16, V) \ - APPEND_SUFFIX(HandleStthisbyvalueImm8V8, V) \ - APPEND_SUFFIX(HandleStthisbyvalueImm16V8, V) \ - APPEND_SUFFIX(HandleStobjbyvalueImm16V8V8, V) \ - APPEND_SUFFIX(HandleStobjbynameImm8Id16V8, V) \ - APPEND_SUFFIX(HandleStobjbynameImm16Id16V8, V) \ - APPEND_SUFFIX(HandleStobjbyvalueImm8V8V8, V) \ - APPEND_SUFFIX(HandleStownbyvaluewithnamesetImm16V8V8, V) \ - APPEND_SUFFIX(HandleStownbyvaluewithnamesetImm8V8V8, V) \ - APPEND_SUFFIX(HandleStownbyvalueImm8V8V8, V) \ - APPEND_SUFFIX(HandleStownbyvalueImm16V8V8, V) \ - APPEND_SUFFIX(HandleStownbynamewithnamesetImm16Id16V8, V) \ - APPEND_SUFFIX(HandleStownbynamewithnamesetImm8Id16V8, V) \ - APPEND_SUFFIX(HandleStownbynameImm16Id16V8, V) \ + APPEND_SUFFIX_IMM16(HandleNewobjrangeImm16Imm8V8, V) \ + APPEND_SUFFIX_IMM16(HandleWideNewobjrangePrefImm16V8, V) \ + APPEND_SUFFIX(HandleInstanceofImm8V8, V) \ + APPEND_SUFFIX(HandleGetiteratorImm8, V) \ + APPEND_SUFFIX_IMM16(HandleGetiteratorImm16, V) \ + APPEND_SUFFIX(HandleTryldglobalbynameImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleTryldglobalbynameImm16Id16, V) \ + APPEND_SUFFIX(HandleTrystglobalbynameImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleTrystglobalbynameImm16Id16, V) \ + APPEND_SUFFIX_IMM16(HandleLdglobalvarImm16Id16, V) + +#define ASM_INTERPRETER_BC_LAYOUT_PROFILER_STUB_LIST(V) \ + APPEND_SUFFIX(HandleDefineclasswithbufferImm8Id16Id16Imm16V8, V) \ + APPEND_SUFFIX_IMM16(HandleDefineclasswithbufferImm16Id16Id16Imm16V8, V) \ + APPEND_SUFFIX(HandleDefinegettersetterbyvalueV8V8V8V8, V) \ + APPEND_SUFFIX(HandleCreateobjectwithbufferImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleCreateobjectwithbufferImm16Id16, V) \ + 
APPEND_SUFFIX(HandleCreatearraywithbufferImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleCreatearraywithbufferImm16Id16, V) \ + APPEND_SUFFIX(HandleCreateemptyobject, V) \ + APPEND_SUFFIX(HandleCreateemptyarrayImm8, V) \ + APPEND_SUFFIX_IMM16(HandleCreateemptyarrayImm16, V) \ + APPEND_SUFFIX(HandleLdobjbynameImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleLdobjbynameImm16Id16, V) \ + APPEND_SUFFIX_IMM16(HandleLdthisbynameImm16Id16, V) \ + APPEND_SUFFIX(HandleLdthisbynameImm8Id16, V) \ + APPEND_SUFFIX(HandleStthisbynameImm8Id16, V) \ + APPEND_SUFFIX_IMM16(HandleStthisbynameImm16Id16, V) \ + APPEND_SUFFIX(HandleStthisbyvalueImm8V8, V) \ + APPEND_SUFFIX_IMM16(HandleStthisbyvalueImm16V8, V) \ + APPEND_SUFFIX_IMM16(HandleStobjbyvalueImm16V8V8, V) \ + APPEND_SUFFIX(HandleStobjbynameImm8Id16V8, V) \ + APPEND_SUFFIX_IMM16(HandleStobjbynameImm16Id16V8, V) \ + APPEND_SUFFIX(HandleStobjbyvalueImm8V8V8, V) \ + APPEND_SUFFIX(HandleStobjbyindexImm8V8Imm16, V) \ + APPEND_SUFFIX_IMM16(HandleStobjbyindexImm16V8Imm16, V) \ + APPEND_SUFFIX(HandleLdobjbyvalueImm8V8, V) \ + APPEND_SUFFIX_IMM16(HandleLdobjbyvalueImm16V8, V) \ + APPEND_SUFFIX_IMM16(HandleLdthisbyvalueImm16, V) \ + APPEND_SUFFIX(HandleLdthisbyvalueImm8, V) \ + APPEND_SUFFIX(HandleLdobjbyindexImm8Imm16, V) \ + APPEND_SUFFIX_IMM16(HandleLdobjbyindexImm16Imm16, V) \ + APPEND_SUFFIX(HandleWideLdobjbyindexPrefImm32, V) \ + APPEND_SUFFIX(HandleWideStobjbyindexPrefV8Imm32, V) \ + APPEND_SUFFIX_IMM16(HandleStownbyindexImm16V8Imm16, V) \ + APPEND_SUFFIX(HandleStownbyindexImm8V8Imm16, V) \ + APPEND_SUFFIX(HandleWideStownbyindexPrefV8Imm32, V) \ + APPEND_SUFFIX_IMM16(HandleStownbyvaluewithnamesetImm16V8V8, V) \ + APPEND_SUFFIX(HandleStownbyvaluewithnamesetImm8V8V8, V) \ + APPEND_SUFFIX(HandleStownbyvalueImm8V8V8, V) \ + APPEND_SUFFIX_IMM16(HandleStownbyvalueImm16V8V8, V) \ + APPEND_SUFFIX_IMM16(HandleStownbynamewithnamesetImm16Id16V8, V) \ + APPEND_SUFFIX(HandleStownbynamewithnamesetImm8Id16V8, V) \ + APPEND_SUFFIX_IMM16(HandleStownbynameImm16Id16V8, V) \ APPEND_SUFFIX(HandleStownbynameImm8Id16V8, V) #define INTERPRETER_DISABLE_SINGLE_STEP_DEBUGGING_BC_STUB_LIST(V) \ diff --git a/ecmascript/compiler/binary_section.h b/ecmascript/compiler/binary_section.h index 7a221e269b4afa771432c60416936253db0435c9..54c26ee2b791f6dc1db6a39622fe499edae23239 100644 --- a/ecmascript/compiler/binary_section.h +++ b/ecmascript/compiler/binary_section.h @@ -144,7 +144,8 @@ public: int Link() const { - return value_ == ElfSecName::SYMTAB ? 1 : 0; + // The strtab section index is 2 inside the .an file. + return value_ == ElfSecName::SYMTAB ? 2 : 0; } void InitShTypeAndFlag() @@ -212,7 +213,7 @@ public: // RO data section needs 16 bytes alignment bool InRodataSection() const { - return ElfSecName::RODATA <= value_ && value_ <= ElfSecName::RODATA_CST8; + return ElfSecName::RODATA <= value_ && value_ <= ElfSecName::RODATA_CST32; } private: static int const FIX_SIZE = 24; // 24:Elf_Rel diff --git a/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..469bcf60838ed56281c2b7216d99181c1c99bedb --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_array_stub_builder.cpp @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_array_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_stubs.h" +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/compiler/profiler_operation.h" +#include "ecmascript/compiler/rt_call_signature.h" +#include "ecmascript/runtime_call_id.h" + +namespace panda::ecmascript::kungfu { +void BuiltinsArrayStubBuilder::Concat(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) at most one argument; + // (3) all the arguments (if any) are empty arrays. + JsArrayRequirements reqThisValue; + reqThisValue.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, reqThisValue), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Label argValIsEmpty(env); + GateRef numArgsAsInt32 = TruncPtrToInt32(numArgs); + Branch(Int32LessThanOrEqual(numArgsAsInt32, Int32(1)), &atMostOneArg, slowPath); + Bind(&atMostOneArg); + { + Label exactlyOneArg(env); + Branch(Int32Equal(numArgsAsInt32, Int32(0)), &argValIsEmpty, &exactlyOneArg); + Bind(&exactlyOneArg); + GateRef argVal = GetCallArg0(numArgs); + JsArrayRequirements reqArgVal; + Branch(IsJsArrayWithLengthLimit(glue, argVal, MAX_LENGTH_ZERO, reqArgVal), &argValIsEmpty, slowPath); + // Creates an empty array on fast path + Bind(&argValIsEmpty); + NewObjectStubBuilder newBuilder(this); + result->WriteVariable(newBuilder.CreateEmptyArray(glue)); + Jump(exit); + } + } +} + +void BuiltinsArrayStubBuilder::Filter(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) callbackFn is callable (otherwise a TypeError shall be thrown in the slow path) + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label isCallable(env); + Label isHeapObject(env); + Branch(TaggedIsHeapObject(GetCallArg0(numArgs)), &isHeapObject, slowPath); + Bind(&isHeapObject); + Branch(IsCallable(GetCallArg0(numArgs)), &isCallable, slowPath); + // Creates an empty array on fast path + Bind(&isCallable); + NewObjectStubBuilder newBuilder(this); + result->WriteVariable(newBuilder.CreateEmptyArray(glue)); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::ForEach([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + [[maybe_unused]] Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + Label isHeapObject(env); + // Fast 
path if all the conditions below are satisfied: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) callbackFn is callable (otherwise a TypeError shall be thrown in the slow path) + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + // Do nothing on fast path + Branch(TaggedIsHeapObject(GetCallArg0(numArgs)), &isHeapObject, slowPath); + Bind(&isHeapObject); + Branch(IsCallable(GetCallArg0(numArgs)), exit, slowPath); +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::IndexOf([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: (1) this is an empty array; (2) fromIndex is missing + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Branch(Int32LessThanOrEqual(TruncPtrToInt32(numArgs), Int32(1)), &atMostOneArg, slowPath); + // Returns -1 on fast path + Bind(&atMostOneArg); + result->WriteVariable(IntToTaggedPtr(Int32(-1))); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::LastIndexOf([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: (1) this is an empty array; (2) fromIndex is missing + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label atMostOneArg(env); + Branch(Int32LessThanOrEqual(TruncPtrToInt32(numArgs), Int32(1)), &atMostOneArg, slowPath); + // Returns -1 on fast path + Bind(&atMostOneArg); + result->WriteVariable(IntToTaggedPtr(Int32(-1))); + Jump(exit); + } +} + +void BuiltinsArrayStubBuilder::Slice(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if: + // (1) this is an empty array with constructor not reset (see ArraySpeciesCreate for details); + // (2) no arguments exist + JsArrayRequirements req; + req.defaultConstructor = true; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ZERO, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + { + Label noArgs(env); + GateRef numArgsAsInt32 = TruncPtrToInt32(numArgs); + Branch(Int32Equal(numArgsAsInt32, Int32(0)), &noArgs, slowPath); + // Creates a new empty array on fast path + Bind(&noArgs); + NewObjectStubBuilder newBuilder(this); + result->WriteVariable(newBuilder.CreateEmptyArray(glue)); + Jump(exit); + } +} + +// Note: unused arguments are reserved for further development +void BuiltinsArrayStubBuilder::Reverse([[maybe_unused]] GateRef glue, GateRef thisValue, + [[maybe_unused]] GateRef numArgs, + Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisIsEmpty(env); + // Fast path if this is an array of length 0 or 1 + JsArrayRequirements req; + Branch(IsJsArrayWithLengthLimit(glue, thisValue, MAX_LENGTH_ONE, req), &thisIsEmpty, slowPath); + Bind(&thisIsEmpty); + // Returns thisValue on fast path + result->WriteVariable(thisValue); + Jump(exit); 
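+    // An array of length 0 or 1 is its own reverse, so thisValue can be returned unchanged.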
+} + +GateRef BuiltinsArrayStubBuilder::IsJsArrayWithLengthLimit(GateRef glue, GateRef object, + uint32_t maxLength, JsArrayRequirements requirements) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + Label isHeapObject(env); + Label isJsArray(env); + Label stabilityCheckPassed(env); + Label defaultConstructorCheckPassed(env); + Label exit(env); + DEFVARIABLE(result, VariableType::BOOL(), False()); + + Branch(TaggedIsHeapObject(object), &isHeapObject, &exit); + Bind(&isHeapObject); + Branch(IsJsArray(object), &isJsArray, &exit); + Bind(&isJsArray); + if (requirements.stable) { + Branch(IsStableJSArray(glue, object), &stabilityCheckPassed, &exit); + } else { + Jump(&stabilityCheckPassed); + } + Bind(&stabilityCheckPassed); + if (requirements.defaultConstructor) { + // If HasConstructor bit is set to 1, then the constructor has been modified. + Branch(HasConstructor(object), &exit, &defaultConstructorCheckPassed); + } else { + Jump(&defaultConstructorCheckPassed); + } + Bind(&defaultConstructorCheckPassed); + result.WriteVariable(Int32UnsignedLessThanOrEqual(GetArrayLength(object), Int32(maxLength))); + Jump(&exit); + Bind(&exit); + GateRef ret = *result; + env->SubCfgExit(); + return ret; +} + +void BuiltinsArrayStubBuilder::Push(GateRef glue, GateRef thisValue, + GateRef numArgs, Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label isHeapObject(env); + Label isJsArray(env); + Label isStability(env); + Label setLength(env); + Label smallArgs(env); + Label checkSmallArgs(env); + + Branch(TaggedIsHeapObject(thisValue), &isHeapObject, slowPath); + Bind(&isHeapObject); + Branch(IsJsArray(thisValue), &isJsArray, slowPath); + Bind(&isJsArray); + + Branch(IsStableJSArray(glue, thisValue), &isStability, slowPath); + Bind(&isStability); + + GateRef oldLength = GetArrayLength(thisValue); + *result = IntToTaggedPtr(oldLength); + + Branch(Int32Equal(ChangeIntPtrToInt32(numArgs), Int32(0)), exit, &checkSmallArgs); + Bind(&checkSmallArgs); + // currently the fast path supports at most 2 args + Branch(Int32LessThanOrEqual(ChangeIntPtrToInt32(numArgs), Int32(2)), &smallArgs, slowPath); + Bind(&smallArgs); + GateRef newLength = Int32Add(oldLength, ChangeIntPtrToInt32(numArgs)); + + DEFVARIABLE(elements, VariableType::JS_ANY(), GetElementsArray(thisValue)); + GateRef capacity = GetLengthOfTaggedArray(*elements); + Label grow(env); + Label setValue(env); + Branch(Int32GreaterThan(newLength, capacity), &grow, &setValue); + Bind(&grow); + { + elements = + CallRuntime(glue, RTSTUB_ID(JSObjectGrowElementsCapacity), { thisValue, IntToTaggedInt(newLength) }); + Jump(&setValue); + } + Bind(&setValue); + { + Label oneArg(env); + Label twoArg(env); + DEFVARIABLE(index, VariableType::INT32(), Int32(0)); + DEFVARIABLE(value, VariableType::JS_ANY(), Undefined()); + Branch(Int64Equal(numArgs, IntPtr(1)), &oneArg, &twoArg); // 1: one arg + Bind(&oneArg); + { + value = GetCallArg0(numArgs); + index = Int32Add(oldLength, Int32(0)); // 0: slot index + SetValueToTaggedArray(VariableType::JS_ANY(), glue, *elements, *index, *value); + Jump(&setLength); + } + Bind(&twoArg); + { + value = GetCallArg0(numArgs); + index = Int32Add(oldLength, Int32(0)); // 0: slot index + SetValueToTaggedArray(VariableType::JS_ANY(), glue, *elements, *index, *value); + + value = GetCallArg1(numArgs); + index = Int32Add(oldLength, Int32(1)); // 1: slot index + SetValueToTaggedArray(VariableType::JS_ANY(), glue, *elements, *index, *value); + Jump(&setLength); + } + } + Bind(&setLength); + 
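// Write the new length back to the JSArray so the JS-visible length covers the pushed elements.
+ 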
SetArrayLength(glue, thisValue, newLength); + result->WriteVariable(IntToTaggedPtr(newLength)); + Jump(exit); +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_array_stub_builder.h b/ecmascript/compiler/builtins/builtins_array_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..e95eaf5f4d4f82927539c321ac2278c8fb631b7c --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_array_stub_builder.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H +#include "ecmascript/compiler/circuit_builder.h" +#include "ecmascript/compiler/gate.h" +#include "ecmascript/compiler/share_gate_meta_data.h" +#include "ecmascript/compiler/stub_builder-inl.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsArrayStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsArrayStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + ~BuiltinsArrayStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsArrayStubBuilder); + NO_COPY_SEMANTIC(BuiltinsArrayStubBuilder); + void GenerateCircuit() override {} + + void Concat(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Filter(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void ForEach(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void IndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void LastIndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Slice(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Reverse(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); + + void Push(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *result, Label *exit, Label *slowPath); +private: + static constexpr uint32_t MAX_LENGTH_ZERO = 0; + static constexpr uint32_t MAX_LENGTH_ONE = 1; + struct JsArrayRequirements { + bool stable = false; + bool defaultConstructor = false; + }; + GateRef IsJsArrayWithLengthLimit(GateRef glue, GateRef object, + uint32_t maxLength, JsArrayRequirements requirements); +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_ARRAY_STUB_BUILDER_H diff --git a/ecmascript/compiler/builtins/builtins_call_signature.cpp b/ecmascript/compiler/builtins/builtins_call_signature.cpp index 0a3f6b83389641ba75829733e38f1f714a11cd75..890741540963f072a846202947a36eb5720e62a6 100644 --- a/ecmascript/compiler/builtins/builtins_call_signature.cpp +++ b/ecmascript/compiler/builtins/builtins_call_signature.cpp @@ -38,15 +38,9 @@ 
void BuiltinsStubCSigns::Initialize() BuiltinsCallSignature::Initialize(&callSigns_[name]); \ COMMON_INIT(name) - BUILTINS_METHOD_STUB_LIST(INIT_BUILTINS_METHOD) + BUILTINS_STUB_LIST(INIT_BUILTINS_METHOD) #undef INIT_BUILTINS_METHOD -#define INIT_BUILTINS_CONSTRUCTOR(name) \ - BuiltinsWithArgvCallSignature::Initialize(&callSigns_[name]); \ - COMMON_INIT(name) - - BUILTINS_CONSTRUCTOR_STUB_LIST(INIT_BUILTINS_CONSTRUCTOR) -#undef INIT_BUILTINS_CONSTRUCTOR #undef COMMON_INIT BuiltinsCallSignature::Initialize(&builtinsCSign_); BuiltinsWithArgvCallSignature::Initialize(&builtinsWithArgvCSign_); diff --git a/ecmascript/compiler/builtins/builtins_call_signature.h b/ecmascript/compiler/builtins/builtins_call_signature.h index af7f5ec30dbaf2832fee9bf9344d2bde886fdc13..82f7ad4160804ddd349d4858be468b61b484b0fe 100644 --- a/ecmascript/compiler/builtins/builtins_call_signature.h +++ b/ecmascript/compiler/builtins/builtins_call_signature.h @@ -29,13 +29,21 @@ namespace panda::ecmascript::kungfu { // AOT_BUILTINS_STUB_LIST is used in AOT only. #define BUILTINS_STUB_LIST(V) \ BUILTINS_METHOD_STUB_LIST(V) \ - BUILTINS_CONSTRUCTOR_STUB_LIST(V) + BUILTINS_CONSTRUCTOR_STUB_LIST(V) \ + AOT_AND_BUILTINS_STUB_LIST(V) #define BUILTINS_METHOD_STUB_LIST(V) \ - V(CharCodeAt) \ - V(IndexOf) \ - V(Substring) \ - V(CharAt) \ + V(StringCharCodeAt) \ + V(StringIndexOf) \ + V(StringSubstring) \ + V(StringReplace) \ + V(StringCharAt) \ + V(StringFromCharCode) \ + V(StringTrim) \ + V(StringSlice) \ + V(ObjectToString) \ + V(ObjectCreate) \ + V(ObjectAssign) \ V(VectorForEach) \ V(VectorReplaceAllElements) \ V(StackForEach) \ @@ -50,22 +58,60 @@ namespace panda::ecmascript::kungfu { V(ListForEach) \ V(ArrayListForEach) \ V(ArrayListReplaceAllElements) \ - V(FunctionPrototypeApply) + V(FunctionPrototypeApply) \ + V(ArrayConcat) \ + V(ArrayFilter) \ + V(ArrayForEach) \ + V(ArrayIndexOf) \ + V(ArrayLastIndexOf) \ + V(ArraySlice) \ + V(ArrayReverse) \ + V(ArrayPush) \ + V(SetClear) \ + V(SetValues) \ + V(SetEntries) \ + V(SetForEach) \ + V(SetAdd) \ + V(SetDelete) \ + V(SetHas) \ + V(MapClear) \ + V(MapValues) \ + V(MapEntries) \ + V(MapKeys) \ + V(MapForEach) \ + V(MapSet) \ + V(MapDelete) \ + V(MapHas) \ + V(NumberParseFloat) #define BUILTINS_CONSTRUCTOR_STUB_LIST(V) \ V(BooleanConstructor) \ + V(NumberConstructor) \ V(DateConstructor) \ V(ArrayConstructor) +#define AOT_AND_BUILTINS_STUB_LIST(V) \ + V(LocaleCompare) + #define AOT_BUILTINS_STUB_LIST(V) \ - V(SQRT) \ + V(SQRT) /* list start and math list start */ \ V(COS) \ V(SIN) \ V(ACOS) \ V(ATAN) \ V(ABS) \ - V(FLOOR) \ - V(LocaleCompare) + V(FLOOR) /* math list end */ \ + V(SORT) \ + V(STRINGIFY) \ + V(MAP_PROTO_ITERATOR) \ + V(SET_PROTO_ITERATOR) \ + V(STRING_PROTO_ITERATOR) \ + V(ARRAY_PROTO_ITERATOR) \ + V(TYPED_ARRAY_PROTO_ITERATOR) \ + V(MAP_ITERATOR_PROTO_NEXT) \ + V(SET_ITERATOR_PROTO_NEXT) \ + V(STRING_ITERATOR_PROTO_NEXT) \ + V(ARRAY_ITERATOR_PROTO_NEXT) class BuiltinsStubCSigns { public: @@ -79,8 +125,13 @@ public: AOT_BUILTINS_STUB_LIST(DEF_STUB_ID) #undef DEF_STUB_ID BUILTINS_CONSTRUCTOR_STUB_FIRST = BooleanConstructor, + TYPED_BUILTINS_FIRST = SQRT, + TYPED_BUILTINS_LAST = ARRAY_ITERATOR_PROTO_NEXT, + TYPED_BUILTINS_MATH_FIRST = SQRT, + TYPED_BUILTINS_MATH_LAST = FLOOR, INVALID = 0xFF, }; + static_assert(ID::NONE == 0); static void Initialize(); @@ -115,18 +166,15 @@ public: static bool IsTypedBuiltin(ID builtinId) { - switch (builtinId) { - case BuiltinsStubCSigns::ID::COS: - case BuiltinsStubCSigns::ID::SIN: - case BuiltinsStubCSigns::ID::ACOS: - case 
BuiltinsStubCSigns::ID::ATAN: - case BuiltinsStubCSigns::ID::ABS: - case BuiltinsStubCSigns::ID::FLOOR: - case BuiltinsStubCSigns::ID::SQRT: - return true; - default: - return false; - } + return (BuiltinsStubCSigns::ID::LocaleCompare == builtinId) || + ((BuiltinsStubCSigns::ID::TYPED_BUILTINS_FIRST <= builtinId) && + (builtinId <= BuiltinsStubCSigns::ID::TYPED_BUILTINS_LAST)); + } + + static bool IsTypedBuiltinMath(ID builtinId) + { + return (BuiltinsStubCSigns::ID::TYPED_BUILTINS_MATH_FIRST <= builtinId) && + (builtinId <= BuiltinsStubCSigns::ID::TYPED_BUILTINS_MATH_LAST); } static ConstantIndex GetConstantIndex(ID builtinId) @@ -146,6 +194,20 @@ public: return ConstantIndex::MATH_FLOOR_FUNCTION_INDEX; case BuiltinsStubCSigns::ID::SQRT: return ConstantIndex::MATH_SQRT_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::LocaleCompare: + return ConstantIndex::LOCALE_COMPARE_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::SORT: + return ConstantIndex::ARRAY_SORT_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::STRINGIFY: + return ConstantIndex::JSON_STRINGIFY_FUNCTION_INDEX; + case BuiltinsStubCSigns::ID::MAP_ITERATOR_PROTO_NEXT: + return ConstantIndex::MAP_ITERATOR_PROTO_NEXT_INDEX; + case BuiltinsStubCSigns::ID::SET_ITERATOR_PROTO_NEXT: + return ConstantIndex::SET_ITERATOR_PROTO_NEXT_INDEX; + case BuiltinsStubCSigns::ID::STRING_ITERATOR_PROTO_NEXT: + return ConstantIndex::STRING_ITERATOR_PROTO_NEXT_INDEX; + case BuiltinsStubCSigns::ID::ARRAY_ITERATOR_PROTO_NEXT: + return ConstantIndex::ARRAY_ITERATOR_PROTO_NEXT_INDEX; default: LOG_COMPILER(FATAL) << "this branch is unreachable"; UNREACHABLE(); @@ -163,6 +225,8 @@ public: {"abs", ABS}, {"floor", FLOOR}, {"localeCompare", LocaleCompare}, + {"sort", SORT}, + {"stringify", STRINGIFY}, }; if (str2BuiltinId.count(idStr) > 0) { return str2BuiltinId.at(idStr); @@ -190,7 +254,10 @@ enum class BuiltinsArgs : size_t { }; #define BUILTINS_STUB_ID(name) kungfu::BuiltinsStubCSigns::name +// to distinguish with the positive method offset of js function +#define PGO_BUILTINS_STUB_ID(name) ((-1) * kungfu::BuiltinsStubCSigns::name) #define IS_TYPED_BUILTINS_ID(id) kungfu::BuiltinsStubCSigns::IsTypedBuiltin(id) +#define IS_TYPED_BUILTINS_MATH_ID(id) kungfu::BuiltinsStubCSigns::IsTypedBuiltinMath(id) #define GET_TYPED_CONSTANT_INDEX(id) kungfu::BuiltinsStubCSigns::GetConstantIndex(id) } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BUILTINS_CALL_SIGNATURE_H diff --git a/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..26ca3d5cbe199a375e05b070020cd0d60b8841f2 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_collection_stub_builder.cpp @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ecmascript/compiler/builtins/builtins_collection_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_stubs.h" +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/linked_hash_table.h" +#include "ecmascript/js_map.h" +#include "ecmascript/js_set.h" +#include "ecmascript/js_iterator.h" + +namespace panda::ecmascript::kungfu { + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::CheckCollectionObj(Label *thisCollectionObj, Label *slowPath) +{ + // check target obj + auto jsType = std::is_same_v<CollectionType, JSSet> ? JSType::JS_SET : JSType::JS_MAP; + GateRef isJsCollectionObj = IsJSObjectType(thisValue_, jsType); + Branch(isJsCollectionObj, thisCollectionObj, slowPath); +} + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Clear(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + GateRef linkedTable = GetLinked(); + GateRef res = Circuit::NullGate(); + if constexpr (std::is_same_v<CollectionType, JSMap>) { + LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Clear(linkedTable); + } else { + LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_); + res = linkedHashTableStubBuilder.Clear(linkedTable); + } + + Label exception(env); + Label noException(env); + Branch(TaggedIsException(res), &exception, &noException); + Bind(&noException); + SetLinked(res); + Jump(exit); + Bind(&exception); + *result = res; + Jump(exit); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Clear(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Clear(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::CreateIterator(Variable *result, + Label *exit, Label *slowPath, GateRef kind) +{ + auto env = GetEnvironment(); + Label entry(env); + Label thisCollectionObj(env); + // check target obj + CheckCollectionObj(&thisCollectionObj, slowPath); + + Bind(&thisCollectionObj); + NewObjectStubBuilder newBuilder(this); + newBuilder.SetGlue(glue_); + if constexpr (std::is_same_v<CollectionType, JSSet>) { + newBuilder.CreateJSCollectionIterator<JSSetIterator, JSSet>(result, exit, thisValue_, kind); + } else { + newBuilder.CreateJSCollectionIterator<JSMapIterator, JSMap>(result, exit, thisValue_, kind); + } +} + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Values(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::VALUE)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Values(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Values(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Entries(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::KEY_AND_VALUE)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Entries(Variable *result, Label *exit, Label *slowPath); +template void BuiltinsCollectionStubBuilder<JSSet>::Entries(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType> +void BuiltinsCollectionStubBuilder<CollectionType>::Keys(Variable *result, Label *exit, Label *slowPath) +{ + GateRef kind = Int32(static_cast<int32_t>(IterationKind::KEY)); + CreateIterator(result, exit, slowPath, kind); +} + +template void BuiltinsCollectionStubBuilder<JSMap>::Keys(Variable *result, Label *exit, Label *slowPath); + +template <typename CollectionType>
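+// ForEach fast path: validate that the callback is a callable heap object, then let LinkedHashTableStubBuilder drive the iteration.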
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::ForEach(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label thisCollectionObj(env);
+    // check target obj
+    CheckCollectionObj(&thisCollectionObj, slowPath);
+
+    Bind(&thisCollectionObj);
+    GateRef callbackFnHandle = GetCallArg0(numArgs_);
+    Label callable(env);
+    // check heap obj
+    Label heapObj(env);
+    Branch(TaggedIsHeapObject(callbackFnHandle), &heapObj, slowPath);
+    Bind(&heapObj);
+    Branch(IsCallable(callbackFnHandle), &callable, slowPath);
+    Bind(&callable);
+
+    GateRef linkedTable = GetLinked();
+    GateRef res = Circuit::NullGate();
+    if constexpr (std::is_same_v<CollectionType, JSMap>) {
+        LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.ForEach(thisValue_, linkedTable, numArgs_);
+    } else {
+        LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.ForEach(thisValue_, linkedTable, numArgs_);
+    }
+
+    Label exception(env);
+    Branch(TaggedIsException(res), &exception, exit);
+    Bind(&exception);
+    *result = res;
+    Jump(exit);
+}
+
+template void BuiltinsCollectionStubBuilder<JSMap>::ForEach(Variable *result, Label *exit, Label *slowPath);
+template void BuiltinsCollectionStubBuilder<JSSet>::ForEach(Variable *result, Label *exit, Label *slowPath);
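
ForEach above only verifies the receiver and the callback before handing the walk to LinkedHashTableStubBuilder::ForEach. A scalar sketch of that contract (std::function and std::vector are stand-ins, not engine types; an empty function models "not callable"):

    #include <functional>
    #include <utility>
    #include <vector>

    template <typename K, typename V>
    bool ForEachModel(const std::vector<std::pair<K, V>>& table,
                      const std::function<void(const V&, const K&)>& callback) {
        if (!callback) {
            return false;  // corresponds to branching to the slow path (TypeError)
        }
        for (const auto& [key, value] : table) {
            callback(value, key);  // JS argument order: (value, key[, collection])
        }
        return true;
    }
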
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::MapSetOrSetAdd(
+    Variable *result, Label *exit, Label *slowPath, bool isJsMapSet)
+{
+    auto env = GetEnvironment();
+    Label thisCollectionObj(env);
+    // check target obj
+    CheckCollectionObj(&thisCollectionObj, slowPath);
+    Bind(&thisCollectionObj);
+    GateRef key = GetCallArg0(numArgs_);
+    // check key
+    Label keyNotHole(env);
+    Branch(TaggedIsHole(key), slowPath, &keyNotHole);
+    Bind(&keyNotHole);
+    GateRef value = isJsMapSet ? GetCallArg1(numArgs_) : key;
+    GateRef linkedTable = GetLinked();
+    GateRef res = Circuit::NullGate();
+    if constexpr (std::is_same_v<CollectionType, JSMap>) {
+        LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Insert(linkedTable, key, value);
+    } else {
+        LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Insert(linkedTable, key, value);
+    }
+
+    SetLinked(res);
+    *result = thisValue_;
+    Jump(exit);
+}
+
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::Set(Variable *result, Label *exit, Label *slowPath)
+{
+    MapSetOrSetAdd(result, exit, slowPath, true);
+}
+
+template void BuiltinsCollectionStubBuilder<JSMap>::Set(Variable *result, Label *exit, Label *slowPath);
+
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::Add(Variable *result, Label *exit, Label *slowPath)
+{
+    MapSetOrSetAdd(result, exit, slowPath, false);
+}
+
+template void BuiltinsCollectionStubBuilder<JSSet>::Add(Variable *result, Label *exit, Label *slowPath);
+
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::Delete(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label thisCollectionObj(env);
+    // check target obj
+    CheckCollectionObj(&thisCollectionObj, slowPath);
+
+    Bind(&thisCollectionObj);
+    GateRef key = GetCallArg0(numArgs_);
+    GateRef linkedTable = GetLinked();
+    GateRef res = Circuit::NullGate();
+    if constexpr (std::is_same_v<CollectionType, JSMap>) {
+        LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Delete(linkedTable, key);
+    } else {
+        LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Delete(linkedTable, key);
+    }
+    *result = res;
+    Jump(exit);
+}
+
+template void BuiltinsCollectionStubBuilder<JSMap>::Delete(Variable *result, Label *exit, Label *slowPath);
+template void BuiltinsCollectionStubBuilder<JSSet>::Delete(Variable *result, Label *exit, Label *slowPath);
+
+template <typename CollectionType>
+void BuiltinsCollectionStubBuilder<CollectionType>::Has(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label thisCollectionObj(env);
+    // check target obj
+    CheckCollectionObj(&thisCollectionObj, slowPath);
+
+    Bind(&thisCollectionObj);
+    GateRef key = GetCallArg0(numArgs_);
+    GateRef linkedTable = GetLinked();
+    GateRef res = Circuit::NullGate();
+    if constexpr (std::is_same_v<CollectionType, JSMap>) {
+        LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Has(linkedTable, key);
+    } else {
+        LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject> linkedHashTableStubBuilder(this, glue_);
+        res = linkedHashTableStubBuilder.Has(linkedTable, key);
+    }
+    *result = res;
+    Jump(exit);
+}
+
+template void BuiltinsCollectionStubBuilder<JSMap>::Has(Variable *result, Label *exit, Label *slowPath);
+template void BuiltinsCollectionStubBuilder<JSSet>::Has(Variable *result, Label *exit, Label *slowPath);
+} // namespace panda::ecmascript::kungfu
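
Note how little separates Map.prototype.set from Set.prototype.add here: MapSetOrSetAdd rejects a hole key, then inserts into the shared linked table, with isJsMapSet deciding whether the value comes from the second argument or is the key itself. A scalar model of that unification (std::map stands in for the linked hash table; insertion order is not modelled):

    #include <map>
    #include <string>

    using TableModel = std::map<std::string, std::string>;

    void MapSetModel(TableModel& table, const std::string& key, const std::string& value) {
        table[key] = value;
    }

    void SetAddModel(TableModel& table, const std::string& key) {
        MapSetModel(table, key, key);  // isJsMapSet == false: the key doubles as the value
    }
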
diff --git a/ecmascript/compiler/builtins/builtins_collection_stub_builder.h b/ecmascript/compiler/builtins/builtins_collection_stub_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..36298475e7b365a828cad7c18687400042993009
--- /dev/null
+++ b/ecmascript/compiler/builtins/builtins_collection_stub_builder.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H
+#define ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H
+#include "ecmascript/compiler/stub_builder-inl.h"
+#include "ecmascript/compiler/builtins/linked_hashtable_stub_builder.h"
+
+namespace panda::ecmascript::kungfu {
+template <typename CollectionType>
+class BuiltinsCollectionStubBuilder : public BuiltinsStubBuilder {
+public:
+    explicit BuiltinsCollectionStubBuilder(BuiltinsStubBuilder *parent, GateRef glue, GateRef thisValue,
+        GateRef numArgs) : BuiltinsStubBuilder(parent), glue_(glue), thisValue_(thisValue), numArgs_(numArgs) {}
+    ~BuiltinsCollectionStubBuilder() override = default;
+    NO_MOVE_SEMANTIC(BuiltinsCollectionStubBuilder);
+    NO_COPY_SEMANTIC(BuiltinsCollectionStubBuilder);
+    void GenerateCircuit() override {}
+
+    void Clear(Variable *result, Label *exit, Label *slowPath);
+    void Values(Variable *result, Label *exit, Label *slowPath);
+    void Entries(Variable *result, Label *exit, Label *slowPath);
+    void Keys(Variable *result, Label *exit, Label *slowPath);
+    void ForEach(Variable *result, Label *exit, Label *slowPath);
+    void Set(Variable *result, Label *exit, Label *slowPath);
+    void Add(Variable *result, Label *exit, Label *slowPath);
+    void Delete(Variable *result, Label *exit, Label *slowPath);
+    void Has(Variable *result, Label *exit, Label *slowPath);
+
+private:
+    // check target obj
+    void CheckCollectionObj(Label *exit, Label *slowPath);
+    void CreateIterator(Variable *result, Label *exit, Label *slowPath, GateRef iterationKind);
+    void MapSetOrSetAdd(Variable *result, Label *exit, Label *slowPath, bool isJsMapSet);
+
+    GateRef GetLinkedOffset()
+    {
+        int32_t linkedTableOffset = 0;
+        if constexpr (std::is_same_v<CollectionType, JSMap>) {
+            linkedTableOffset = CollectionType::LINKED_MAP_OFFSET;
+        } else {
+            linkedTableOffset = CollectionType::LINKED_SET_OFFSET;
+        }
+        return IntPtr(linkedTableOffset);
+    }
+
+    GateRef GetLinked()
+    {
+        GateRef linkedTableOffset = GetLinkedOffset();
+        return Load(VariableType::JS_ANY(), thisValue_, linkedTableOffset);
+    }
+
+    void SetLinked(GateRef newTable)
+    {
+        GateRef linkedTableOffset = GetLinkedOffset();
+        Store(VariableType::JS_ANY(), glue_, thisValue_, linkedTableOffset, newTable);
+    }
+
+    GateRef glue_;
+    GateRef thisValue_;
+    GateRef numArgs_;
+};
+} // namespace panda::ecmascript::kungfu
+#endif // ECMASCRIPT_COMPILER_BUILTINS_COLLECTION_STUB_BUILDER_H
\ No newline at end of file
diff --git a/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..98dabbcadc691e5d113e8fde9b6e91e0ad9b2dea
--- /dev/null
+++ b/ecmascript/compiler/builtins/builtins_function_stub_builder.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ecmascript/compiler/builtins/builtins_function_stub_builder.h"
+
+#include "ecmascript/compiler/builtins/builtins_object_stub_builder.h"
+#include "ecmascript/compiler/stub_builder-inl.h"
+#include "ecmascript/js_arguments.h"
+
+namespace panda::ecmascript::kungfu {
+
+void BuiltinsFunctionStubBuilder::Apply(GateRef glue, GateRef thisValue,
+    GateRef numArgs, Variable* res, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label targetIsCallable(env);
+    Label targetIsUndefined(env);
+    Label targetNotUndefined(env);
+    Label isHeapObject(env);
+    // 1. If IsCallable(func) is false, throw a TypeError exception
+    Branch(TaggedIsHeapObject(thisValue), &isHeapObject, slowPath);
+    Bind(&isHeapObject);
+    {
+        Branch(IsCallable(thisValue), &targetIsCallable, slowPath);
+        Bind(&targetIsCallable);
+        {
+            GateRef thisArg = GetCallArg0(numArgs);
+            GateRef arrayObj = GetCallArg1(numArgs);
+            // 2. If argArray is null or undefined, then
+            Branch(TaggedIsUndefined(arrayObj), &targetIsUndefined, &targetNotUndefined);
+            Bind(&targetIsUndefined);
+            {
+                // a. Return Call(func, thisArg).
+                res->WriteVariable(JSCallDispatch(glue, thisValue, Int32(0), 0, Circuit::NullGate(),
+                    JSCallMode::CALL_GETTER, { thisArg }));
+                Jump(exit);
+            }
+            Bind(&targetNotUndefined);
+            {
+                // 3. Let argList be CreateListFromArrayLike(argArray).
+                GateRef elements = BuildArgumentsListFastElements(glue, arrayObj);
+                Label targetIsHole(env);
+                Label targetNotHole(env);
+                Branch(TaggedIsHole(elements), &targetIsHole, &targetNotHole);
+                Bind(&targetIsHole);
+                {
+                    BuiltinsObjectStubBuilder objectStubBuilder(this);
+                    GateRef argList = objectStubBuilder.CreateListFromArrayLike(glue, arrayObj);
+                    // 4. ReturnIfAbrupt(argList).
+                    Label isPendingException(env);
+                    Label noPendingException(env);
+                    Branch(HasPendingException(glue), &isPendingException, &noPendingException);
+                    Bind(&isPendingException);
+                    {
+                        Jump(slowPath);
+                    }
+                    Bind(&noPendingException);
+                    {
+                        GateRef argsLength = GetLengthOfTaggedArray(argList);
+                        GateRef argv = PtrAdd(argList, IntPtr(TaggedArray::DATA_OFFSET));
+                        res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(),
+                            JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, argv, thisArg }));
+                        Jump(exit);
+                    }
+                }
+                Bind(&targetNotHole);
+                {
+                    // 6. Return Call(func, thisArg, argList).
+                    Label taggedIsStableJsArg(env);
+                    Label taggedNotStableJsArg(env);
+                    Branch(IsStableJSArguments(glue, arrayObj), &taggedIsStableJsArg, &taggedNotStableJsArg);
+                    Bind(&taggedIsStableJsArg);
+                    {
+                        GateRef hClass = LoadHClass(arrayObj);
+                        GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX);
+                        GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset);
+                        GateRef length = TaggedGetInt(result);
+                        GateRef argsLength = MakeArgListWithHole(glue, elements, length);
+                        GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET));
+                        res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(),
+                            JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }));
+                        Jump(exit);
+                    }
+                    Bind(&taggedNotStableJsArg);
+                    {
+                        GateRef length = GetArrayLength(arrayObj);
+                        GateRef argsLength = MakeArgListWithHole(glue, elements, length);
+                        GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET));
+                        res->WriteVariable(JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(),
+                            JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }));
+                        Jump(exit);
+                    }
+                }
+            }
+        }
+    }
+}
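
Apply leans on the two helpers defined next: BuildArgumentsListFastElements pulls the backing elements array straight out of a stable arguments object or stable JS array, and MakeArgListWithHole clamps the requested length to that array and patches holes to undefined before dispatch. A scalar model of the patching step (std::optional models a hole; this is an illustration, not the engine's layout):

    #include <algorithm>
    #include <cstddef>
    #include <optional>
    #include <vector>

    using SlotModel = std::optional<int>;  // nullopt models a hole; 0 stands in for undefined

    std::size_t MakeArgListModel(std::vector<SlotModel>& elements, std::size_t requestedLen) {
        std::size_t len = std::min(requestedLen, elements.size());  // clamp to the backing store
        for (std::size_t i = 0; i < len; ++i) {
            if (!elements[i].has_value()) {
                elements[i] = 0;  // hole -> undefined
            }
        }
        return len;
    }
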
+
+// return elements
+GateRef BuiltinsFunctionStubBuilder::BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj)
+{
+    auto env = GetEnvironment();
+    Label subentry(env);
+    env->SubCfgEntry(&subentry);
+    DEFVARIABLE(res, VariableType::JS_ANY(), Hole());
+    Label exit(env);
+    Label hasStableElements(env);
+    Label targetIsStableJSArguments(env);
+    Label targetNotStableJSArguments(env);
+    Label targetIsInt(env);
+    Label hClassEqual(env);
+    Label targetIsStableJSArray(env);
+    Label targetNotStableJSArray(env);
+
+    Branch(HasStableElements(glue, arrayObj), &hasStableElements, &exit);
+    Bind(&hasStableElements);
+    {
+        Branch(IsStableJSArguments(glue, arrayObj), &targetIsStableJSArguments, &targetNotStableJSArguments);
+        Bind(&targetIsStableJSArguments);
+        {
+            GateRef hClass = LoadHClass(arrayObj);
+            GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit()));
+            GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue, glueGlobalEnvOffset);
+            GateRef argmentsClass = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv,
+                GlobalEnv::ARGUMENTS_CLASS);
+            Branch(Int64Equal(hClass, argmentsClass), &hClassEqual, &exit);
+            Bind(&hClassEqual);
+            {
+                GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX);
+                GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset);
+                Branch(TaggedIsInt(result), &targetIsInt, &exit);
+                Bind(&targetIsInt);
+                {
+                    res = GetElementsArray(arrayObj);
+                    Jump(&exit);
+                }
+            }
+        }
+        Bind(&targetNotStableJSArguments);
+        {
+            Branch(IsStableJSArray(glue, arrayObj), &targetIsStableJSArray, &targetNotStableJSArray);
+            Bind(&targetIsStableJSArray);
+            {
+                res = GetElementsArray(arrayObj);
+                Jump(&exit);
+            }
+            Bind(&targetNotStableJSArray);
+            {
+                FatalPrint(glue, { Int32(GET_MESSAGE_STRING_ID(ThisBranchIsUnreachable)) });
+                Jump(&exit);
+            }
+        }
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+GateRef BuiltinsFunctionStubBuilder::MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length)
+{
+    auto env = GetEnvironment();
+    Label subentry(env);
+    env->SubCfgEntry(&subentry);
+    DEFVARIABLE(res, VariableType::INT32(), length);
+    DEFVARIABLE(i, VariableType::INT32(), Int32(0));
+    Label exit(env);
+
+    GateRef argsLength =
GetLengthOfTaggedArray(argv); + + Label lengthGreaterThanArgsLength(env); + Label lengthLessThanArgsLength(env); + Branch(Int32GreaterThan(length, argsLength), &lengthGreaterThanArgsLength, &lengthLessThanArgsLength); + Bind(&lengthGreaterThanArgsLength); + { + res = argsLength; + Jump(&lengthLessThanArgsLength); + } + Bind(&lengthLessThanArgsLength); + { + Label loopHead(env); + Label loopEnd(env); + Label targetIsHole(env); + Label targetNotHole(env); + Branch(Int32UnsignedLessThan(*i, *res), &loopHead, &exit); + LoopBegin(&loopHead); + { + GateRef value = GetValueFromTaggedArray(argv, *i); + Branch(TaggedIsHole(value), &targetIsHole, &targetNotHole); + Bind(&targetIsHole); + { + SetValueToTaggedArray(VariableType::JS_ANY(), glue, argv, *i, Undefined()); + Jump(&targetNotHole); + } + Bind(&targetNotHole); + i = Int32Add(*i, Int32(1)); + Branch(Int32UnsignedLessThan(*i, *res), &loopEnd, &exit); + } + Bind(&loopEnd); + LoopEnd(&loopHead); + } + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_function_stub_builder.h b/ecmascript/compiler/builtins/builtins_function_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..babea4fce2d3eb34dcf7356f5abc220a76717c9c --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_function_stub_builder.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_stubs.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsFunctionStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsFunctionStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + ~BuiltinsFunctionStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsFunctionStubBuilder); + NO_COPY_SEMANTIC(BuiltinsFunctionStubBuilder); + void GenerateCircuit() override {} + void Apply(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + GateRef BuildArgumentsListFastElements(GateRef glue, GateRef arrayObj); +private: + GateRef MakeArgListWithHole(GateRef glue, GateRef argv, GateRef length); +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_FUNCTION_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_number_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_number_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b537dd27c93fb87ad7d283570adac21844d08d46 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_number_stub_builder.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_number_stub_builder.h" + +#include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/js_arguments.h" +#include "ecmascript/tagged_dictionary.h" + +namespace panda::ecmascript::kungfu { +void BuiltinsNumberStubBuilder::ParseFloat(Variable *result, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + Label definedMsg(env); + Label undefinedMsg(env); + GateRef msg = GetCallArg0(numArgs_); + Branch(TaggedIsUndefined(msg), &undefinedMsg, &definedMsg); + Bind(&undefinedMsg); + { + *result = DoubleToTaggedDoublePtr(Double(base::NAN_VALUE)); + Jump(exit); + } + Bind(&definedMsg); + { + Label heapObj(env); + Label stringObj(env); + Branch(TaggedIsHeapObject(msg), &heapObj, slowPath); + Bind(&heapObj); + Branch(IsString(msg), &stringObj, slowPath); + Bind(&stringObj); + { + *result = CallNGCRuntime(glue_, RTSTUB_ID(NumberHelperStringToDouble), { msg }); + Jump(exit); + } + } +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_number_stub_builder.h b/ecmascript/compiler/builtins/builtins_number_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..564fc2ffda95de81998ccc7bbd8451796cdd7f50 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_number_stub_builder.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_NUMBER_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_NUMBER_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_stubs.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsNumberStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsNumberStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + BuiltinsNumberStubBuilder(BuiltinsStubBuilder *parent, GateRef glue, GateRef thisValue, GateRef numArgs) + : BuiltinsStubBuilder(parent), glue_(glue), thisValue_(thisValue), numArgs_(numArgs) {} + ~BuiltinsNumberStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsNumberStubBuilder); + NO_COPY_SEMANTIC(BuiltinsNumberStubBuilder); + void GenerateCircuit() override {} + void ParseFloat(Variable *result, Label *exit, Label *slowPath); + +private: + GateRef glue_ { Circuit::NullGate() }; + GateRef thisValue_ { Circuit::NullGate() }; + GateRef numArgs_ { Circuit::NullGate() }; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_NUMBER_STUB_BUILDER_H diff --git a/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1228e4ad6c49b9dc00fb2c87bf8cb26c729a3796 --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_object_stub_builder.cpp @@ -0,0 +1,687 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/builtins_object_stub_builder.h" + +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/compiler/typed_array_stub_builder.h" +#include "ecmascript/js_arguments.h" +#include "ecmascript/message_string.h" +#include "ecmascript/tagged_dictionary.h" + +namespace panda::ecmascript::kungfu { +GateRef BuiltinsObjectStubBuilder::CreateListFromArrayLike(GateRef glue, GateRef arrayObj) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(res, VariableType::JS_ANY(), Hole()); + DEFVARIABLE(index, VariableType::INT32(), Int32(0)); + Label exit(env); + + // 3. If Type(obj) is Object, throw a TypeError exception. + Label targetIsHeapObject(env); + Label targetIsEcmaObject(env); + Label targetNotEcmaObject(env); + Branch(TaggedIsHeapObject(arrayObj), &targetIsHeapObject, &targetNotEcmaObject); + Bind(&targetIsHeapObject); + Branch(TaggedObjectIsEcmaObject(arrayObj), &targetIsEcmaObject, &targetNotEcmaObject); + Bind(&targetNotEcmaObject); + { + GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(TargetTypeNotObject)); + CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); + Jump(&exit); + } + Bind(&targetIsEcmaObject); + { + // 4. Let len be ToLength(Get(obj, "length")). 
+ GateRef lengthString = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, + ConstantIndex::LENGTH_STRING_INDEX); + GateRef value = FastGetPropertyByName(glue, arrayObj, lengthString, ProfileOperation()); + GateRef number = ToLength(glue, value); + // 5. ReturnIfAbrupt(len). + Label isPendingException1(env); + Label noPendingException1(env); + Branch(HasPendingException(glue), &isPendingException1, &noPendingException1); + Bind(&isPendingException1); + { + Jump(&exit); + } + Bind(&noPendingException1); + { + Label indexInRange(env); + Label indexOutRange(env); + + GateRef doubleLen = GetDoubleOfTNumber(number); + Branch(DoubleGreaterThan(doubleLen, Double(JSObject::MAX_ELEMENT_INDEX)), &indexOutRange, &indexInRange); + Bind(&indexOutRange); + { + GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(LenGreaterThanMax)); + CallRuntime(glue, RTSTUB_ID(ThrowTypeError), { IntToTaggedInt(taggedId) }); + Jump(&exit); + } + Bind(&indexInRange); + { + GateRef int32Len = DoubleToInt(glue, doubleLen); + // 6. Let list be an empty List. + NewObjectStubBuilder newBuilder(this); + GateRef array = newBuilder.NewTaggedArray(glue, int32Len); + Label targetIsTypeArray(env); + Label targetNotTypeArray(env); + Branch(IsTypedArray(arrayObj), &targetIsTypeArray, &targetNotTypeArray); + Bind(&targetIsTypeArray); + { + TypedArrayStubBuilder arrayStubBuilder(this); + arrayStubBuilder.FastCopyElementToArray(glue, arrayObj, array); + // c. ReturnIfAbrupt(next). + Label isPendingException2(env); + Label noPendingException2(env); + Branch(HasPendingException(glue), &isPendingException2, &noPendingException2); + Bind(&isPendingException2); + { + Jump(&exit); + } + Bind(&noPendingException2); + { + res = array; + Jump(&exit); + } + } + Bind(&targetNotTypeArray); + // 8. Repeat while index < len + Label loopHead(env); + Label loopEnd(env); + Label afterLoop(env); + Label isPendingException3(env); + Label noPendingException3(env); + Label storeValue(env); + Jump(&loopHead); + LoopBegin(&loopHead); + { + Branch(Int32UnsignedLessThan(*index, int32Len), &storeValue, &afterLoop); + Bind(&storeValue); + { + GateRef next = FastGetPropertyByIndex(glue, arrayObj, *index, ProfileOperation()); + // c. ReturnIfAbrupt(next). 
+                        Branch(HasPendingException(glue), &isPendingException3, &noPendingException3);
+                        Bind(&isPendingException3);
+                        {
+                            Jump(&exit);
+                        }
+                        Bind(&noPendingException3);
+                        SetValueToTaggedArray(VariableType::JS_ANY(), glue, array, *index, next);
+                        index = Int32Add(*index, Int32(1));
+                        Jump(&loopEnd);
+                    }
+                }
+                Bind(&loopEnd);
+                LoopEnd(&loopHead);
+                Bind(&afterLoop);
+                {
+                    res = array;
+                    Jump(&exit);
+                }
+            }
+        }
+    }
+    Bind(&exit);
+    GateRef ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+void BuiltinsObjectStubBuilder::ToString(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label ecmaObj(env);
+    // undefined
+    Label undefined(env);
+    Label checknull(env);
+    Branch(TaggedIsUndefined(thisValue_), &undefined, &checknull);
+    Bind(&undefined);
+    {
+        *result = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, ConstantIndex::UNDEFINED_TO_STRING_INDEX);
+        Jump(exit);
+    }
+    // null
+    Bind(&checknull);
+    Label null(env);
+    Label checkObject(env);
+    Branch(TaggedIsNull(thisValue_), &null, &checkObject);
+    Bind(&null);
+    {
+        *result = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_, ConstantIndex::NULL_TO_STRING_INDEX);
+        Jump(exit);
+    }
+
+    Bind(&checkObject);
+    Branch(IsEcmaObject(thisValue_), &ecmaObj, slowPath);
+    Bind(&ecmaObj);
+    {
+        GateRef glueGlobalEnvOffset = IntPtr(JSThread::GlueData::GetGlueGlobalEnvOffset(env->Is32Bit()));
+        GateRef glueGlobalEnv = Load(VariableType::NATIVE_POINTER(), glue_, glueGlobalEnvOffset);
+        GateRef toStringTagSymbol = GetGlobalEnvValue(VariableType::JS_ANY(), glueGlobalEnv,
+            GlobalEnv::TOSTRINGTAG_SYMBOL_INDEX);
+        GateRef tag = FastGetPropertyByName(glue_, thisValue_, toStringTagSymbol, ProfileOperation());
+
+        Label defaultToString(env);
+        Branch(TaggedIsString(tag), slowPath, &defaultToString);
+        Bind(&defaultToString);
+        {
+            // default object
+            Label objectTag(env);
+            Branch(IsJSObjectType(thisValue_, JSType::JS_OBJECT), &objectTag, slowPath);
+            Bind(&objectTag);
+            {
+                // [object object]
+                *result = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_,
+                    ConstantIndex::OBJECT_TO_STRING_INDEX);
+                Jump(exit);
+            }
+        }
+    }
+}
+
+GateRef BuiltinsObjectStubBuilder::TransProtoWithoutLayout(GateRef hClass, GateRef proto)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
+
+    GateRef key = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_,
+        ConstantIndex::PROTOTYPE_STRING_INDEX);
+    GateRef newClass = CallNGCRuntime(glue_, RTSTUB_ID(JSHClassFindProtoTransitions), { hClass, key, proto });
+    Label undef(env);
+    Label find(env);
+    Branch(IntPtrEqual(TaggedCastToIntPtr(newClass), IntPtr(0)), &undef, &find);
+    Bind(&find);
+    {
+        result = newClass;
+        Jump(&exit);
+    }
+    Bind(&undef);
+    {
+        result = CallRuntime(glue_, RTSTUB_ID(HClassCloneWithAddProto), { hClass, key, proto });
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *result;
+    env->SubCfgExit();
+    return ret;
+}
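
The ToString fast path above can only produce one of three cached strings; any string-valued @@toStringTag, or a receiver that is not an ordinary JS_OBJECT, falls back to the slow path. A scalar model of that decision (nullopt means "take the slow path"):

    #include <optional>
    #include <string>

    std::optional<std::string> ObjectToStringModel(bool isUndefined, bool isNull,
                                                   bool isPlainObject, bool tagIsString) {
        if (isUndefined) return "[object Undefined]";
        if (isNull) return "[object Null]";
        if (tagIsString || !isPlainObject) return std::nullopt;  // slow path
        return "[object Object]";
    }
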
+
+GateRef BuiltinsObjectStubBuilder::OrdinaryNewJSObjectCreate(GateRef proto)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
+
+    GateRef hClass = GetGlobalConstantValue(VariableType::JS_POINTER(), glue_,
+        ConstantIndex::OBJECT_HCLASS_INDEX);
+    GateRef newClass = TransProtoWithoutLayout(hClass, proto);
+    Label exception(env);
+    Label noexception(env);
+    Branch(TaggedIsException(newClass), &exception, &noexception);
+    Bind(&exception);
+    {
+        result = Exception();
+        Jump(&exit);
+    }
+    Bind(&noexception);
+    NewObjectStubBuilder newBuilder(this);
+    GateRef newObj = newBuilder.NewJSObject(glue_, newClass);
+    Label exceptionNewObj(env);
+    Label noexceptionNewObj(env);
+    Branch(TaggedIsException(newObj), &exceptionNewObj, &noexceptionNewObj);
+    Bind(&exceptionNewObj);
+    {
+        result = Exception();
+        Jump(&exit);
+    }
+    Bind(&noexceptionNewObj);
+    {
+        SetExtensibleToBitfield(glue_, newObj, True());
+        result = newObj;
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *result;
+    env->SubCfgExit();
+    return ret;
+}
+
+void BuiltinsObjectStubBuilder::Create(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label newObject(env);
+
+    GateRef proto = GetCallArg0(numArgs_);
+    GateRef protoIsNull = TaggedIsNull(proto);
+    GateRef protoIsEcmaObj = IsEcmaObject(proto);
+    Branch(BoolAnd(BoolNot(protoIsEcmaObj), BoolNot(protoIsNull)), slowPath, &newObject);
+    Bind(&newObject);
+    {
+        Label noProperties(env);
+        GateRef propertiesObject = GetCallArg1(numArgs_);
+        Branch(TaggedIsUndefined(propertiesObject), &noProperties, slowPath);
+        Bind(&noProperties);
+        {
+            // OrdinaryNewJSObjectCreate
+            *result = OrdinaryNewJSObjectCreate(proto);
+            Jump(exit);
+        }
+    }
+}
+
+void BuiltinsObjectStubBuilder::AssignEnumElementProperty(Variable *result, Label *funcExit,
+    GateRef toAssign, GateRef source)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+
+    GateRef elements = GetElementsArray(source);
+    Label dictionaryMode(env);
+    Label notDictionaryMode(env);
+    Branch(IsDictionaryMode(elements), &dictionaryMode, &notDictionaryMode);
+    Bind(&notDictionaryMode);
+    {
+        GateRef len = GetLengthOfTaggedArray(elements);
+        DEFVARIABLE(idx, VariableType::INT32(), Int32(0));
+        Label loopHead(env);
+        Label loopEnd(env);
+        Label next(env);
+        Label loopExit(env);
+
+        Jump(&loopHead);
+        LoopBegin(&loopHead);
+        {
+            Branch(Int32LessThan(*idx, len), &next, &loopExit);
+            Bind(&next);
+            GateRef value = GetValueFromTaggedArray(elements, *idx);
+            Label notHole(env);
+            Branch(TaggedIsHole(value), &loopEnd, &notHole);
+            Bind(&notHole);
+            {
+                // key, value
+                FastSetPropertyByIndex(glue_, toAssign, *idx, value);
+                Label exception(env);
+                Branch(HasPendingException(glue_), &exception, &loopEnd);
+                Bind(&exception);
+                {
+                    *result = Exception();
+                    Jump(funcExit);
+                }
+            }
+        }
+        Bind(&loopEnd);
+        idx = Int32Add(*idx, Int32(1));
+        LoopEnd(&loopHead);
+        Bind(&loopExit);
+        Jump(&exit);
+    }
+    Bind(&dictionaryMode);
+    {
+        // NumberDictionary::VisitAllEnumProperty
+        GateRef sizeIndex = Int32(TaggedHashTable<NumberDictionary>::SIZE_INDEX);
+        GateRef size = GetInt32OfTInt(GetValueFromTaggedArray(elements, sizeIndex));
+        DEFVARIABLE(idx, VariableType::INT32(), Int32(0));
+        Label loopHead(env);
+        Label loopEnd(env);
+        Label next(env);
+        Label loopExit(env);
+
+        Jump(&loopHead);
+        LoopBegin(&loopHead);
+        {
+            Branch(Int32LessThan(*idx, size), &next, &loopExit);
+            Bind(&next);
+            GateRef key = GetKeyFromDictionary<NumberDictionary>(elements, *idx);
+            Label checkEnumerable(env);
+            Branch(BoolOr(TaggedIsUndefined(key), TaggedIsHole(key)), &loopEnd, &checkEnumerable);
+            Bind(&checkEnumerable);
+            {
+                GateRef attr = GetAttributesFromDictionary<NumberDictionary>(elements, *idx);
+                Label enumerable(env);
+                Branch(IsEnumerable(attr), &enumerable, &loopEnd);
+                Bind(&enumerable);
+                {
+                    GateRef value = GetValueFromDictionary<NumberDictionary>(elements, *idx);
+                    Label notHole(env);
+                    Branch(TaggedIsHole(value), &loopEnd, &notHole);
+                    Bind(&notHole);
+                    {
+                        // value
+                        FastSetPropertyByIndex(glue_, toAssign, *idx, value);
+                        Label exception(env);
+                        Branch(HasPendingException(glue_), &exception, &loopEnd);
+                        Bind(&exception);
+                        {
+                            *result = Exception();
+                            Jump(funcExit);
+                        }
+                    }
+                }
+            }
+        }
+        Bind(&loopEnd);
+        idx = Int32Add(*idx, Int32(1));
+        LoopEnd(&loopHead);
+        Bind(&loopExit);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+}
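
A scalar model of the element-copy loop just defined (illustration only): holes are never copied, and in dictionary mode an entry must also be enumerable to survive the copy:

    #include <cstddef>
    #include <optional>
    #include <vector>

    using ElementModel = std::optional<int>;  // nullopt models a hole

    void AssignElementsModel(std::vector<ElementModel>& target, const std::vector<ElementModel>& source) {
        if (target.size() < source.size()) {
            target.resize(source.size());
        }
        for (std::size_t i = 0; i < source.size(); ++i) {
            if (source[i].has_value()) {  // holes are skipped, mirroring TaggedIsHole -> loopEnd
                target[i] = source[i];
            }
        }
    }
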
+
+void BuiltinsObjectStubBuilder::LayoutInfoAssignAllEnumProperty(Variable *result, Label *funcExit,
+    GateRef toAssign, GateRef source)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+
+    // LayoutInfo::VisitAllEnumProperty
+    GateRef cls = LoadHClass(source);
+    GateRef num = GetNumberOfPropsFromHClass(cls);
+    GateRef layout = GetLayoutFromHClass(cls);
+    DEFVARIABLE(idx, VariableType::INT32(), Int32(0));
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*idx, num), &next, &loopExit);
+        Bind(&next);
+
+        GateRef key = GetKeyFromLayoutInfo(layout, *idx);
+        GateRef attr = TruncInt64ToInt32(GetPropAttrFromLayoutInfo(layout, *idx));
+        Label stringKey(env);
+        Branch(TaggedIsString(key), &stringKey, &loopEnd);
+        Bind(&stringKey);
+        {
+            Label enumerable(env);
+            Branch(IsEnumerable(attr), &enumerable, &loopEnd);
+            Bind(&enumerable);
+            {
+                DEFVARIABLE(value, VariableType::JS_ANY(), Undefined());
+                value = JSObjectGetProperty(source, cls, attr);
+                // exception
+                Label exception0(env);
+                Label noexception0(env);
+                Branch(HasPendingException(glue_), &exception0, &noexception0);
+                Bind(&exception0);
+                {
+                    *result = Exception();
+                    Jump(funcExit);
+                }
+                Bind(&noexception0);
+                Label propertyBox(env);
+                Label checkAccessor(env);
+                Label setValue(env);
+                Branch(TaggedIsPropertyBox(*value), &propertyBox, &checkAccessor);
+                Bind(&propertyBox);
+                {
+                    value = GetValueFromPropertyBox(*value);
+                    Jump(&setValue);
+                }
+                Bind(&checkAccessor);
+                Label isAccessor(env);
+                Branch(IsAccessor(attr), &isAccessor, &setValue);
+                Bind(&isAccessor);
+                {
+                    value = CallGetterHelper(glue_, source, source, *value, ProfileOperation());
+                    Label exception(env);
+                    Branch(HasPendingException(glue_), &exception, &setValue);
+                    Bind(&exception);
+                    {
+                        *result = Exception();
+                        Jump(funcExit);
+                    }
+                }
+                Bind(&setValue);
+                {
+                    FastSetPropertyByName(glue_, toAssign, key, *value);
+                    Label exception(env);
+                    Branch(HasPendingException(glue_), &exception, &loopEnd);
+                    Bind(&exception);
+                    {
+                        *result = Exception();
+                        Jump(funcExit);
+                    }
+                }
+            }
+        }
+    }
+    Bind(&loopEnd);
+    idx = Int32Add(*idx, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+    Jump(&exit);
+
+    Bind(&exit);
+    env->SubCfgExit();
+}
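
LayoutInfoAssignAllEnumProperty above and NameDictionaryAssignAllEnumProperty below share one contract: an accessor property is resolved through its getter (CallGetterHelper) before the plain value is written to the target, so Object.assign observes getter side effects and copies a snapshot rather than the accessor itself. A scalar model:

    #include <functional>
    #include <map>
    #include <string>
    #include <variant>

    using GetterModel = std::function<int()>;
    using PropertyModel = std::variant<int, GetterModel>;  // plain value or accessor

    void AssignPropertyModel(std::map<std::string, int>& target,
                             const std::string& key, const PropertyModel& prop) {
        if (std::holds_alternative<GetterModel>(prop)) {
            target[key] = std::get<GetterModel>(prop)();  // call the getter, copy the result
        } else {
            target[key] = std::get<int>(prop);
        }
    }
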
+
+void BuiltinsObjectStubBuilder::NameDictionaryAssignAllEnumProperty(Variable *result, Label *funcExit,
+    GateRef toAssign, GateRef source, GateRef properties)
+{
+    // NameDictionary::VisitAllEnumProperty
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+
+    GateRef sizeIndex = Int32(TaggedHashTable<NameDictionary>::SIZE_INDEX);
+    GateRef size = GetInt32OfTInt(GetValueFromTaggedArray(properties, sizeIndex));
+    DEFVARIABLE(idx, VariableType::INT32(), Int32(0));
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*idx, size), &next, &loopExit);
+        Bind(&next);
+        GateRef key = GetKeyFromDictionary<NameDictionary>(properties, *idx);
+        Label stringKey(env);
+        Branch(TaggedIsString(key), &stringKey, &loopEnd);
+        Bind(&stringKey);
+        {
+            GateRef attr = GetAttributesFromDictionary<NameDictionary>(properties, *idx);
+            Label enumerable(env);
+            Branch(IsEnumerable(attr), &enumerable, &loopEnd);
+            Bind(&enumerable);
+            {
+                DEFVARIABLE(value, VariableType::JS_ANY(), Undefined());
+                value = GetValueFromDictionary<NameDictionary>(properties, *idx);
+                Label notHole(env);
+                Branch(TaggedIsHole(*value), &loopEnd, &notHole);
+                Bind(&notHole);
+                {
+                    Label isAccessor(env);
+                    Label notAccessor(env);
+                    Branch(IsAccessor(attr), &isAccessor, &notAccessor);
+                    Bind(&isAccessor);
+                    {
+                        value = CallGetterHelper(glue_, source, source, *value, ProfileOperation());
+                        // exception
+                        Label exception(env);
+                        Branch(HasPendingException(glue_), &exception, &notAccessor);
+                        Bind(&exception);
+                        {
+                            *result = Exception();
+                            Jump(funcExit);
+                        }
+                    }
+                    Bind(&notAccessor);
+                    {
+                        FastSetPropertyByName(glue_, toAssign, key, *value);
+                        Label exception(env);
+                        Branch(HasPendingException(glue_), &exception, &loopEnd);
+                        Bind(&exception);
+                        {
+                            *result = Exception();
+                            Jump(funcExit);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    Bind(&loopEnd);
+    idx = Int32Add(*idx, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+    Jump(&exit);
+
+    Bind(&exit);
+    env->SubCfgExit();
+}
+
+void BuiltinsObjectStubBuilder::AssignAllEnumProperty(Variable *res, Label *funcExit,
+    GateRef toAssign, GateRef source)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+
+    GateRef properties = GetPropertiesArray(source);
+    Label dictionaryMode(env);
+    Label notDictionaryMode(env);
+    Branch(IsDictionaryMode(properties), &dictionaryMode, &notDictionaryMode);
+    Bind(&notDictionaryMode);
+    {
+        LayoutInfoAssignAllEnumProperty(res, funcExit, toAssign, source);
+        Jump(&exit);
+    }
+    Bind(&dictionaryMode);
+    {
+        NameDictionaryAssignAllEnumProperty(res, funcExit, toAssign, source, properties);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+}
+
+void BuiltinsObjectStubBuilder::SlowAssign(Variable *result, Label *funcExit, GateRef toAssign, GateRef source)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+    CallRuntime(glue_, RTSTUB_ID(ObjectSlowAssign), { toAssign, source });
+
+    Label exception(env);
+    Branch(HasPendingException(glue_), &exception, &exit);
+    Bind(&exception);
+    {
+        *result = Exception();
+        Jump(funcExit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+}
+
+void BuiltinsObjectStubBuilder::FastAssign(Variable *res, Label *funcExit, GateRef toAssign, GateRef source)
+{
+    // visit elements
+    AssignEnumElementProperty(res, funcExit, toAssign, source);
+    AssignAllEnumProperty(res, funcExit, toAssign, source);
+}
+
+void BuiltinsObjectStubBuilder::Assign(Variable *res, Label *nextIt, Label *funcExit,
+    GateRef toAssign, GateRef source)
+{
+    auto env = GetEnvironment();
+    Label checkJsObj(env);
+    Branch(BoolOr(TaggedIsNull(source), TaggedIsUndefined(source)), nextIt, &checkJsObj);
+    Bind(&checkJsObj);
+    {
+        Label fastAssign(env);
+        Label slowAssign(env);
+        Branch(IsJSObjectType(source, JSType::JS_OBJECT), &fastAssign, &slowAssign);
+        Bind(&fastAssign);
+        {
+            FastAssign(res, funcExit, toAssign, source);
+            Jump(nextIt);
+        }
+        Bind(&slowAssign);
+        {
+            SlowAssign(res, funcExit, toAssign, source);
+            Jump(nextIt);
+        }
+    }
+}
+
+void BuiltinsObjectStubBuilder::Assign(Variable *result, Label *exit, Label *slowPath)
+{
+    auto env = GetEnvironment();
+    Label thisCollectionObj(env);
+
+    GateRef target = GetCallArg0(numArgs_);
+    *result = target;
+    Label jsObject(env);
+    Branch(IsJSObjectType(target, JSType::JS_OBJECT),
&jsObject, slowPath); + Bind(&jsObject); + { + Label twoArg(env); + Label notTwoArg(env); + Branch(Int64Equal(numArgs_, IntPtr(2)), &twoArg, ¬TwoArg); // 2 : two args + Bind(&twoArg); + { + GateRef source = GetCallArg1(numArgs_); + Label next(env); + Assign(result, &next, exit, target, source); + Bind(&next); + Jump(exit); + } + Bind(¬TwoArg); + Label threeArg(env); + Label notThreeArg(env); + Branch(Int64Equal(numArgs_, IntPtr(3)), &threeArg, ¬ThreeArg); // 3 : three args + Bind(&threeArg); + { + Label nextArg(env); + GateRef source = GetCallArg1(numArgs_); + Label next(env); + Assign(result, &next, exit, target, source); + Bind(&next); + Label next1(env); + GateRef source1 = GetCallArg2(numArgs_); + Assign(result, &next1, exit, target, source1); + Bind(&next1); + Jump(exit); + } + Bind(¬ThreeArg); + { + Jump(slowPath); + } + } +} +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_object_stub_builder.h b/ecmascript/compiler/builtins/builtins_object_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a22ae7fada255fea9237f15b0c631f0c9017ecba --- /dev/null +++ b/ecmascript/compiler/builtins/builtins_object_stub_builder.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H +#include "ecmascript/compiler/builtins/builtins_stubs.h" + +namespace panda::ecmascript::kungfu { +class BuiltinsObjectStubBuilder : public BuiltinsStubBuilder { +public: + explicit BuiltinsObjectStubBuilder(StubBuilder *parent) + : BuiltinsStubBuilder(parent) {} + BuiltinsObjectStubBuilder(BuiltinsStubBuilder *parent, GateRef glue, GateRef thisValue, GateRef numArgs) + : BuiltinsStubBuilder(parent), glue_(glue), thisValue_(thisValue), numArgs_(numArgs) {} + ~BuiltinsObjectStubBuilder() override = default; + NO_MOVE_SEMANTIC(BuiltinsObjectStubBuilder); + NO_COPY_SEMANTIC(BuiltinsObjectStubBuilder); + void GenerateCircuit() override {} + GateRef CreateListFromArrayLike(GateRef glue, GateRef arrayObj); + void ToString(Variable *result, Label *exit, Label *slowPath); + void Create(Variable *result, Label *exit, Label *slowPath); + void Assign(Variable *result, Label *exit, Label *slowPath); + +private: + GateRef OrdinaryNewJSObjectCreate(GateRef proto); + GateRef TransProtoWithoutLayout(GateRef hClass, GateRef proto); + void AssignEnumElementProperty(Variable *res, Label *funcExit, GateRef toAssign, GateRef source); + void LayoutInfoAssignAllEnumProperty(Variable *res, Label *funcExit, GateRef toAssign, GateRef source); + void NameDictionaryAssignAllEnumProperty(Variable *res, Label *funcExit, GateRef toAssign, GateRef source, + GateRef properties); + void SlowAssign(Variable *res, Label *funcExit, GateRef toAssign, GateRef source); + void FastAssign(Variable *res, Label *funcExit, GateRef toAssign, GateRef source); + void AssignAllEnumProperty(Variable *res, Label *funcExit, GateRef toAssign, GateRef source); + void Assign(Variable *res, Label *nextIt, Label *funcExit, GateRef toAssign, GateRef source); + + GateRef glue_ {Circuit::NullGate()}; + GateRef thisValue_ {Circuit::NullGate()}; + GateRef numArgs_ {Circuit::NullGate()}; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_OBJECT_STUB_BUILDER_H diff --git a/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp b/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp index b9acfb50097885b62cb76d600f01383f773eef7b..46585a19d96cacc8ac89da3bfcbf006865bc9bb8 100644 --- a/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp +++ b/ecmascript/compiler/builtins/builtins_string_stub_builder.cpp @@ -19,7 +19,785 @@ #include "ecmascript/compiler/new_object_stub_builder.h" namespace panda::ecmascript::kungfu { -GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) +void BuiltinsStringStubBuilder::FromCharCode(GateRef glue, [[maybe_unused]] GateRef thisValue, + GateRef numArgs, Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + DEFVARIABLE(value, VariableType::INT16(), Int16(0)); + Label lengthIsZero(env); + Label lengthNotZero(env); + Label lengthIsOne(env); + Label canBeCompress(env); + Label isInt(env); + Label notInt(env); + Label newObj(env); + Label canNotBeCompress(env); + Label isPendingException(env); + Label noPendingException(env); + Branch(Int64Equal(IntPtr(0), numArgs), &lengthIsZero, &lengthNotZero); + Bind(&lengthIsZero); + res->WriteVariable(GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX)); + Jump(exit); + Bind(&lengthNotZero); + { + Branch(Int64Equal(IntPtr(1), numArgs), &lengthIsOne, slowPath); + Bind(&lengthIsOne); + { + GateRef codePointTag = 
GetCallArg0(numArgs); + GateRef codePointValue = ToNumber(glue, codePointTag); + Branch(HasPendingException(glue), &isPendingException, &noPendingException); + Bind(&isPendingException); + { + res->WriteVariable(Exception()); + Jump(exit); + } + Bind(&noPendingException); + { + Branch(TaggedIsInt(codePointValue), &isInt, ¬Int); + Bind(&isInt); + { + value = TruncInt32ToInt16(GetInt32OfTInt(codePointValue)); + Jump(&newObj); + } + Bind(¬Int); + { + value = TruncInt32ToInt16(DoubleToInt(glue, GetDoubleOfTDouble(codePointValue), base::INT16_BITS)); + Jump(&newObj); + } + Bind(&newObj); + Branch(IsASCIICharacter(ZExtInt16ToInt32(*value)), &canBeCompress, &canNotBeCompress); + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + Bind(&canBeCompress); + { + Label afterNew(env); + newBuilder.AllocLineStringObject(res, &afterNew, Int32(1), true); + Bind(&afterNew); + { + GateRef dst = ChangeStringTaggedPointerToInt64( + PtrAdd(res->ReadVariable(), IntPtr(LineEcmaString::DATA_OFFSET))); + Store(VariableType::INT8(), glue, dst, IntPtr(0), TruncInt16ToInt8(*value)); + Jump(exit); + } + } + Bind(&canNotBeCompress); + { + Label afterNew1(env); + newBuilder.AllocLineStringObject(res, &afterNew1, Int32(1), false); + Bind(&afterNew1); + { + GateRef dst = ChangeStringTaggedPointerToInt64( + PtrAdd(res->ReadVariable(), IntPtr(LineEcmaString::DATA_OFFSET))); + Store(VariableType::INT16(), glue, dst, IntPtr(0), *value); + Jump(exit); + } + } + } + } + } +} + +void BuiltinsStringStubBuilder::CharAt(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); + Label objNotUndefinedAndNull(env); + Label isString(env); + Label next(env); + Label posTagNotUndefined(env); + Label posTagIsInt(env); + Label posTagNotInt(env); + Label posNotGreaterLen(env); + Label posGreaterLen(env); + Label posNotLessZero(env); + Label posTagIsDouble(env); + Label thisIsHeapobject(env); + Label flattenFastPath(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, slowPath); + Bind(&thisIsHeapobject); + Branch(IsString(thisValue), &isString, slowPath); + Bind(&isString); + { + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); + Bind(&flattenFastPath); + GateRef thisLen = GetLengthFromString(thisValue); + Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); + Bind(&posTagNotUndefined); + { + GateRef posTag = GetCallArg0(numArgs); + Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); + Bind(&posTagIsInt); + pos = GetInt32OfTInt(posTag); + Jump(&next); + Bind(&posTagNotInt); + Branch(TaggedIsDouble(posTag), &posTagIsDouble, slowPath); + Bind(&posTagIsDouble); + pos = DoubleToInt(glue, GetDoubleOfTDouble(posTag)); + Jump(&next); + } + Bind(&next); + { + Branch(Int32GreaterThanOrEqual(*pos, thisLen), &posGreaterLen, &posNotGreaterLen); + Bind(&posNotGreaterLen); + { + Branch(Int32LessThan(*pos, Int32(0)), &posGreaterLen, &posNotLessZero); + Bind(&posNotLessZero); + { + StringInfoGateRef stringInfoGate(&thisFlat); + res->WriteVariable(CreateFromEcmaString(glue, *pos, stringInfoGate)); + Jump(exit); + } + } + Bind(&posGreaterLen); + { + res->WriteVariable(GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX)); + Jump(exit); + } + } + } + 
} +} + +void BuiltinsStringStubBuilder::CharCodeAt(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); + Label objNotUndefinedAndNull(env); + Label isString(env); + Label next(env); + Label posTagNotUndefined(env); + Label posTagIsInt(env); + Label posTagNotInt(env); + Label posNotGreaterLen(env); + Label posNotLessZero(env); + Label posTagIsDouble(env); + Label thisIsHeapobject(env); + Label flattenFastPath(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, slowPath); + Bind(&thisIsHeapobject); + Branch(IsString(thisValue), &isString, slowPath); + Bind(&isString); + { + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); + Bind(&flattenFastPath); + GateRef thisLen = GetLengthFromString(thisValue); + Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); + Bind(&posTagNotUndefined); + { + GateRef posTag = GetCallArg0(numArgs); + Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); + Bind(&posTagIsInt); + pos = GetInt32OfTInt(posTag); + Jump(&next); + Bind(&posTagNotInt); + Branch(TaggedIsDouble(posTag), &posTagIsDouble, slowPath); + Bind(&posTagIsDouble); + pos = DoubleToInt(glue, GetDoubleOfTDouble(posTag)); + Jump(&next); + } + Bind(&next); + { + Branch(Int32GreaterThanOrEqual(*pos, thisLen), exit, &posNotGreaterLen); + Bind(&posNotGreaterLen); + { + Branch(Int32LessThan(*pos, Int32(0)), exit, &posNotLessZero); + Bind(&posNotLessZero); + { + StringInfoGateRef stringInfoGate(&thisFlat); + res->WriteVariable(IntToTaggedPtr(StringAt(stringInfoGate, *pos))); + Jump(exit); + } + } + } + } + } +} + +void BuiltinsStringStubBuilder::IndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); + + Label objNotUndefinedAndNull(env); + Label isString(env); + Label isSearchString(env); + Label next(env); + Label resPosGreaterZero(env); + Label searchTagIsHeapObject(env); + Label posTagNotUndefined(env); + Label posTagIsInt(env); + Label posTagNotInt(env); + Label posTagIsDouble(env); + Label nextCount(env); + Label posNotLessThanLen(env); + Label thisIsHeapobject(env); + Label flattenFastPath(env); + Label flattenFastPath1(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, slowPath); + Bind(&thisIsHeapobject); + Branch(IsString(thisValue), &isString, slowPath); + Bind(&isString); + { + GateRef searchTag = GetCallArg0(numArgs); + Branch(TaggedIsHeapObject(searchTag), &searchTagIsHeapObject, slowPath); + Bind(&searchTagIsHeapObject); + Branch(IsString(searchTag), &isSearchString, slowPath); + Bind(&isSearchString); + { + GateRef thisLen = GetLengthFromString(thisValue); + Branch(Int64GreaterThanOrEqual(IntPtr(1), numArgs), &next, &posTagNotUndefined); + Bind(&posTagNotUndefined); + { + GateRef posTag = GetCallArg1(numArgs); + Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); + Bind(&posTagIsInt); + pos = GetInt32OfTInt(posTag); + Jump(&next); + Bind(&posTagNotInt); + Branch(TaggedIsDouble(posTag), &posTagIsDouble, slowPath); + Bind(&posTagIsDouble); + pos = DoubleToInt(glue, 
GetDoubleOfTDouble(posTag)); + Jump(&next); + } + Bind(&next); + { + Label posGreaterThanZero(env); + Label posNotGreaterThanZero(env); + Branch(Int32GreaterThan(*pos, Int32(0)), &posGreaterThanZero, &posNotGreaterThanZero); + Bind(&posNotGreaterThanZero); + { + pos = Int32(0); + Jump(&nextCount); + } + Bind(&posGreaterThanZero); + { + Branch(Int32LessThanOrEqual(*pos, thisLen), &nextCount, &posNotLessThanLen); + Bind(&posNotLessThanLen); + { + pos = thisLen; + Jump(&nextCount); + } + } + Bind(&nextCount); + { + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); + Bind(&flattenFastPath); + FlatStringStubBuilder searchFlat(this); + searchFlat.FlattenString(glue, searchTag, &flattenFastPath1); + Bind(&flattenFastPath1); + StringInfoGateRef thisStringInfoGate(&thisFlat); + StringInfoGateRef searchStringInfoGate(&searchFlat); + GateRef resPos = StringIndexOf(thisStringInfoGate, searchStringInfoGate, *pos); + Branch(Int32GreaterThanOrEqual(resPos, Int32(0)), &resPosGreaterZero, exit); + Bind(&resPosGreaterZero); + { + Label resPosLessZero(env); + Branch(Int32LessThanOrEqual(resPos, thisLen), &resPosLessZero, exit); + Bind(&resPosLessZero); + { + res->WriteVariable(IntToTaggedPtr(resPos)); + Jump(exit); + } + } + } + } + } + } + } +} + +void BuiltinsStringStubBuilder::Substring(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable* res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + DEFVARIABLE(start, VariableType::INT32(), Int32(0)); + DEFVARIABLE(end, VariableType::INT32(), Int32(0)); + DEFVARIABLE(from, VariableType::INT32(), Int32(0)); + DEFVARIABLE(to, VariableType::INT32(), Int32(0)); + + Label objNotUndefinedAndNull(env); + Label isString(env); + Label isSearchString(env); + Label countStart(env); + Label endTagIsUndefined(env); + Label startNotGreatZero(env); + Label countEnd(env); + Label endNotGreatZero(env); + Label countFrom(env); + Label countRes(env); + Label startTagNotUndefined(env); + Label posTagIsInt(env); + Label posTagNotInt(env); + Label posTagIsDouble(env); + Label endTagNotUndefined(env); + Label endTagIsInt(env); + Label endTagNotInt(env); + Label endTagIsDouble(env); + Label endGreatZero(env); + Label endGreatLen(env); + Label startGreatZero(env); + Label startGreatEnd(env); + Label startNotGreatEnd(env); + Label thisIsHeapobject(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, slowPath); + Bind(&thisIsHeapobject); + Branch(IsString(thisValue), &isString, slowPath); + Bind(&isString); + { + Label next(env); + GateRef thisLen = GetLengthFromString(thisValue); + Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &startTagNotUndefined); + Bind(&startTagNotUndefined); + { + GateRef startTag = GetCallArg0(numArgs); + Branch(TaggedIsInt(startTag), &posTagIsInt, &posTagNotInt); + Bind(&posTagIsInt); + start = GetInt32OfTInt(startTag); + Jump(&next); + Bind(&posTagNotInt); + Branch(TaggedIsDouble(startTag), &posTagIsDouble, slowPath); + Bind(&posTagIsDouble); + start = DoubleToInt(glue, GetDoubleOfTDouble(startTag)); + Jump(&next); + } + Bind(&next); + { + Branch(Int64GreaterThanOrEqual(IntPtr(1), numArgs), &endTagIsUndefined, &endTagNotUndefined); + Bind(&endTagIsUndefined); + { + end = thisLen; + Jump(&countStart); + } + Bind(&endTagNotUndefined); + { + GateRef endTag = GetCallArg1(numArgs); + Branch(TaggedIsInt(endTag), &endTagIsInt, &endTagNotInt); + 
Bind(&endTagIsInt); + end = GetInt32OfTInt(endTag); + Jump(&countStart); + Bind(&endTagNotInt); + Branch(TaggedIsDouble(endTag), &endTagIsDouble, slowPath); + Bind(&endTagIsDouble); + end = DoubleToInt(glue, GetDoubleOfTDouble(endTag)); + Jump(&countStart); + } + } + Bind(&countStart); + { + Label startGreatLen(env); + Branch(Int32GreaterThan(*start, Int32(0)), &startGreatZero, &startNotGreatZero); + Bind(&startNotGreatZero); + { + start = Int32(0); + Jump(&countEnd); + } + Bind(&startGreatZero); + { + Branch(Int32GreaterThan(*start, thisLen), &startGreatLen, &countEnd); + Bind(&startGreatLen); + { + start = thisLen; + Jump(&countEnd); + } + } + } + Bind(&countEnd); + { + Branch(Int32GreaterThan(*end, Int32(0)), &endGreatZero, &endNotGreatZero); + Bind(&endNotGreatZero); + { + end = Int32(0); + Jump(&countFrom); + } + Bind(&endGreatZero); + { + Branch(Int32GreaterThan(*end, thisLen), &endGreatLen, &countFrom); + Bind(&endGreatLen); + { + end = thisLen; + Jump(&countFrom); + } + } + } + Bind(&countFrom); + { + Branch(Int32GreaterThan(*start, *end), &startGreatEnd, &startNotGreatEnd); + Bind(&startGreatEnd); + { + from = *end; + to = *start; + Jump(&countRes); + } + Bind(&startNotGreatEnd); + { + from = *start; + to = *end; + Jump(&countRes); + } + } + Bind(&countRes); + { + GateRef len = Int32Sub(*to, *from); + res->WriteVariable(GetSubString(glue, thisValue, *from, len)); + Jump(exit); + } + } + } +} + +GateRef BuiltinsStringStubBuilder::GetSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::JS_POINTER(), Undefined()); + + Label exit(env); + Label flattenFastPath(env); + Label sliceString(env); + Label mayGetSliceString(env); + Label fastSubstring(env); + Label isUtf16(env); + Label isUtf8(env); + Label afterNew(env); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &flattenFastPath); + Bind(&flattenFastPath); + { + Branch(Int32GreaterThanOrEqual(len, Int32(SlicedString::MIN_SLICED_ECMASTRING_LENGTH)), + &mayGetSliceString, &fastSubstring); + Bind(&mayGetSliceString); + { + Branch(IsUtf16String(thisValue), &isUtf16, &sliceString); + Bind(&isUtf16); + { + StringInfoGateRef stringInfoGate(&thisFlat); + GateRef fromOffset = PtrMul(ZExtInt32ToPtr(from), IntPtr(sizeof(uint16_t) / sizeof(uint8_t))); + GateRef source = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); + GateRef canBeCompressed = CanBeCompressed(source, len, true); + Branch(canBeCompressed, &isUtf8, &sliceString); + Bind(&isUtf8); + { + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + newBuilder.AllocLineStringObject(&result, &afterNew, len, true); + Bind(&afterNew); + { + GateRef source1 = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); + GateRef dst = + ChangeStringTaggedPointerToInt64(PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET))); + CopyUtf16AsUtf8(glue, dst, source1, len); + Jump(&exit); + } + } + } + Bind(&sliceString); + { + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + newBuilder.AllocSlicedStringObject(&result, &exit, from, len, &thisFlat); + } + } + Bind(&fastSubstring); + StringInfoGateRef stringInfoGate(&thisFlat); + result = FastSubString(glue, thisValue, from, len, stringInfoGate); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +void BuiltinsStringStubBuilder::Replace(GateRef glue, GateRef thisValue, GateRef numArgs, + 
Variable *res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + + Label objNotUndefinedAndNull(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Label thisIsHeapObj(env); + Label tagsDefined(env); + Label searchIsHeapObj(env); + Label replaceIsHeapObj(env); + + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapObj, slowPath); + Bind(&thisIsHeapObj); + Branch(Int64Equal(IntPtr(2), numArgs), &tagsDefined, slowPath); // 2: number of parameters. search & replace Tag + Bind(&tagsDefined); + { + Label next(env); + + GateRef searchTag = GetCallArg0(numArgs); + Branch(TaggedIsHeapObject(searchTag), &searchIsHeapObj, slowPath); + Bind(&searchIsHeapObj); + GateRef replaceTag = GetCallArg1(numArgs); + Branch(TaggedIsHeapObject(replaceTag), &replaceIsHeapObj, slowPath); + Bind(&replaceIsHeapObj); + Branch(BoolOr(IsJSRegExp(searchTag), IsEcmaObject(searchTag)), slowPath, &next); + Bind(&next); + { + Label allAreStrings(env); + GateRef thisIsString = IsString(thisValue); + GateRef searchIsString = IsString(searchTag); + GateRef replaceIsString = IsString(replaceTag); + Branch(BoolAnd(BoolAnd(thisIsString, searchIsString), replaceIsString), &allAreStrings, slowPath); + Bind(&allAreStrings); + { + Label replaceTagNotCallable(env); + + GateRef replaceTagIsCallable = IsCallable(replaceTag); + + Branch(replaceTagIsCallable, slowPath, &replaceTagNotCallable); + Bind(&replaceTagNotCallable); + { + Label thisFlattenFastPath(env); + Label searchFlattenFastPath(env); + Label noReplace(env); + Label nextProcess(env); + + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &thisFlattenFastPath); + Bind(&thisFlattenFastPath); + StringInfoGateRef thisStringInfoGate(&thisFlat); + FlatStringStubBuilder searchFlat(this); + searchFlat.FlattenString(glue, searchTag, &searchFlattenFastPath); + Bind(&searchFlattenFastPath); + StringInfoGateRef searchStringInfoGate(&searchFlat); + GateRef pos = StringIndexOf(thisStringInfoGate, searchStringInfoGate, Int32(-1)); + Branch(Int32Equal(pos, Int32(-1)), &noReplace, &nextProcess); + Bind(&noReplace); + { + res->WriteVariable(thisValue); + Jump(exit); + } + Bind(&nextProcess); + { + Label functionalReplaceFalse(env); + + Branch(replaceTagIsCallable, slowPath, &functionalReplaceFalse); + Bind(&functionalReplaceFalse); + { + Label replHandleIsString(env); + + GateRef replHandle = GetSubstitution(glue, searchTag, thisValue, pos, replaceTag); + Branch(IsString(replHandle), &replHandleIsString, slowPath); + Bind(&replHandleIsString); + { + GateRef tailPos = Int32Add(pos, searchStringInfoGate.GetLength()); + GateRef prefixString = FastSubString(glue, thisValue, Int32(0), + pos, thisStringInfoGate); + GateRef thisLen = thisStringInfoGate.GetLength(); + GateRef suffixString = FastSubString(glue, thisValue, tailPos, + Int32Sub(thisLen, tailPos), thisStringInfoGate); + GateRef tempStr = StringConcat(glue, prefixString, replHandle); + GateRef resultStr = StringConcat(glue, tempStr, suffixString); + res->WriteVariable(resultStr); + Jump(exit); + } + } + } + } + } + } + } + } +} + +GateRef BuiltinsStringStubBuilder::ConvertAndClampRelativeIndex(GateRef index, GateRef length) +{ + auto env = GetEnvironment(); + + Label entry(env); + env->SubCfgEntry(&entry); + + DEFVARIABLE(relativeIndex, VariableType::INT32(), Int32(-1)); + + Label indexGreaterThanOrEqualZero(env); + Label indexLessThanZero(env); + Label next(env); + + Branch(Int32GreaterThanOrEqual(index, Int32(0)), 
&indexGreaterThanOrEqualZero, &indexLessThanZero); + Bind(&indexGreaterThanOrEqualZero); + { + relativeIndex = index; + Jump(&next); + } + Bind(&indexLessThanZero); + { + relativeIndex = Int32Add(index, length); + Jump(&next); + } + Bind(&next); + { + Label relativeIndexLessThanZero(env); + Label elseCheck(env); + Label exit(env); + + Branch(Int32LessThan(*relativeIndex, Int32(0)), &relativeIndexLessThanZero, &elseCheck); + Bind(&relativeIndexLessThanZero); + { + relativeIndex = Int32(0); + Jump(&exit); + } + Bind(&elseCheck); + { + Label relativeIndexGreaterThanLength(env); + + Branch(Int32GreaterThan(*relativeIndex, length), &relativeIndexGreaterThanLength, &exit); + Bind(&relativeIndexGreaterThanLength); + { + relativeIndex = length; + Jump(&exit); + } + } + Bind(&exit); + auto ret = *relativeIndex; + env->SubCfgExit(); + return ret; + } +} + +void BuiltinsStringStubBuilder::Slice(GateRef glue, GateRef thisValue, GateRef numArgs, + Variable *res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + + DEFVARIABLE(start, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(end, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(sliceLen, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(result, VariableType::JS_POINTER(), Undefined()); + + Label objNotUndefinedAndNull(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Label thisIsHeapObj(env); + Label isString(env); + + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapObj, slowPath); + Bind(&thisIsHeapObj); + Branch(IsString(thisValue), &isString, slowPath); + Bind(&isString); + { + Label startTagDefined(env); + + Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), slowPath, &startTagDefined); + Bind(&startTagDefined); + { + Label startTagIsInt(env); + Label endTagUndefined(env); + Label endTagDefined(env); + Label endTagIsInt(env); + Label next(env); + + GateRef startTag = GetCallArg0(numArgs); + Branch(TaggedIsInt(startTag), &startTagIsInt, slowPath); + Bind(&startTagIsInt); + GateRef thisLen = GetLengthFromString(thisValue); + start = ConvertAndClampRelativeIndex(GetInt32OfTInt(startTag), thisLen); + Branch(Int64GreaterThanOrEqual(IntPtr(1), numArgs), &endTagUndefined, &endTagDefined); + Bind(&endTagUndefined); + { + end = thisLen; + Jump(&next); + } + Bind(&endTagDefined); + { + GateRef endTag = GetCallArg1(numArgs); + Branch(TaggedIsInt(endTag), &endTagIsInt, slowPath); + Bind(&endTagIsInt); + end = ConvertAndClampRelativeIndex(GetInt32OfTInt(endTag), thisLen); + Jump(&next); + } + Bind(&next); + { + Label emptyString(env); + Label fastSubString(env); + Label finish(env); + + sliceLen = Int32Sub(*end, *start); + Branch(Int32LessThanOrEqual(*sliceLen, Int32(0)), &emptyString, &fastSubString); + Bind(&emptyString); + { + result = GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX); + Jump(&finish); + } + Bind(&fastSubString); + { + Label thisFlattenFastPath(env); + FlatStringStubBuilder thisFlat(this); + thisFlat.FlattenString(glue, thisValue, &thisFlattenFastPath); + Bind(&thisFlattenFastPath); + StringInfoGateRef stringInfoGate(&thisFlat); + result = FastSubString(glue, thisValue, *start, *sliceLen, stringInfoGate); + Jump(&finish); + } + Bind(&finish); + res->WriteVariable(*result); + Jump(exit); + } + } + } + } +} + +void BuiltinsStringStubBuilder::Trim(GateRef glue, GateRef thisValue, GateRef numArgs [[maybe_unused]], + Variable *res, Label *exit, Label *slowPath) +{ + auto env = GetEnvironment(); + 
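// TrimMode contract assumed here (mirrors the comments in EcmaStringTrimBody below):
+    //   mode > 0  -> TrimMode::TRIM_START  (strip leading whitespace only)
+    //   mode == 0 -> TrimMode::TRIM        (strip both ends)
+    //   mode < 0  -> TrimMode::TRIM_END    (strip trailing whitespace only)
+    // EcmaStringTrimBody runs the start scan when mode >= 0 and the end scan when
+    // mode <= 0, so a mode of 0 performs both scans. +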
DEFVARIABLE(start, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(end, VariableType::INT32(), Int32(-1)); + DEFVARIABLE(sliceLen, VariableType::INT32(), Int32(-1)); + + Label objNotUndefinedAndNull(env); + + Branch(TaggedIsUndefinedOrNull(thisValue), slowPath, &objNotUndefinedAndNull); + Bind(&objNotUndefinedAndNull); + { + Label thisIsHeapObj(env); + Label thisIsString(env); + + Branch(TaggedIsHeapObject(thisValue), &thisIsHeapObj, slowPath); + Bind(&thisIsHeapObj); + Branch(IsString(thisValue), &thisIsString, slowPath); + Bind(&thisIsString); + GateRef result = EcmaStringTrim(glue, thisValue, Int32(0)); // 0: mode = TrimMode::TRIM + res->WriteVariable(result); + Jump(exit); + } +} + +GateRef BuiltinsStringStubBuilder::StringAt(const StringInfoGateRef &stringInfoGate, GateRef index) { auto env = GetEnvironment(); Label entry(env); @@ -32,17 +810,101 @@ GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) Label doIntOp(env); Label leftIsNumber(env); Label rightIsNumber(env); - GateRef dataUtf16 = GetNormalStringData(obj); - Branch(IsUtf16String(obj), &isUtf16, &isUtf8); + GateRef dataUtf16 = GetNormalStringData(stringInfoGate); + Branch(IsUtf16String(stringInfoGate.GetString()), &isUtf16, &isUtf8); + Bind(&isUtf16); + { + result = ZExtInt16ToInt32(Load(VariableType::INT16(), PtrAdd(dataUtf16, + PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint16_t)))))); + Jump(&exit); + } + Bind(&isUtf8); + { + result = ZExtInt8ToInt32(Load(VariableType::INT8(), PtrAdd(dataUtf16, + PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint8_t)))))); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsStringStubBuilder::GetSingleCharCodeByIndex(GateRef str, GateRef index) +{ + // Note: This method cannot handle treestring. 
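+    // Tree strings must be flattened first (FlatStringStubBuilder::FlattenString).
+    // The dispatch below is equivalent to this plain C++ sketch:
+    //   if (IsConstantString(s)) return constData(s)[i];                // always utf8
+    //   if (IsLineString(s))     return lineData(s)[i];                 // utf8 or utf16
+    //   if (IsSlicedString(s))   return charCodeAt(parent(s), start(s) + i);
+    // (constData/lineData/parent/start are illustrative accessors, not real helpers.)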
+ auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::INT32(), Int32(0)); + + Label isConstantString(env); + Label lineStringCheck(env); + Label isLineString(env); + Label slicedStringCheck(env); + Label isSlicedString(env); + Label exit(env); + + Branch(IsConstantString(str), &isConstantString, &lineStringCheck); + Bind(&isConstantString); + { + result = GetSingleCharCodeFromConstantString(str, index); + Jump(&exit); + } + Bind(&lineStringCheck); + Branch(IsLineString(str), &isLineString, &slicedStringCheck); + Bind(&isLineString); + { + result = GetSingleCharCodeFromLineString(str, index); + Jump(&exit); + } + Bind(&slicedStringCheck); + Branch(IsSlicedString(str), &isSlicedString, &exit); + Bind(&isSlicedString); + { + result = GetSingleCharCodeFromSlicedString(str, index); + Jump(&exit); + } + + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsStringStubBuilder::GetSingleCharCodeFromConstantString(GateRef str, GateRef index) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + GateRef offset = ChangeStringTaggedPointerToInt64(PtrAdd(str, IntPtr(ConstantString::CONSTANT_DATA_OFFSET))); + GateRef dataAddr = Load(VariableType::NATIVE_POINTER(), offset, IntPtr(0)); + GateRef result = ZExtInt8ToInt32(Load(VariableType::INT8(), PtrAdd(dataAddr, + PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint8_t)))))); + env->SubCfgExit(); + return result; +} + +GateRef BuiltinsStringStubBuilder::GetSingleCharCodeFromLineString(GateRef str, GateRef index) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::INT32(), Int32(0)); + GateRef dataAddr = ChangeStringTaggedPointerToInt64(PtrAdd(str, IntPtr(LineEcmaString::DATA_OFFSET))); + Label isUtf16(env); + Label isUtf8(env); + Label exit(env); + Branch(IsUtf16String(str), &isUtf16, &isUtf8); Bind(&isUtf16); { - result = ZExtInt16ToInt32(Load(VariableType::INT16(), PtrAdd(dataUtf16, + result = ZExtInt16ToInt32(Load(VariableType::INT16(), PtrAdd(dataAddr, PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint16_t)))))); Jump(&exit); } Bind(&isUtf8); { - result = ZExtInt8ToInt32(Load(VariableType::INT8(), PtrAdd(dataUtf16, + result = ZExtInt8ToInt32(Load(VariableType::INT8(), PtrAdd(dataAddr, PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint8_t)))))); Jump(&exit); } @@ -52,7 +914,85 @@ GateRef BuiltinsStringStubBuilder::StringAt(GateRef obj, GateRef index) return ret; } -GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef obj, GateRef index) +GateRef BuiltinsStringStubBuilder::GetSingleCharCodeFromSlicedString(GateRef str, GateRef index) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::INT32(), Int32(0)); + Label isLineString(env); + Label notLineString(env); + Label exit(env); + + GateRef parent = Load(VariableType::JS_POINTER(), str, IntPtr(SlicedString::PARENT_OFFSET)); + GateRef startIndex = Load(VariableType::INT32(), str, IntPtr(SlicedString::STARTINDEX_OFFSET)); + Branch(IsLineString(parent), &isLineString, ¬LineString); + Bind(&isLineString); + { + result = GetSingleCharCodeFromLineString(parent, Int32Add(startIndex, index)); + Jump(&exit); + } + Bind(¬LineString); + { + result = GetSingleCharCodeFromConstantString(parent, Int32Add(startIndex, index)); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef 
BuiltinsStringStubBuilder::CreateStringBySingleCharCode(GateRef glue, GateRef charCode) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::JS_POINTER(), Hole()); + + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + + Label exit(env); + Label utf8(env); + Label utf16(env); + Label afterNew(env); + GateRef canStoreAsUtf8 = IsASCIICharacter(charCode); + Branch(canStoreAsUtf8, &utf8, &utf16); + Bind(&utf8); + { + newBuilder.AllocLineStringObject(&result, &afterNew, Int32(1), true); + } + Bind(&utf16); + { + newBuilder.AllocLineStringObject(&result, &afterNew, Int32(1), false); + } + Bind(&afterNew); + { + Label isUtf8Copy(env); + Label isUtf16Copy(env); + GateRef dst = ChangeStringTaggedPointerToInt64(PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET))); + Branch(canStoreAsUtf8, &isUtf8Copy, &isUtf16Copy); + Bind(&isUtf8Copy); + { + Store(VariableType::INT8(), glue, dst, IntPtr(0), TruncInt32ToInt8(charCode)); + Jump(&exit); + } + Bind(&isUtf16Copy); + { + Store(VariableType::INT16(), glue, dst, IntPtr(0), TruncInt32ToInt16(charCode)); + Jump(&exit); + } + } + + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef index, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -65,8 +1005,8 @@ GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef ob Label isUtf16(env); Label isUtf8(env); Label allocString(env); - GateRef dataUtf = GetNormalStringData(obj); - Branch(IsUtf16String(obj), &isUtf16, &isUtf8); + GateRef dataUtf = GetNormalStringData(stringInfoGate); + Branch(IsUtf16String(stringInfoGate.GetString()), &isUtf16, &isUtf8); Bind(&isUtf16); { GateRef dataAddr = PtrAdd(dataUtf, PtrMul(ZExtInt32ToPtr(index), IntPtr(sizeof(uint16_t)))); @@ -101,7 +1041,7 @@ GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef ob { Label isUtf8Copy(env); Label isUtf16Copy(env); - GateRef dst = PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)); + GateRef dst = ChangeStringTaggedPointerToInt64(PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET))); Branch(*canBeCompressed, &isUtf8Copy, &isUtf16Copy); Bind(&isUtf8Copy); { @@ -121,7 +1061,8 @@ GateRef BuiltinsStringStubBuilder::CreateFromEcmaString(GateRef glue, GateRef ob return ret; } -GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue, GateRef from, + GateRef len, const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -148,7 +1089,7 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue Branch(Int32Equal(from, Int32(0)), &fromEqualZero, &next); Bind(&fromEqualZero); { - GateRef thisLen = GetLengthFromString(thisValue); + GateRef thisLen = stringInfoGate.GetLength(); Branch(Int32Equal(len, thisLen), &exit, &next); } Bind(&next); @@ -156,12 +1097,12 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue Branch(IsUtf8String(thisValue), &isUtf8, &isUtf16); Bind(&isUtf8); { - result = FastSubUtf8String(glue, thisValue, from, len); + result = FastSubUtf8String(glue, from, len, stringInfoGate); Jump(&exit); } Bind(&isUtf16); { - result = FastSubUtf16String(glue, thisValue, from, len); + result = FastSubUtf16String(glue, from, len, stringInfoGate); 
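+        // Note: the utf16 path can still yield a compressed (utf8) line string:
+        // FastSubUtf16String calls CanBeCompressed on the copied range and, when the
+        // slice is all ASCII, re-encodes it via CopyUtf16AsUtf8 instead of a raw copy.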
Jump(&exit); } } @@ -172,7 +1113,8 @@ GateRef BuiltinsStringStubBuilder::FastSubString(GateRef glue, GateRef thisValue return ret; } -GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -186,8 +1128,8 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisV newBuilder.AllocLineStringObject(&result, &afterNew, len, true); Bind(&afterNew); { - GateRef dst = PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)); - GateRef source = PtrAdd(GetNormalStringData(thisValue), ZExtInt32ToPtr(from)); + GateRef dst = ChangeStringTaggedPointerToInt64(PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET))); + GateRef source = PtrAdd(GetNormalStringData(stringInfoGate), ZExtInt32ToPtr(from)); CopyChars(glue, dst, source, len, IntPtr(sizeof(uint8_t)), VariableType::INT8()); Jump(&exit); } @@ -197,7 +1139,8 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf8String(GateRef glue, GateRef thisV return ret; } -GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef thisValue, GateRef from, GateRef len) +GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate) { auto env = GetEnvironment(); Label entry(env); @@ -211,7 +1154,7 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef this Label isUtf16Next(env); GateRef fromOffset = PtrMul(ZExtInt32ToPtr(from), IntPtr(sizeof(uint16_t) / sizeof(uint8_t))); - GateRef source = PtrAdd(GetNormalStringData(thisValue), fromOffset); + GateRef source = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); GateRef canBeCompressed = CanBeCompressed(source, len, true); NewObjectStubBuilder newBuilder(this); newBuilder.SetParameters(glue, 0); @@ -227,12 +1170,12 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef this } Bind(&afterNew); { - GateRef source1 = PtrAdd(GetNormalStringData(thisValue), fromOffset); - GateRef dst = PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)); + GateRef source1 = PtrAdd(GetNormalStringData(stringInfoGate), fromOffset); + GateRef dst = ChangeStringTaggedPointerToInt64(PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET))); Branch(canBeCompressed, &isUtf8Next, &isUtf16Next); Bind(&isUtf8Next); { - CopyUtf16AsUtf8(glue, source1, dst, len); + CopyUtf16AsUtf8(glue, dst, source1, len); Jump(&exit); } Bind(&isUtf16Next); @@ -247,14 +1190,58 @@ GateRef BuiltinsStringStubBuilder::FastSubUtf16String(GateRef glue, GateRef this return ret; } +GateRef BuiltinsStringStubBuilder::GetSubstitution(GateRef glue, GateRef searchString, GateRef thisString, + GateRef pos, GateRef replaceString) +{ + auto env = GetEnvironment(); + + Label entry(env); + env->SubCfgEntry(&entry); + + DEFVARIABLE(result, VariableType::JS_POINTER(), Undefined()); + + Label dollarFlattenFastPath(env); + Label replaceFlattenFastPath(env); + Label notFound(env); + Label slowPath(env); + Label exit(env); + + GateRef dollarString = GetGlobalConstantValue(VariableType::JS_POINTER(), glue, ConstantIndex::DOLLAR_INDEX); + FlatStringStubBuilder dollarFlat(this); + dollarFlat.FlattenString(glue, dollarString, &dollarFlattenFastPath); + Bind(&dollarFlattenFastPath); + StringInfoGateRef dollarStringInfoGate(&dollarFlat); + FlatStringStubBuilder replaceFlat(this); + 
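// Fast path contract (illustrative sketch): a replacement string containing no '$'
+    // is inserted verbatim; anything with '$' patterns (such as $& or $1) defers to
+    // the runtime, roughly:
+    //   return replace.find('$') == npos ? replace
+    //                                    : RTSubstitution(search, thisStr, pos, replace);
+    // (npos-style pseudo-code only; the circuit below expresses the same branch.) +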
replaceFlat.FlattenString(glue, replaceString, &replaceFlattenFastPath); + Bind(&replaceFlattenFastPath); + StringInfoGateRef replaceStringInfoGate(&replaceFlat); + GateRef nextDollarIndex = StringIndexOf(replaceStringInfoGate, dollarStringInfoGate, Int32(-1)); + Branch(Int32LessThan(nextDollarIndex, Int32(0)), ¬Found, &slowPath); + Bind(¬Found); + { + result = replaceString; + Jump(&exit); + } + Bind(&slowPath); + { + result = CallRuntime(glue, RTSTUB_ID(RTSubstitution), + {searchString, thisString, IntToTaggedInt(pos), replaceString}); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + void BuiltinsStringStubBuilder::CopyChars(GateRef glue, GateRef dst, GateRef source, GateRef sourceLength, GateRef size, VariableType type) { auto env = GetEnvironment(); Label entry(env); env->SubCfgEntry(&entry); - DEFVARIABLE(dstTmp, VariableType::JS_ANY(), dst); - DEFVARIABLE(sourceTmp, VariableType::JS_ANY(), source); + DEFVARIABLE(dstTmp, VariableType::NATIVE_POINTER(), dst); + DEFVARIABLE(sourceTmp, VariableType::NATIVE_POINTER(), source); DEFVARIABLE(len, VariableType::INT32(), sourceLength); Label loopHead(env); Label loopEnd(env); @@ -327,14 +1314,52 @@ GateRef BuiltinsStringStubBuilder::CanBeCompressed(GateRef data, GateRef len, bo return ret; } -void BuiltinsStringStubBuilder::CopyUtf16AsUtf8(GateRef glue, GateRef src, GateRef dst, +// source is utf8, dst is utf16 +void BuiltinsStringStubBuilder::CopyUtf8AsUtf16(GateRef glue, GateRef dst, GateRef src, + GateRef sourceLength) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(dstTmp, VariableType::NATIVE_POINTER(), dst); + DEFVARIABLE(sourceTmp, VariableType::NATIVE_POINTER(), src); + DEFVARIABLE(len, VariableType::INT32(), sourceLength); + Label loopHead(env); + Label loopEnd(env); + Label next(env); + Label exit(env); + Jump(&loopHead); + LoopBegin(&loopHead); + { + Branch(Int32GreaterThan(*len, Int32(0)), &next, &exit); + Bind(&next); + { + len = Int32Sub(*len, Int32(1)); + GateRef i = Load(VariableType::INT8(), *sourceTmp); + Store(VariableType::INT16(), glue, *dstTmp, IntPtr(0), ZExtInt8ToInt16(i)); + Jump(&loopEnd); + } + } + + Bind(&loopEnd); + sourceTmp = PtrAdd(*sourceTmp, IntPtr(sizeof(uint8_t))); + dstTmp = PtrAdd(*dstTmp, IntPtr(sizeof(uint16_t))); + LoopEnd(&loopHead); + + Bind(&exit); + env->SubCfgExit(); + return; +} + +// source is utf16, dst is utf8 +void BuiltinsStringStubBuilder::CopyUtf16AsUtf8(GateRef glue, GateRef dst, GateRef src, GateRef sourceLength) { auto env = GetEnvironment(); Label entry(env); env->SubCfgEntry(&entry); - DEFVARIABLE(dstTmp, VariableType::JS_ANY(), dst); - DEFVARIABLE(sourceTmp, VariableType::JS_ANY(), src); + DEFVARIABLE(dstTmp, VariableType::NATIVE_POINTER(), dst); + DEFVARIABLE(sourceTmp, VariableType::NATIVE_POINTER(), src); DEFVARIABLE(len, VariableType::INT32(), sourceLength); Label loopHead(env); Label loopEnd(env); @@ -371,7 +1396,7 @@ GateRef BuiltinsStringStubBuilder::GetUtf16Data(GateRef stringData, GateRef inde GateRef BuiltinsStringStubBuilder::IsASCIICharacter(GateRef data) { - return Int32LessThan(Int32Sub(data, Int32(1)), Int32(base::utf_helper::UTF8_1B_MAX)); + return Int32UnsignedLessThan(Int32Sub(data, Int32(1)), Int32(base::utf_helper::UTF8_1B_MAX)); } GateRef BuiltinsStringStubBuilder::GetUtf8Data(GateRef stringData, GateRef index) @@ -503,7 +1528,19 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhsData, bool lhsIsUtf8 return ret; } -GateRef 
BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateRef pos) + +void BuiltinsStringStubBuilder::StoreParent(GateRef glue, GateRef object, GateRef parent) +{ + Store(VariableType::JS_POINTER(), glue, object, IntPtr(SlicedString::PARENT_OFFSET), parent); +} + +void BuiltinsStringStubBuilder::StoreStartIndex(GateRef glue, GateRef object, GateRef startIndex) +{ + Store(VariableType::INT32(), glue, object, IntPtr(SlicedString::STARTINDEX_OFFSET), startIndex); +} + +GateRef BuiltinsStringStubBuilder::StringIndexOf(const StringInfoGateRef &lStringInfoGate, + const StringInfoGateRef &rStringInfoGate, GateRef pos) { auto env = GetEnvironment(); Label entry(env); @@ -521,8 +1558,8 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR Label rhsIsUtf16(env); Label posRMaxNotGreaterLhs(env); - GateRef lhsCount = GetLengthFromString(lhs); - GateRef rhsCount = GetLengthFromString(rhs); + GateRef lhsCount = lStringInfoGate.GetLength(); + GateRef rhsCount = rStringInfoGate.GetLength(); Branch(Int32GreaterThan(pos, lhsCount), &exit, &nextCount); Bind(&nextCount); @@ -550,14 +1587,14 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR GateRef posRMax = Int32Add(*posTag, rhsCount); Branch(Int32GreaterThan(posRMax, lhsCount), &exit, &posRMaxNotGreaterLhs); Bind(&posRMaxNotGreaterLhs); - GateRef rhsData = GetNormalStringData(rhs); - GateRef lhsData = GetNormalStringData(lhs); - Branch(IsUtf8String(rhs), &rhsIsUtf8, &rhsIsUtf16); + GateRef rhsData = GetNormalStringData(rStringInfoGate); + GateRef lhsData = GetNormalStringData(lStringInfoGate); + Branch(IsUtf8String(rStringInfoGate.GetString()), &rhsIsUtf8, &rhsIsUtf16); Bind(&rhsIsUtf8); { Label lhsIsUtf8(env); Label lhsIsUtf16(env); - Branch(IsUtf8String(lhs), &lhsIsUtf8, &lhsIsUtf16); + Branch(IsUtf8String(lStringInfoGate.GetString()), &lhsIsUtf8, &lhsIsUtf16); Bind(&lhsIsUtf8); { result = StringIndexOf(lhsData, true, rhsData, true, *posTag, max, rhsCount); @@ -573,7 +1610,7 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR { Label lhsIsUtf8(env); Label lhsIsUtf16(env); - Branch(IsUtf8String(lhs), &lhsIsUtf8, &lhsIsUtf16); + Branch(IsUtf8String(lStringInfoGate.GetString()), &lhsIsUtf8, &lhsIsUtf16); Bind(&lhsIsUtf8); { result = StringIndexOf(lhsData, true, rhsData, false, *posTag, max, rhsCount); @@ -594,4 +1631,356 @@ GateRef BuiltinsStringStubBuilder::StringIndexOf(GateRef lhs, GateRef rhs, GateR env->SubCfgExit(); return ret; } + +void FlatStringStubBuilder::FlattenString(GateRef glue, GateRef str, Label *fastPath) +{ + auto env = GetEnvironment(); + Label notLineString(env); + Label exit(env); + length_ = GetLengthFromString(str); + Branch(BoolOr(IsLineString(str), IsConstantString(str)), &exit, ¬LineString); + Bind(¬LineString); + { + Label isTreeString(env); + Label notTreeString(env); + Label isSlicedString(env); + Branch(IsTreeString(str), &isTreeString, ¬TreeString); + Bind(&isTreeString); + { + Label isFlat(env); + Label notFlat(env); + Branch(TreeStringIsFlat(str), &isFlat, ¬Flat); + Bind(&isFlat); + { + flatString_.WriteVariable(GetFirstFromTreeString(str)); + Jump(fastPath); + } + Bind(¬Flat); + { + flatString_.WriteVariable(CallRuntime(glue, RTSTUB_ID(SlowFlattenString), { str })); + Jump(fastPath); + } + } + Bind(¬TreeString); + Branch(IsSlicedString(str), &isSlicedString, &exit); + Bind(&isSlicedString); + { + flatString_.WriteVariable(GetParentFromSlicedString(str)); + startIndex_.WriteVariable(GetStartIndexFromSlicedString(str)); 
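+            // Flattening contract (summary): line/constant strings pass through with
+            // startIndex 0; a flat tree string yields its first child; a non-flat tree
+            // string is flattened by the SlowFlattenString runtime call; a sliced string
+            // yields its parent plus the slice's startIndex. Consumers can then read
+            // contiguous data at flatString_ + startIndex_ for length_ characters.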
+ Jump(fastPath); + } + } + Bind(&exit); + { + flatString_.WriteVariable(str); + Jump(fastPath); + } +} + +GateRef BuiltinsStringStubBuilder::GetStringDataFromLineOrConstantString(GateRef str) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + Label exit(env); + Label isConstantString(env); + Label isLineString(env); + DEFVARIABLE(result, VariableType::NATIVE_POINTER(), IntPtr(0)); + Branch(IsConstantString(str), &isConstantString, &isLineString); + Bind(&isConstantString); + { + GateRef address = ChangeStringTaggedPointerToInt64(PtrAdd(str, IntPtr(ConstantString::CONSTANT_DATA_OFFSET))); + result = Load(VariableType::NATIVE_POINTER(), address, IntPtr(0)); + Jump(&exit); + } + Bind(&isLineString); + { + result = ChangeStringTaggedPointerToInt64(PtrAdd(str, IntPtr(LineEcmaString::DATA_OFFSET))); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsStringStubBuilder::StringConcat(GateRef glue, GateRef leftString, GateRef rightString) +{ + auto env = GetEnvironment(); + Label entry(env); + env->SubCfgEntry(&entry); + DEFVARIABLE(result, VariableType::JS_POINTER(), Undefined()); + Label exit(env); + Label equalZero(env); + Label notEqualZero(env); + Label lessThanMax(env); + Label throwError(env); + + GateRef leftLength = GetLengthFromString(leftString); + GateRef rightLength = GetLengthFromString(rightString); + GateRef newLength = Int32Add(leftLength, rightLength); + Branch(Int32GreaterThanOrEqual(newLength, Int32(EcmaString::MAX_STRING_LENGTH)), &throwError, &lessThanMax); + Bind(&throwError); + { + GateRef taggedId = Int32(GET_MESSAGE_STRING_ID(InvalidStringLength)); + CallRuntime(glue, RTSTUB_ID(ThrowRangeError), { IntToTaggedInt(taggedId) }); + Jump(&exit); + } + Bind(&lessThanMax); + Branch(Int32Equal(newLength, Int32(0)), &equalZero, ¬EqualZero); + Bind(&equalZero); + { + result = GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX); + Jump(&exit); + } + Bind(¬EqualZero); + { + Label leftEqualZero(env); + Label leftNotEqualZero(env); + Label rightEqualZero(env); + Label rightNotEqualZero(env); + Label newLineString(env); + Label newTreeString(env); + Branch(Int32Equal(leftLength, Int32(0)), &leftEqualZero, &leftNotEqualZero); + Bind(&leftEqualZero); + { + result = rightString; + Jump(&exit); + } + Bind(&leftNotEqualZero); + Branch(Int32Equal(rightLength, Int32(0)), &rightEqualZero, &rightNotEqualZero); + Bind(&rightEqualZero); + { + result = leftString; + Jump(&exit); + } + Bind(&rightNotEqualZero); + { + GateRef leftIsUtf8 = IsUtf8String(leftString); + GateRef rightIsUtf8 = IsUtf8String(rightString); + GateRef canBeCompressed = BoolAnd(leftIsUtf8, rightIsUtf8); + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + GateRef isTreeOrSlicedString = Int32LessThan(newLength, + Int32(std::min(TreeEcmaString::MIN_TREE_ECMASTRING_LENGTH, + SlicedString::MIN_SLICED_ECMASTRING_LENGTH))); + Branch(isTreeOrSlicedString, &newLineString, &newTreeString); + Bind(&newLineString); + { + Label isUtf8(env); + Label isUtf16(env); + Label isUtf8Next(env); + Label isUtf16Next(env); + Branch(canBeCompressed, &isUtf8, &isUtf16); + Bind(&isUtf8); + { + newBuilder.AllocLineStringObject(&result, &isUtf8Next, newLength, true); + } + Bind(&isUtf16); + { + newBuilder.AllocLineStringObject(&result, &isUtf16Next, newLength, false); + } + Bind(&isUtf8Next); + { + GateRef leftSource = GetStringDataFromLineOrConstantString(leftString); + GateRef 
rightSource = GetStringDataFromLineOrConstantString(rightString);
+                    GateRef leftDst = ChangeStringTaggedPointerToInt64(
+                        PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)));
+                    GateRef rightDst = ChangeStringTaggedPointerToInt64(PtrAdd(leftDst, ZExtInt32ToPtr(leftLength)));
+                    CopyChars(glue, leftDst, leftSource, leftLength, IntPtr(sizeof(uint8_t)), VariableType::INT8());
+                    CopyChars(glue, rightDst, rightSource, rightLength, IntPtr(sizeof(uint8_t)), VariableType::INT8());
+                    Jump(&exit);
+                }
+                Bind(&isUtf16Next);
+                {
+                    Label leftIsUtf8L(env);
+                    Label leftIsUtf16L(env);
+                    Label rightIsUtf8L(env);
+                    Label rightIsUtf16L(env);
+                    GateRef leftSource = GetStringDataFromLineOrConstantString(leftString);
+                    GateRef rightSource = GetStringDataFromLineOrConstantString(rightString);
+                    GateRef leftDst = ChangeStringTaggedPointerToInt64(
+                        PtrAdd(*result, IntPtr(LineEcmaString::DATA_OFFSET)));
+                    GateRef rightDst = ChangeStringTaggedPointerToInt64(
+                        PtrAdd(leftDst, PtrMul(ZExtInt32ToPtr(leftLength), IntPtr(sizeof(uint16_t)))));
+                    Branch(leftIsUtf8, &leftIsUtf8L, &leftIsUtf16L);
+                    Bind(&leftIsUtf8L);
+                    {
+                        // left is utf8, so the right string must be utf16
+                        CopyUtf8AsUtf16(glue, leftDst, leftSource, leftLength);
+                        CopyChars(glue, rightDst, rightSource, rightLength,
+                                  IntPtr(sizeof(uint16_t)), VariableType::INT16());
+                        Jump(&exit);
+                    }
+                    Bind(&leftIsUtf16L);
+                    {
+                        CopyChars(glue, leftDst, leftSource, leftLength,
+                                  IntPtr(sizeof(uint16_t)), VariableType::INT16());
+                        Branch(rightIsUtf8, &rightIsUtf8L, &rightIsUtf16L);
+                        Bind(&rightIsUtf8L);
+                        CopyUtf8AsUtf16(glue, rightDst, rightSource, rightLength);
+                        Jump(&exit);
+                        Bind(&rightIsUtf16L);
+                        CopyChars(glue, rightDst, rightSource, rightLength,
+                                  IntPtr(sizeof(uint16_t)), VariableType::INT16());
+                        Jump(&exit);
+                    }
+                }
+            }
+            Bind(&newTreeString);
+            {
+                Label isUtf8(env);
+                Label isUtf16(env);
+                Branch(canBeCompressed, &isUtf8, &isUtf16);
+                Bind(&isUtf8);
+                {
+                    newBuilder.AllocTreeStringObject(&result, &exit, leftString, rightString, newLength, true);
+                }
+                Bind(&isUtf16);
+                {
+                    newBuilder.AllocTreeStringObject(&result, &exit, leftString, rightString, newLength, false);
+                }
+            }
+        }
+    }
+    Bind(&exit);
+    auto ret = *result;
+    env->SubCfgExit();
+    return ret;
+}
+
+void BuiltinsStringStubBuilder::LocaleCompare([[maybe_unused]] GateRef glue, GateRef thisValue, GateRef numArgs,
+                                              [[maybe_unused]] Variable *res, [[maybe_unused]] Label *exit,
+                                              Label *slowPath)
+{
+    auto env = GetEnvironment();
+
+    Label thisIsHeapObj(env);
+    Branch(TaggedIsHeapObject(thisValue), &thisIsHeapObj, slowPath);
+    Bind(&thisIsHeapObj);
+    {
+        Label thisValueIsString(env);
+        Label firstArgIsString(env);
+        Label arg0IsHeapObj(env);
+        Branch(IsString(thisValue), &thisValueIsString, slowPath);
+        Bind(&thisValueIsString);
+        GateRef arg0 = GetCallArg0(numArgs);
+        Branch(TaggedIsHeapObject(arg0), &arg0IsHeapObj, slowPath);
+        Bind(&arg0IsHeapObj);
+        Branch(IsString(arg0), &firstArgIsString, slowPath);
+        Bind(&firstArgIsString);
+#ifdef ARK_SUPPORT_INTL
+        GateRef locales = GetCallArg1(numArgs);
+
+        GateRef options = GetCallArg2(numArgs);
+        GateRef localesIsUndef = TaggedIsUndefined(locales);
+        GateRef optionsIsUndef = TaggedIsUndefined(options);
+        GateRef cacheable = BoolAnd(BoolOr(localesIsUndef, TaggedObjectIsString(locales)), optionsIsUndef);
+        Label optionsIsString(env);
+        Label cacheAble(env);
+        Label uncacheable(env);
+
+        Branch(cacheable, &cacheAble, &uncacheable);
+        Bind(&cacheAble);
+        {
+            Label defvalue(env);
+            GateRef resValue = CallNGCRuntime(glue, RTSTUB_ID(LocaleCompareNoGc), {glue, locales,
thisValue, arg0}); + Branch(TaggedIsUndefined(resValue), slowPath, &defvalue); + Bind(&defvalue); + *res = resValue; + Jump(exit); + } + Bind(&uncacheable); + { + res->WriteVariable(CallRuntime(glue, RTSTUB_ID(LocaleCompareWithGc), {locales, thisValue, arg0, options})); + Jump(exit); + } +#else + Jump(slowPath); +#endif + } +} + +GateRef BuiltinsStringStubBuilder::EcmaStringTrim(GateRef glue, GateRef thisValue, GateRef trimMode) +{ + auto env = GetEnvironment(); + + Label entry(env); + env->SubCfgEntry(&entry); + + DEFVARIABLE(result, VariableType::JS_POINTER(), Undefined()); + + Label emptyString(env); + Label notEmpty(env); + Label exit(env); + + GateRef srcLen = GetLengthFromString(thisValue); + Branch(Int32Equal(srcLen, Int32(0)), &emptyString, ¬Empty); + Bind(&emptyString); + { + result = GetGlobalConstantValue( + VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX); + Jump(&exit); + } + Bind(¬Empty); + { + Label srcFlattenFastPath(env); + + FlatStringStubBuilder srcFlat(this); + srcFlat.FlattenString(glue, thisValue, &srcFlattenFastPath); + Bind(&srcFlattenFastPath); + StringInfoGateRef srcStringInfoGate(&srcFlat); + result = EcmaStringTrimBody(glue, thisValue, srcStringInfoGate, trimMode, IsUtf8String(thisValue)); + Jump(&exit); + } + Bind(&exit); + auto ret = *result; + env->SubCfgExit(); + return ret; +} + +GateRef BuiltinsStringStubBuilder::EcmaStringTrimBody(GateRef glue, GateRef thisValue, + StringInfoGateRef srcStringInfoGate, GateRef trimMode, GateRef isUtf8) +{ + auto env = GetEnvironment(); + + Label entry(env); + env->SubCfgEntry(&entry); + + GateRef srcLen = srcStringInfoGate.GetLength(); + GateRef srcString = srcStringInfoGate.GetString(); + GateRef startIndex = srcStringInfoGate.GetStartIndex(); + + DEFVARIABLE(start, VariableType::INT32(), Int32(0)); + DEFVARIABLE(end, VariableType::INT32(), Int32Sub(srcLen, Int32(1))); + + Label trimOrTrimStart(env); + Label notTrimStart(env); + Label next(env); + + Branch(Int32GreaterThanOrEqual(trimMode, Int32(0)), &trimOrTrimStart, ¬TrimStart); + Bind(&trimOrTrimStart); // mode = TrimMode::TRIM or TrimMode::TRIM_START + { + start = CallNGCRuntime(glue, RTSTUB_ID(StringGetStart), {isUtf8, srcString, srcLen, startIndex}); + Jump(¬TrimStart); + } + Bind(¬TrimStart); + { + Label trimOrTrimEnd(env); + Branch(Int32LessThanOrEqual(trimMode, Int32(0)), &trimOrTrimEnd, &next); + Bind(&trimOrTrimEnd); // mode = TrimMode::TRIM or TrimMode::TRIM_END + { + end = CallNGCRuntime(glue, RTSTUB_ID(StringGetEnd), {isUtf8, srcString, *start, srcLen, startIndex}); + Jump(&next); + } + } + Bind(&next); + { + auto ret = FastSubString(glue, thisValue, *start, + Int32Add(Int32Sub(*end, *start), Int32(1)), srcStringInfoGate); + env->SubCfgExit(); + return ret; + } +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/builtins_string_stub_builder.h b/ecmascript/compiler/builtins/builtins_string_stub_builder.h index 471b83c1e74a47aea900c63c8dcaa900e37fa18c..e2537ff9092af3b8ed4c00381a64d00ed84bf81e 100644 --- a/ecmascript/compiler/builtins/builtins_string_stub_builder.h +++ b/ecmascript/compiler/builtins/builtins_string_stub_builder.h @@ -18,30 +18,133 @@ #include "ecmascript/compiler/stub_builder-inl.h" namespace panda::ecmascript::kungfu { -class BuiltinsStringStubBuilder : public StubBuilder { +class FlatStringStubBuilder; +struct StringInfoGateRef; + +class BuiltinsStringStubBuilder : public BuiltinsStubBuilder { public: explicit BuiltinsStringStubBuilder(StubBuilder *parent) - : StubBuilder(parent) {} 
+ : BuiltinsStubBuilder(parent) {} + BuiltinsStringStubBuilder(CallSignature *callSignature, Environment *env) + : BuiltinsStubBuilder(callSignature, env) {} ~BuiltinsStringStubBuilder() override = default; NO_MOVE_SEMANTIC(BuiltinsStringStubBuilder); NO_COPY_SEMANTIC(BuiltinsStringStubBuilder); void GenerateCircuit() override {} - GateRef StringAt(GateRef obj, GateRef index); - GateRef FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len); - GateRef FastSubUtf8String(GateRef glue, GateRef thisValue, GateRef from, GateRef len); - GateRef FastSubUtf16String(GateRef glue, GateRef thisValue, GateRef from, GateRef len); + void FromCharCode(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + void CharAt(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + void CharCodeAt(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + void IndexOf(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + void Substring(GateRef glue, GateRef thisValue, GateRef numArgs, Variable* res, Label *exit, Label *slowPath); + void Replace(GateRef glue, GateRef thisValue, GateRef numArgs, Variable *res, Label *exit, Label *slowPath); + void Trim(GateRef glue, GateRef thisValue, GateRef numArgs, Variable *res, Label *exit, Label *slowPath); + void Slice(GateRef glue, GateRef thisValue, GateRef numArgs, Variable *res, Label *exit, Label *slowPath); + void LocaleCompare(GateRef glue, GateRef thisValue, GateRef numArgs, Variable *res, Label *exit, Label *slowPath); + + GateRef ConvertAndClampRelativeIndex(GateRef index, GateRef length); + GateRef StringAt(const StringInfoGateRef &stringInfoGate, GateRef index); + GateRef FastSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len, + const StringInfoGateRef &stringInfoGate); + GateRef FastSubUtf8String(GateRef glue, GateRef from, GateRef len, const StringInfoGateRef &stringInfoGate); + GateRef FastSubUtf16String(GateRef glue, GateRef from, GateRef len, const StringInfoGateRef &stringInfoGate); + GateRef GetSubstitution(GateRef glue, GateRef searchString, GateRef thisString, + GateRef pos, GateRef replaceString); void CopyChars(GateRef glue, GateRef dst, GateRef source, GateRef sourceLength, GateRef size, VariableType type); - void CopyUtf16AsUtf8(GateRef glue, GateRef src, GateRef dst, GateRef sourceLength); + void CopyUtf16AsUtf8(GateRef glue, GateRef dst, GateRef src, GateRef sourceLength); + void CopyUtf8AsUtf16(GateRef glue, GateRef dst, GateRef src, GateRef sourceLength); GateRef StringIndexOf(GateRef lhsData, bool lhsIsUtf8, GateRef rhsData, bool rhsIsUtf8, GateRef pos, GateRef max, GateRef rhsCount); - GateRef StringIndexOf(GateRef lhs, GateRef rhs, GateRef pos); - GateRef CreateFromEcmaString(GateRef glue, GateRef obj, GateRef index); + GateRef StringIndexOf(const StringInfoGateRef &lStringInfoGate, + const StringInfoGateRef &rStringInfoGate, GateRef pos); + GateRef GetSingleCharCodeByIndex(GateRef str, GateRef index); + GateRef CreateStringBySingleCharCode(GateRef glue, GateRef charCode); + GateRef CreateFromEcmaString(GateRef glue, GateRef index, const StringInfoGateRef &stringInfoGate); + GateRef StringConcat(GateRef glue, GateRef leftString, GateRef rightString); + GateRef EcmaStringTrim(GateRef glue, GateRef srcString, GateRef trimMode); + GateRef EcmaStringTrimBody(GateRef glue, GateRef thisValue, StringInfoGateRef srcStringInfoGate, + GateRef trimMode, 
GateRef isUtf8); + void StoreParent(GateRef glue, GateRef object, GateRef parent); + void StoreStartIndex(GateRef glue, GateRef object, GateRef startIndex); private: + GateRef ChangeStringTaggedPointerToInt64(GateRef x) + { + return GetEnvironment()->GetBuilder()->ChangeTaggedPointerToInt64(x); + } + GateRef GetStringDataFromLineOrConstantString(GateRef str); GateRef CanBeCompressed(GateRef utf16Data, GateRef utf16Len, bool isUtf16); GateRef GetUtf16Data(GateRef stringData, GateRef index); GateRef IsASCIICharacter(GateRef data); GateRef GetUtf8Data(GateRef stringData, GateRef index); + GateRef GetSingleCharCodeFromConstantString(GateRef str, GateRef index); + GateRef GetSingleCharCodeFromLineString(GateRef str, GateRef index); + GateRef GetSingleCharCodeFromSlicedString(GateRef str, GateRef index); + GateRef GetSubString(GateRef glue, GateRef thisValue, GateRef from, GateRef len); +}; + +class FlatStringStubBuilder : public StubBuilder { +public: + explicit FlatStringStubBuilder(StubBuilder *parent) + : StubBuilder(parent) {} + ~FlatStringStubBuilder() override = default; + NO_MOVE_SEMANTIC(FlatStringStubBuilder); + NO_COPY_SEMANTIC(FlatStringStubBuilder); + void GenerateCircuit() override {} + + void FlattenString(GateRef glue, GateRef str, Label *fastPath); + GateRef GetParentFromSlicedString(GateRef string) + { + GateRef offset = IntPtr(SlicedString::PARENT_OFFSET); + return Load(VariableType::JS_POINTER(), string, offset); + } + GateRef GetStartIndexFromSlicedString(GateRef string) + { + GateRef offset = IntPtr(SlicedString::STARTINDEX_OFFSET); + return Load(VariableType::INT32(), string, offset); + } + + GateRef GetFlatString() + { + return flatString_.ReadVariable(); + } + + GateRef GetStartIndex() + { + return startIndex_.ReadVariable(); + } + + GateRef GetLength() + { + return length_; + } + +private: + Variable flatString_ { GetEnvironment(), VariableType::JS_POINTER(), NextVariableId(), Undefined() }; + Variable startIndex_ { GetEnvironment(), VariableType::INT32(), NextVariableId(), Int32(0) }; + GateRef length_ { Circuit::NullGate() }; +}; + +struct StringInfoGateRef { + GateRef string_ { Circuit::NullGate() }; + GateRef startIndex_ { Circuit::NullGate() }; + GateRef length_ { Circuit::NullGate() }; + StringInfoGateRef(FlatStringStubBuilder *flatString) : string_(flatString->GetFlatString()), + startIndex_(flatString->GetStartIndex()), + length_(flatString->GetLength()) {} + GateRef GetString() const + { + return string_; + } + + GateRef GetStartIndex() const + { + return startIndex_; + } + + GateRef GetLength() const + { + return length_; + } }; } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BUILTINS_STRING_STUB_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/builtins/builtins_stubs.cpp b/ecmascript/compiler/builtins/builtins_stubs.cpp index cd7719479064840652f8d3d24bdab3ce40577803..97239ec37c9097cf9bff78e3d9a6af4e31f9a5c9 100644 --- a/ecmascript/compiler/builtins/builtins_stubs.cpp +++ b/ecmascript/compiler/builtins/builtins_stubs.cpp @@ -16,14 +16,20 @@ #include "ecmascript/compiler/builtins/builtins_stubs.h" #include "ecmascript/base/number_helper.h" +#include "ecmascript/compiler/builtins/builtins_array_stub_builder.h" #include "ecmascript/compiler/builtins/builtins_call_signature.h" +#include "ecmascript/compiler/builtins/builtins_function_stub_builder.h" #include "ecmascript/compiler/builtins/builtins_string_stub_builder.h" +#include "ecmascript/compiler/builtins/builtins_number_stub_builder.h" #include 
"ecmascript/compiler/builtins/containers_vector_stub_builder.h" #include "ecmascript/compiler/builtins/containers_stub_builder.h" +#include "ecmascript/compiler/builtins/builtins_collection_stub_builder.h" +#include "ecmascript/compiler/builtins/builtins_object_stub_builder.h" #include "ecmascript/compiler/interpreter_stub-inl.h" #include "ecmascript/compiler/llvm_ir_builder.h" #include "ecmascript/compiler/new_object_stub_builder.h" #include "ecmascript/compiler/stub_builder-inl.h" +#include "ecmascript/compiler/stub_builder.h" #include "ecmascript/compiler/variable_type.h" #include "ecmascript/js_date.h" #include "ecmascript/js_primitive_ref.h" @@ -142,482 +148,46 @@ GateRef BuiltinsStubBuilder::CallSlowPath(GateRef nativeCode, GateRef glue, Gate return ret; } -DECLARE_BUILTINS(CharCodeAt) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), DoubleToTaggedDoublePtr(Double(base::NAN_VALUE))); - DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); - - Label objNotUndefinedAndNull(env); - Label isString(env); - Label slowPath(env); - Label next(env); - Label posTagNotUndefined(env); - Label posTagIsInt(env); - Label posTagNotInt(env); - Label posNotGreaterLen(env); - Label posNotLessZero(env); - Label exit(env); - Label posTagIsDouble(env); - Label thisIsHeapobject(env); - Label flattenFastPath(env); - - Branch(TaggedIsUndefinedOrNull(thisValue), &slowPath, &objNotUndefinedAndNull); - Bind(&objNotUndefinedAndNull); - { - Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, &slowPath); - Bind(&thisIsHeapobject); - Branch(IsString(thisValue), &isString, &slowPath); - Bind(&isString); - { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); - Bind(&flattenFastPath); - GateRef thisLen = GetLengthFromString(*thisFlat); - Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); - Bind(&posTagNotUndefined); - { - GateRef posTag = GetCallArg0(numArgs); - Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); - Bind(&posTagIsInt); - pos = GetInt32OfTInt(posTag); - Jump(&next); - Bind(&posTagNotInt); - Branch(TaggedIsDouble(posTag), &posTagIsDouble, &slowPath); - Bind(&posTagIsDouble); - pos = DoubleToInt(glue, GetDoubleOfTDouble(posTag)); - Jump(&next); - } - Bind(&next); - { - Branch(Int32GreaterThanOrEqual(*pos, thisLen), &exit, &posNotGreaterLen); - Bind(&posNotGreaterLen); - { - Branch(Int32LessThan(*pos, Int32(0)), &exit, &posNotLessZero); - Bind(&posNotLessZero); - { - BuiltinsStringStubBuilder stringBuilder(this); - res = IntToTaggedPtr(stringBuilder.StringAt(*thisFlat, *pos)); - Jump(&exit); - } - } - } - } - } - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(CharCodeAt)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(IndexOf) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), IntToTaggedPtr(Int32(-1))); - DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); - - Label objNotUndefinedAndNull(env); - Label isString(env); - Label isSearchString(env); - Label slowPath(env); - Label next(env); - Label resPosGreaterZero(env); - Label searchTagIsHeapObject(env); - Label posTagNotUndefined(env); - Label posTagIsInt(env); - Label posTagNotInt(env); - Label exit(env); - Label posTagIsDouble(env); - Label nextCount(env); - Label posNotLessThanLen(env); - Label thisIsHeapobject(env); - Label flattenFastPath(env); - 
Label flattenFastPath1(env); - - Branch(TaggedIsUndefinedOrNull(thisValue), &slowPath, &objNotUndefinedAndNull); - Bind(&objNotUndefinedAndNull); - { - Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, &slowPath); - Bind(&thisIsHeapobject); - Branch(IsString(thisValue), &isString, &slowPath); - Bind(&isString); - { - GateRef searchTag = GetCallArg0(numArgs); - Branch(TaggedIsHeapObject(searchTag), &searchTagIsHeapObject, &slowPath); - Bind(&searchTagIsHeapObject); - Branch(IsString(searchTag), &isSearchString, &slowPath); - Bind(&isSearchString); - { - GateRef thisLen = GetLengthFromString(thisValue); - Branch(Int64GreaterThanOrEqual(IntPtr(1), numArgs), &next, &posTagNotUndefined); - Bind(&posTagNotUndefined); - { - GateRef posTag = GetCallArg1(numArgs); - Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); - Bind(&posTagIsInt); - pos = GetInt32OfTInt(posTag); - Jump(&next); - Bind(&posTagNotInt); - Branch(TaggedIsDouble(posTag), &posTagIsDouble, &slowPath); - Bind(&posTagIsDouble); - pos = DoubleToInt(glue, GetDoubleOfTDouble(posTag)); - Jump(&next); - } - Bind(&next); - { - Label posGreaterThanZero(env); - Label posNotGreaterThanZero(env); - Branch(Int32GreaterThan(*pos, Int32(0)), &posGreaterThanZero, &posNotGreaterThanZero); - Bind(&posNotGreaterThanZero); - { - pos = Int32(0); - Jump(&nextCount); - } - Bind(&posGreaterThanZero); - { - Branch(Int32LessThanOrEqual(*pos, thisLen), &nextCount, &posNotLessThanLen); - Bind(&posNotLessThanLen); - { - pos = thisLen; - Jump(&nextCount); - } - } - Bind(&nextCount); - { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - DEFVARIABLE(searchFlat, VariableType::JS_POINTER(), searchTag); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); - Bind(&flattenFastPath); - FlattenString(searchTag, &searchFlat, &flattenFastPath1, &slowPath); - Bind(&flattenFastPath1); - BuiltinsStringStubBuilder stringBuilder(this); - GateRef resPos = stringBuilder.StringIndexOf(*thisFlat, *searchFlat, *pos); - Branch(Int32GreaterThanOrEqual(resPos, Int32(0)), &resPosGreaterZero, &exit); - Bind(&resPosGreaterZero); - { - Label resPosLessZero(env); - Branch(Int32LessThanOrEqual(resPos, thisLen), &resPosLessZero, &exit); - Bind(&resPosLessZero); - { - res = IntToTaggedPtr(resPos); - Jump(&exit); - } - } - } - } - } - } - } - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(IndexOf)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(Substring) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), IntToTaggedPtr(Int32(-1))); - DEFVARIABLE(start, VariableType::INT32(), Int32(0)); - DEFVARIABLE(end, VariableType::INT32(), Int32(0)); - DEFVARIABLE(from, VariableType::INT32(), Int32(0)); - DEFVARIABLE(to, VariableType::INT32(), Int32(0)); - - Label objNotUndefinedAndNull(env); - Label isString(env); - Label isSearchString(env); - Label slowPath(env); - Label countStart(env); - Label endTagIsUndefined(env); - Label startNotGreatZero(env); - Label countEnd(env); - Label endNotGreatZero(env); - Label countFrom(env); - Label countRes(env); - Label startTagNotUndefined(env); - Label posTagIsInt(env); - Label posTagNotInt(env); - Label exit(env); - Label posTagIsDouble(env); - Label endTagNotUndefined(env); - Label endTagIsInt(env); - Label endTagNotInt(env); - Label endTagIsDouble(env); - Label endGreatZero(env); - Label endGreatLen(env); - Label startGreatZero(env); - Label 
startGreatEnd(env); - Label startNotGreatEnd(env); - Label thisIsHeapobject(env); - Label flattenFastPath(env); - - Branch(TaggedIsUndefinedOrNull(thisValue), &slowPath, &objNotUndefinedAndNull); - Bind(&objNotUndefinedAndNull); - { - Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, &slowPath); - Bind(&thisIsHeapobject); - Branch(IsString(thisValue), &isString, &slowPath); - Bind(&isString); - { - Label next(env); - GateRef thisLen = GetLengthFromString(thisValue); - Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &startTagNotUndefined); - Bind(&startTagNotUndefined); - { - GateRef startTag = GetCallArg0(numArgs); - Branch(TaggedIsInt(startTag), &posTagIsInt, &posTagNotInt); - Bind(&posTagIsInt); - start = GetInt32OfTInt(startTag); - Jump(&next); - Bind(&posTagNotInt); - Branch(TaggedIsDouble(startTag), &posTagIsDouble, &slowPath); - Bind(&posTagIsDouble); - start = DoubleToInt(glue, GetDoubleOfTDouble(startTag)); - Jump(&next); - } - Bind(&next); - { - Branch(Int64GreaterThanOrEqual(IntPtr(1), numArgs), &endTagIsUndefined, &endTagNotUndefined); - Bind(&endTagIsUndefined); - { - end = thisLen; - Jump(&countStart); - } - Bind(&endTagNotUndefined); - { - GateRef endTag = GetCallArg1(numArgs); - Branch(TaggedIsInt(endTag), &endTagIsInt, &endTagNotInt); - Bind(&endTagIsInt); - end = GetInt32OfTInt(endTag); - Jump(&countStart); - Bind(&endTagNotInt); - Branch(TaggedIsDouble(endTag), &endTagIsDouble, &slowPath); - Bind(&endTagIsDouble); - end = DoubleToInt(glue, GetDoubleOfTDouble(endTag)); - Jump(&countStart); - } - } - Bind(&countStart); - { - Label startGreatLen(env); - Branch(Int32GreaterThan(*start, Int32(0)), &startGreatZero, &startNotGreatZero); - Bind(&startNotGreatZero); - { - start = Int32(0); - Jump(&countEnd); - } - Bind(&startGreatZero); - { - Branch(Int32GreaterThan(*start, thisLen), &startGreatLen, &countEnd); - Bind(&startGreatLen); - { - start = thisLen; - Jump(&countEnd); - } - } - } - Bind(&countEnd); - { - Branch(Int32GreaterThan(*end, Int32(0)), &endGreatZero, &endNotGreatZero); - Bind(&endNotGreatZero); - { - end = Int32(0); - Jump(&countFrom); - } - Bind(&endGreatZero); - { - Branch(Int32GreaterThan(*end, thisLen), &endGreatLen, &countFrom); - Bind(&endGreatLen); - { - end = thisLen; - Jump(&countFrom); - } - } - } - Bind(&countFrom); - { - Branch(Int32GreaterThan(*start, *end), &startGreatEnd, &startNotGreatEnd); - Bind(&startGreatEnd); - { - from = *end; - to = *start; - Jump(&countRes); - } - Bind(&startNotGreatEnd); - { - from = *start; - to = *end; - Jump(&countRes); - } - } - Bind(&countRes); - { - GateRef len = Int32Sub(*to, *from); - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); - Bind(&flattenFastPath); - { - BuiltinsStringStubBuilder stringBuilder(this); - res = stringBuilder.FastSubString(glue, *thisFlat, *from, len); - Jump(&exit); - } - } - } - } - - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(Substring)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(CharAt) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Hole()); - DEFVARIABLE(pos, VariableType::INT32(), Int32(0)); - - Label objNotUndefinedAndNull(env); - Label isString(env); - Label slowPath(env); - Label next(env); - Label posTagNotUndefined(env); - Label posTagIsInt(env); - Label posTagNotInt(env); - Label 
posNotGreaterLen(env); - Label posGreaterLen(env); - Label posNotLessZero(env); - Label exit(env); - Label posTagIsDouble(env); - Label thisIsHeapobject(env); - Label flattenFastPath(env); - - Branch(TaggedIsUndefinedOrNull(thisValue), &slowPath, &objNotUndefinedAndNull); - Bind(&objNotUndefinedAndNull); - { - Branch(TaggedIsHeapObject(thisValue), &thisIsHeapobject, &slowPath); - Bind(&thisIsHeapobject); - Branch(IsString(thisValue), &isString, &slowPath); - Bind(&isString); - { - DEFVARIABLE(thisFlat, VariableType::JS_POINTER(), thisValue); - FlattenString(thisValue, &thisFlat, &flattenFastPath, &slowPath); - Bind(&flattenFastPath); - GateRef thisLen = GetLengthFromString(*thisFlat); - Branch(Int64GreaterThanOrEqual(IntPtr(0), numArgs), &next, &posTagNotUndefined); - Bind(&posTagNotUndefined); - { - GateRef posTag = GetCallArg0(numArgs); - Branch(TaggedIsInt(posTag), &posTagIsInt, &posTagNotInt); - Bind(&posTagIsInt); - pos = GetInt32OfTInt(posTag); - Jump(&next); - Bind(&posTagNotInt); - Branch(TaggedIsDouble(posTag), &posTagIsDouble, &slowPath); - Bind(&posTagIsDouble); - pos = DoubleToInt(glue, GetDoubleOfTDouble(posTag)); - Jump(&next); - } - Bind(&next); - { - Branch(Int32GreaterThanOrEqual(*pos, thisLen), &posGreaterLen, &posNotGreaterLen); - Bind(&posNotGreaterLen); - { - Branch(Int32LessThan(*pos, Int32(0)), &posGreaterLen, &posNotLessZero); - Bind(&posNotLessZero); - { - BuiltinsStringStubBuilder stringBuilder(this); - res = stringBuilder.CreateFromEcmaString(glue, *thisFlat, *pos); - Jump(&exit); - } - } - Bind(&posGreaterLen); - { - res = GetGlobalConstantValue( - VariableType::JS_POINTER(), glue, ConstantIndex::EMPTY_STRING_OBJECT_INDEX); - Jump(&exit); - } - } - } - } - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(CharAt)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(VectorForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::VECTOR_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(VectorForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(VectorReplaceAllElements) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::VECTOR_REPLACEALLELEMENTS); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(VectorReplaceAllElements)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); +#define DECLARE_BUILTINS_WITH_STRING_STUB_BUILDER(method, resultVariableType, initValue) \ +DECLARE_BUILTINS(String##method) \ +{ \ + auto env = GetEnvironment(); \ + DEFVARIABLE(res, VariableType::resultVariableType(), initValue); \ + Label exit(env); \ + Label slowPath(env); \ + BuiltinsStringStubBuilder stringStubBuilder(this); \ + stringStubBuilder.method(glue, thisValue, 
numArgs, &res, &exit, &slowPath); \ + Bind(&slowPath); \ + { \ + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(String##method)); \ + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \ + Jump(&exit); \ + } \ + Bind(&exit); \ + Return(*res); \ } -DECLARE_BUILTINS(StackForEach) +#define BUILTINS_WITH_STRING_STUB_BUILDER(V) \ + V(CharAt, JS_POINTER, Hole()) \ + V(FromCharCode, JS_ANY, Hole()) \ + V(CharCodeAt, JS_ANY, DoubleToTaggedDoublePtr(Double(base::NAN_VALUE))) \ + V(IndexOf, JS_ANY, IntToTaggedPtr(Int32(-1))) \ + V(Substring, JS_ANY, IntToTaggedPtr(Int32(-1))) \ + V(Replace, JS_ANY, Undefined()) \ + V(Trim, JS_ANY, Undefined()) \ + V(Slice, JS_ANY, Undefined()) + +DECLARE_BUILTINS(LocaleCompare) { auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - + DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); Label exit(env); Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::STACK_FOREACH); + BuiltinsStringStubBuilder stringStubBuilder(this); + stringStubBuilder.LocaleCompare(glue, thisValue, numArgs, &res, &exit, &slowPath); Bind(&slowPath); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(StackForEach)); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LocaleCompare)); res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); Jump(&exit); } @@ -625,41 +195,22 @@ DECLARE_BUILTINS(StackForEach) Return(*res); } -DECLARE_BUILTINS(PlainArrayForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); +BUILTINS_WITH_STRING_STUB_BUILDER(DECLARE_BUILTINS_WITH_STRING_STUB_BUILDER) - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::PLAINARRAY_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(PlainArrayForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} +#undef DECLARE_BUILTINS_WITH_STRING_STUB_BUILDER +#undef BUILTINS_WITH_STRING_STUB_BUILDER -DECLARE_BUILTINS(QueueForEach) +DECLARE_BUILTINS(FunctionPrototypeApply) { auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - + DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); Label exit(env); Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.QueueCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::QUEUE_FOREACH); + BuiltinsFunctionStubBuilder functionStubBuilder(this); + functionStubBuilder.Apply(glue, thisValue, numArgs, &res, &exit, &slowPath); Bind(&slowPath); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(QueueForEach)); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(FunctionPrototypeApply)); res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); Jump(&exit); } @@ -667,146 +218,126 @@ DECLARE_BUILTINS(QueueForEach) Return(*res); } -DECLARE_BUILTINS(DequeForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - 
containersBuilder.DequeCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::DEQUE_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(DequeForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); +#define DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER(StubName, Method, methodType, resultVariableType) \ +DECLARE_BUILTINS(StubName) \ +{ \ + auto env = GetEnvironment(); \ + DEFVARIABLE(res, VariableType::resultVariableType(), Undefined()); \ + Label exit(env); \ + Label slowPath(env); \ + ContainersStubBuilder containersBuilder(this); \ + containersBuilder.Method(glue, thisValue, numArgs, &res, &exit, &slowPath, ContainersType::methodType); \ + Bind(&slowPath); \ + { \ + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(StubName)); \ + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \ + Jump(&exit); \ + } \ + Bind(&exit); \ + Return(*res); \ } -DECLARE_BUILTINS(LightWeightMapForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLightWeightCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIGHTWEIGHTMAP_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LightWeightMapForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); +#define BUILTINS_WITH_CONTAINERS_STUB_BUILDER(V) \ + V(ArrayListForEach, ContainersCommonFuncCall, ARRAYLIST_FOREACH, JS_POINTER) \ + V(DequeForEach, DequeCommonFuncCall, DEQUE_FOREACH, JS_POINTER) \ + V(HashMapForEach, ContainersHashCall, HASHMAP_FOREACH, JS_POINTER) \ + V(HashSetForEach, ContainersHashCall, HASHSET_FOREACH, JS_POINTER) \ + V(LightWeightMapForEach, ContainersLightWeightCall, LIGHTWEIGHTMAP_FOREACH, JS_POINTER) \ + V(LightWeightSetForEach, ContainersLightWeightCall, LIGHTWEIGHTSET_FOREACH, JS_POINTER) \ + V(LinkedListForEach, ContainersLinkedListCall, LINKEDLIST_FOREACH, JS_POINTER) \ + V(ListForEach, ContainersLinkedListCall, LIST_FOREACH, JS_POINTER) \ + V(PlainArrayForEach, ContainersCommonFuncCall, PLAINARRAY_FOREACH, JS_POINTER) \ + V(QueueForEach, QueueCommonFuncCall, QUEUE_FOREACH, JS_POINTER) \ + V(StackForEach, ContainersCommonFuncCall, STACK_FOREACH, JS_POINTER) \ + V(VectorForEach, ContainersCommonFuncCall, VECTOR_FOREACH, JS_POINTER) \ + V(ArrayListReplaceAllElements, ContainersCommonFuncCall, ARRAYLIST_REPLACEALLELEMENTS, JS_POINTER) \ + V(VectorReplaceAllElements, ContainersCommonFuncCall, VECTOR_REPLACEALLELEMENTS, JS_POINTER) + +BUILTINS_WITH_CONTAINERS_STUB_BUILDER(DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER) + +#undef DECLARE_BUILTINS_WITH_CONTAINERS_STUB_BUILDER +#undef BUILTINS_WITH_CONTAINERS_STUB_BUILDER + +#define DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER(Method, resultVariableType) \ +DECLARE_BUILTINS(Array##Method) \ +{ \ + auto env = GetEnvironment(); \ + DEFVARIABLE(res, VariableType::resultVariableType(), Undefined()); \ + Label exit(env); \ + Label slowPath(env); \ + BuiltinsArrayStubBuilder arrayStubBuilder(this); \ + arrayStubBuilder.Method(glue, thisValue, numArgs, &res, &exit, &slowPath); \ + Bind(&slowPath); \ + { \ + auto name = 
BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(Array##Method)); \ + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \ + Jump(&exit); \ + } \ + Bind(&exit); \ + Return(*res); \ } -DECLARE_BUILTINS(LightWeightSetForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); +#define BUILTINS_WITH_ARRAY_STUB_BUILDER(V) \ + V(Concat, JS_POINTER) \ + V(Filter, JS_POINTER) \ + V(ForEach, JS_ANY) \ + V(IndexOf, JS_ANY) \ + V(LastIndexOf, JS_ANY) \ + V(Slice, JS_POINTER) \ + V(Reverse, JS_POINTER) \ + V(Push, JS_ANY) - Label exit(env); - Label slowPath(env); +BUILTINS_WITH_ARRAY_STUB_BUILDER(DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER) - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLightWeightCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIGHTWEIGHTSET_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LightWeightSetForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} +#undef DECLARE_BUILTINS_WITH_ARRAY_STUB_BUILDER +#undef BUILTINS_WITH_ARRAY_STUB_BUILDER -DECLARE_BUILTINS(HashMapForEach) +DECLARE_BUILTINS(BooleanConstructor) { auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); + DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); - Label exit(env); + Label newTargetIsHeapObject(env); + Label newTargetIsJSFunction(env); Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersHashCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::HASHMAP_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(HashMapForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(HashSetForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - + Label slowPath1(env); Label exit(env); - Label slowPath(env); - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersHashCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::HASHSET_FOREACH); - Bind(&slowPath); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath1); + Bind(&newTargetIsHeapObject); + Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); + Bind(&newTargetIsJSFunction); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(HashSetForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); + Label intialHClassIsHClass(env); + GateRef intialHClass = Load(VariableType::JS_ANY(), newTarget, + IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + Branch(IsJSHClass(intialHClass), &intialHClassIsHClass, &slowPath); + Bind(&intialHClassIsHClass); + { + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + Label afterNew(env); + newBuilder.NewJSObject(&res, &afterNew, intialHClass); + Bind(&afterNew); + { + GateRef valueOffset = IntPtr(JSPrimitiveRef::VALUE_OFFSET); + GateRef value = GetArg(numArgs, IntPtr(0)); + Store(VariableType::INT64(), glue, *res, valueOffset, FastToBoolean(value)); + Jump(&exit); + } + } } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(LinkedListForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, 
VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLinkedListCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LINKEDLIST_FOREACH); Bind(&slowPath); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(LinkedListForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(BooleanConstructor)); + GateRef argv = GetArgv(); + auto args = { glue, nativeCode, func, thisValue, numArgs, argv }; + res = CallBuiltinRuntime(glue, args, true, name.c_str()); Jump(&exit); } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(ListForEach) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); - Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersLinkedListCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::LIST_FOREACH); - Bind(&slowPath); + Bind(&slowPath1); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ListForEach)); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(BooleanConstructor)); res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); Jump(&exit); } @@ -814,130 +345,59 @@ DECLARE_BUILTINS(ListForEach) Return(*res); } -DECLARE_BUILTINS(ArrayListForEach) +DECLARE_BUILTINS(NumberConstructor) { auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - - Label exit(env); + DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); + DEFVARIABLE(numberValue, VariableType::JS_ANY(), IntToTaggedPtr(IntPtr(0))); + Label thisCollectionObj(env); Label slowPath(env); - - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::ARRAYLIST_FOREACH); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ArrayListForEach)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); - } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(ArrayListReplaceAllElements) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_POINTER(), Undefined()); - + Label slowPath1(env); Label exit(env); - Label slowPath(env); - ContainersStubBuilder containersBuilder(this); - containersBuilder.ContainersCommonFuncCall(glue, thisValue, numArgs, &res, &exit, - &slowPath, ContainersType::ARRAYLIST_REPLACEALLELEMENTS); - Bind(&slowPath); - { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ArrayListReplaceAllElements)); - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); - Jump(&exit); + Label hasArg(env); + Label numberCreate(env); + Label newTargetIsHeapObject(env); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath1); + Bind(&newTargetIsHeapObject); + Branch(Int64GreaterThan(numArgs, IntPtr(0)), &hasArg, &numberCreate); + Bind(&hasArg); + { + GateRef value = GetArgNCheck(Int32(0)); + Label number(env); + Branch(TaggedIsNumber(value), &number, &slowPath); + Bind(&number); + { + numberValue = value; + res = value; + Jump(&numberCreate); + } } - Bind(&exit); - Return(*res); -} -DECLARE_BUILTINS(FunctionPrototypeApply) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); - 
Label exit(env); - Label slowPath(env); - Label targetIsCallable(env); - Label targetIsUndefined(env); - Label targetNotUndefined(env); - Label isHeapObject(env); - //1. If IsCallable(func) is false, throw a TypeError exception - Branch(TaggedIsHeapObject(thisValue), &isHeapObject, &slowPath); - Bind(&isHeapObject); + Bind(&numberCreate); + Label newObj(env); + Label newTargetIsJSFunction(env); + Branch(TaggedIsUndefined(newTarget), &exit, &newObj); + Bind(&newObj); { - Branch(IsCallable(thisValue), &targetIsCallable, &slowPath); - Bind(&targetIsCallable); + Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); + Bind(&newTargetIsJSFunction); { - GateRef thisArg = GetCallArg0(numArgs); - GateRef arrayObj = GetCallArg1(numArgs); - // 2. If argArray is null or undefined, then - Branch(TaggedIsUndefined(arrayObj), &targetIsUndefined, &targetNotUndefined); - Bind(&targetIsUndefined); - { - // a. Return Call(func, thisArg). - res = JSCallDispatch(glue, thisValue, Int32(0), 0, Circuit::NullGate(), - JSCallMode::CALL_GETTER, { thisArg }); - Jump(&exit); - } - Bind(&targetNotUndefined); + Label intialHClassIsHClass(env); + GateRef intialHClass = Load(VariableType::JS_ANY(), newTarget, + IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); + Branch(IsJSHClass(intialHClass), &intialHClassIsHClass, &slowPath); + Bind(&intialHClassIsHClass); { - // 3. Let argList be CreateListFromArrayLike(argArray). - GateRef elements = BuildArgumentsListFastElements(glue, arrayObj); - Label targetIsHole(env); - Label targetNotHole(env); - Branch(TaggedIsHole(elements), &targetIsHole, &targetNotHole); - Bind(&targetIsHole); - { - GateRef argList = CreateListFromArrayLike(glue, arrayObj); - // 4. ReturnIfAbrupt(argList). - Label isPendingException(env); - Label noPendingException(env); - Branch(HasPendingException(glue), &isPendingException, &noPendingException); - Bind(&isPendingException); - { - Jump(&slowPath); - } - Bind(&noPendingException); - { - GateRef argsLength = GetLengthOfTaggedArray(argList); - GateRef argv = PtrAdd(argList, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, argv, thisArg }); - Jump(&exit); - } - } - Bind(&targetNotHole); + NewObjectStubBuilder newBuilder(this); + newBuilder.SetParameters(glue, 0); + Label afterNew(env); + newBuilder.NewJSObject(&res, &afterNew, intialHClass); + Bind(&afterNew); { - // 6. Return Call(func, thisArg, argList). 
- Label taggedIsStableJsArg(env); - Label taggedNotStableJsArg(env); - Branch(IsStableJSArguments(glue, arrayObj), &taggedIsStableJsArg, &taggedNotStableJsArg); - Bind(&taggedIsStableJsArg); - { - GateRef hClass = LoadHClass(arrayObj); - GateRef PropertyInlinedPropsOffset = IntPtr(JSArguments::LENGTH_INLINE_PROPERTY_INDEX); - GateRef result = GetPropertyInlinedProps(arrayObj, hClass, PropertyInlinedPropsOffset); - GateRef length = TaggedGetInt(result); - GateRef argsLength = MakeArgListWithHole(glue, elements, length); - GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }); - Jump(&exit); - } - Bind(&taggedNotStableJsArg); - { - GateRef length = GetLengthOfJsArray(glue, arrayObj); - GateRef argsLength = MakeArgListWithHole(glue, elements, length); - GateRef elementArgv = PtrAdd(elements, IntPtr(TaggedArray::DATA_OFFSET)); - res = JSCallDispatch(glue, thisValue, argsLength, 0, Circuit::NullGate(), - JSCallMode::CALL_THIS_ARGV_WITH_RETURN, { argsLength, elementArgv, thisArg }); - Jump(&exit); - } + GateRef valueOffset = IntPtr(JSPrimitiveRef::VALUE_OFFSET); + Store(VariableType::INT64(), glue, *res, valueOffset, *numberValue); + Jump(&exit); } } } @@ -945,50 +405,15 @@ DECLARE_BUILTINS(FunctionPrototypeApply) Bind(&slowPath); { - res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(NumberConstructor)); + GateRef argv = GetArgv(); + res = CallBuiltinRuntime(glue, { glue, nativeCode, func, thisValue, numArgs, argv }, true, name.c_str()); Jump(&exit); } - Bind(&exit); - Return(*res); -} - -DECLARE_BUILTINS(BooleanConstructor) -{ - auto env = GetEnvironment(); - DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); - - Label newTargetIsJSFunction(env); - Label slowPath(env); - Label exit(env); - - Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath); - Bind(&newTargetIsJSFunction); - { - Label intialHClassIsHClass(env); - GateRef intialHClass = Load(VariableType::JS_ANY(), newTarget, - IntPtr(JSFunction::PROTO_OR_DYNCLASS_OFFSET)); - Branch(IsJSHClass(intialHClass), &intialHClassIsHClass, &slowPath); - Bind(&intialHClassIsHClass); - { - NewObjectStubBuilder newBuilder(this); - newBuilder.SetParameters(glue, 0); - Label afterNew(env); - newBuilder.NewJSObject(&res, &afterNew, intialHClass); - Bind(&afterNew); - { - GateRef valueOffset = IntPtr(JSPrimitiveRef::VALUE_OFFSET); - GateRef value = GetArg(numArgs, IntPtr(0)); - Store(VariableType::INT64(), glue, *res, valueOffset, FastToBoolean(value)); - Jump(&exit); - } - } - } - Bind(&slowPath); + Bind(&slowPath1); { - auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(BooleanConstructor)); - GateRef argv = GetArgv(); - auto args = { glue, nativeCode, func, thisValue, numArgs, argv }; - res = CallBuiltinRuntime(glue, args, true, name.c_str()); + auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(NumberConstructor)); + res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); Jump(&exit); } Bind(&exit); @@ -1000,10 +425,14 @@ DECLARE_BUILTINS(DateConstructor) auto env = GetEnvironment(); DEFVARIABLE(res, VariableType::JS_ANY(), Undefined()); + Label newTargetIsHeapObject(env); Label newTargetIsJSFunction(env); Label slowPath(env); + Label slowPath1(env); Label exit(env); + Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, 
&slowPath1);
+    Bind(&newTargetIsHeapObject);
     Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath);
     Bind(&newTargetIsJSFunction);
     {
@@ -1073,6 +502,12 @@ DECLARE_BUILTINS(DateConstructor)
         res = CallBuiltinRuntime(glue, { glue, nativeCode, func, thisValue, numArgs, argv }, true, name.c_str());
         Jump(&exit);
     }
+    Bind(&slowPath1);
+    {
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(DateConstructor));
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str());
+        Jump(&exit);
+    }
     Bind(&exit);
     Return(*res);
 }
@@ -1082,10 +517,14 @@ DECLARE_BUILTINS(ArrayConstructor)
     auto env = GetEnvironment();
     DEFVARIABLE(res, VariableType::JS_ANY(), Undefined());
 
+    Label newTargetIsHeapObject(env);
     Label newTargetIsJSFunction(env);
     Label slowPath(env);
+    Label slowPath1(env);
     Label exit(env);
 
+    Branch(TaggedIsHeapObject(newTarget), &newTargetIsHeapObject, &slowPath1);
+    Bind(&newTargetIsHeapObject);
     Branch(IsJSFunction(newTarget), &newTargetIsJSFunction, &slowPath);
     Bind(&newTargetIsJSFunction);
     {
@@ -1165,7 +604,7 @@ DECLARE_BUILTINS(ArrayConstructor)
             newBuilder.SetParameters(glue, 0);
             res = newBuilder.NewJSArrayWithSize(intialHClass, *arrayLength);
             GateRef lengthOffset = IntPtr(JSArray::LENGTH_OFFSET);
-            Store(VariableType::JS_ANY(), glue, *res, lengthOffset, Int64ToTaggedInt(*arrayLength));
+            Store(VariableType::INT32(), glue, *res, lengthOffset, TruncInt64ToInt32(*arrayLength));
             GateRef accessor = GetGlobalConstantValue(VariableType::JS_ANY(), glue, ConstantIndex::ARRAY_LENGTH_ACCESSOR);
             SetPropertyInlinedProps(glue, *res, intialHClass, accessor,
@@ -1183,8 +622,116 @@ DECLARE_BUILTINS(ArrayConstructor)
         res = CallBuiltinRuntime(glue, { glue, nativeCode, func, thisValue, numArgs, argv }, true, name.c_str());
         Jump(&exit);
     }
+    Bind(&slowPath1);
+    {
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ArrayConstructor));
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str());
+        Jump(&exit);
+    }
     Bind(&exit);
     Return(*res);
 }
-} // namespace panda::ecmascript::kungfu
\ No newline at end of file
+
+#define DECLARE_BUILTINS_OBJECT_STUB_BUILDER(type, method, retType, retDefaultValue) \
+DECLARE_BUILTINS(type##method) \
+{ \
+    auto env = GetEnvironment(); \
+    DEFVARIABLE(res, retType, retDefaultValue); \
+    Label thisCollectionObj(env); \
+    Label slowPath(env); \
+    Label exit(env); \
+    BuiltinsObjectStubBuilder builder(this, glue, thisValue, numArgs); \
+    builder.method(&res, &exit, &slowPath); \
+    Bind(&slowPath); \
+    { \
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(type##method)); \
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \
+        Jump(&exit); \
+    } \
+    Bind(&exit); \
+    Return(*res); \
+}
+
+// Object.prototype.toString
+DECLARE_BUILTINS_OBJECT_STUB_BUILDER(Object, ToString, VariableType::JS_ANY(), Undefined());
+// Object.create
+DECLARE_BUILTINS_OBJECT_STUB_BUILDER(Object, Create, VariableType::JS_ANY(), Undefined());
+// Object.assign
+DECLARE_BUILTINS_OBJECT_STUB_BUILDER(Object, Assign, VariableType::JS_ANY(), Undefined());
+#undef DECLARE_BUILTINS_OBJECT_STUB_BUILDER
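// Editor's note (illustrative only, not part of the patch): expanding one
// invocation of the macro above, for example
//     DECLARE_BUILTINS_OBJECT_STUB_BUILDER(Object, ToString, VariableType::JS_ANY(), Undefined());
// yields a stub definition equivalent to the hand-written ones this change removes:
DECLARE_BUILTINS(ObjectToString)
{
    auto env = GetEnvironment();
    DEFVARIABLE(res, VariableType::JS_ANY(), Undefined());
    Label thisCollectionObj(env);
    Label slowPath(env);
    Label exit(env);
    BuiltinsObjectStubBuilder builder(this, glue, thisValue, numArgs);
    builder.ToString(&res, &exit, &slowPath);
    Bind(&slowPath);
    {
        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(ObjectToString));
        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str());
        Jump(&exit);
    }
    Bind(&exit);
    Return(*res);
}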
+
+#define DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(type, method, retType, retDefaultValue) \
+DECLARE_BUILTINS(type##method) \
+{ \
+    auto env = GetEnvironment(); \
+    DEFVARIABLE(res, retType, retDefaultValue); \
+    Label slowPath(env); \
+    Label exit(env); \
+    BuiltinsCollectionStubBuilder builder(this, glue, thisValue, numArgs); \
+    builder.method(&res, &exit, &slowPath); \
+    Bind(&slowPath); \
+    { \
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(type##method)); \
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \
+        Jump(&exit); \
+    } \
+    Bind(&exit); \
+    Return(*res); \
+}
+
+// Set.prototype.clear
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Clear, VariableType::JS_ANY(), Undefined());
+// Set.prototype.values
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Values, VariableType::JS_ANY(), Undefined());
+// Set.prototype.entries
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Entries, VariableType::JS_ANY(), Undefined());
+// Set.prototype.forEach
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, ForEach, VariableType::JS_ANY(), Undefined());
+// Set.prototype.add
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Add, VariableType::JS_ANY(), Undefined());
+// Set.prototype.delete
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Delete, VariableType::JS_ANY(), Undefined());
+// Set.prototype.has
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Set, Has, VariableType::JS_ANY(), Undefined());
+// Map.prototype.clear
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Clear, VariableType::JS_ANY(), Undefined());
+// Map.prototype.values
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Values, VariableType::JS_ANY(), Undefined());
+// Map.prototype.entries
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Entries, VariableType::JS_ANY(), Undefined());
+// Map.prototype.keys
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Keys, VariableType::JS_ANY(), Undefined());
+// Map.prototype.forEach
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, ForEach, VariableType::JS_ANY(), Undefined());
+// Map.prototype.set
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Set, VariableType::JS_ANY(), Undefined());
+// Map.prototype.delete
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Delete, VariableType::JS_ANY(), Undefined());
+// Map.prototype.has
+DECLARE_BUILTINS_COLLECTION_STUB_BUILDER(Map, Has, VariableType::JS_ANY(), Undefined());
+#undef DECLARE_BUILTINS_COLLECTION_STUB_BUILDER
+
+#define DECLARE_BUILTINS_NUMBER_STUB_BUILDER(type, method, retType, retDefaultValue) \
+DECLARE_BUILTINS(type##method) \
+{ \
+    auto env = GetEnvironment(); \
+    DEFVARIABLE(res, retType, retDefaultValue); \
+    Label slowPath(env); \
+    Label exit(env); \
+    BuiltinsNumberStubBuilder builder(this, glue, thisValue, numArgs); \
+    builder.method(&res, &exit, &slowPath); \
+    Bind(&slowPath); \
+    { \
+        auto name = BuiltinsStubCSigns::GetName(BUILTINS_STUB_ID(type##method)); \
+        res = CallSlowPath(nativeCode, glue, thisValue, numArgs, func, newTarget, name.c_str()); \
+        Jump(&exit); \
+    } \
+    Bind(&exit); \
+    Return(*res); \
+}
+
+// Number.parseFloat
+DECLARE_BUILTINS_NUMBER_STUB_BUILDER(Number, ParseFloat, VariableType::JS_ANY(), Undefined());
+#undef DECLARE_BUILTINS_NUMBER_STUB_BUILDER
+} // namespace panda::ecmascript::kungfu
diff --git a/ecmascript/compiler/builtins/containers_stub_builder.cpp b/ecmascript/compiler/builtins/containers_stub_builder.cpp
index 1a9c4032764a8673c39eac530ccb834690312f9a..9f00e6c7d8d7e90df77f4f0905a01b031bcb83c8 100644
--- a/ecmascript/compiler/builtins/containers_stub_builder.cpp
+++ b/ecmascript/compiler/builtins/containers_stub_builder.cpp
@@ -119,7 +119,7 @@ void ContainersStubBuilder::ContainersCommonFuncCall(GateRef glue, GateRef thisV
         Branch(Int32NotEqual(tempLen, *length), &lenChange, &setValue);
         Bind(&lenChange);
         length = tempLen;
-        Jump(&setValue);
+        Branch(Int32GreaterThanOrEqual(*k, *length), &afterLoop, &setValue);
         Bind(&setValue);
         if 
(IsReplaceAllElements(type)) { ContainerSet(glue, *thisObj, *k, retValue, type); diff --git a/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2404124e5e30fe631a2e419773de93d419762793 --- /dev/null +++ b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.cpp @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ecmascript/compiler/builtins/linked_hashtable_stub_builder.h" + +#include "ecmascript/compiler/builtins/builtins_stubs.h" +#include "ecmascript/compiler/new_object_stub_builder.h" +#include "ecmascript/linked_hash_table.h" +#include "ecmascript/js_set.h" +#include "ecmascript/js_map.h" + +namespace panda::ecmascript::kungfu { +template +void LinkedHashTableStubBuilder::Rehash( + GateRef linkedTable, GateRef newTable) +{ + auto env = GetEnvironment(); + Label entryLabel(env); + env->SubCfgEntry(&entryLabel); + + GateRef numberOfAllElements = Int32Add(GetNumberOfElements(linkedTable), + GetNumberOfDeletedElements(linkedTable)); + + DEFVARIABLE(desEntry, VariableType::INT32(), Int32(0)); + DEFVARIABLE(currentDeletedElements, VariableType::INT32(), Int32(0)); + SetNextTable(linkedTable, newTable); + + Label loopHead(env); + Label loopEnd(env); + Label next(env); + Label loopExit(env); + + DEFVARIABLE(i, VariableType::INT32(), Int32(0)); + Jump(&loopHead); + LoopBegin(&loopHead); + { + Branch(Int32LessThan(*i, numberOfAllElements), &next, &loopExit); + Bind(&next); + + GateRef fromIndex = EntryToIndex(linkedTable, *i); + DEFVARIABLE(key, VariableType::JS_ANY(), GetElement(linkedTable, fromIndex)); + Label hole(env); + Label notHole(env); + Branch(TaggedIsHole(*key), &hole, ¬Hole); + Bind(&hole); + { + currentDeletedElements = Int32Add(*currentDeletedElements, Int32(1)); + SetDeletedNum(linkedTable, *i, *currentDeletedElements); + Jump(&loopEnd); + } + Bind(¬Hole); + { + Label weak(env); + Label notWeak(env); + Branch(TaggedIsWeak(*key), &weak, ¬Weak); + Bind(&weak); + { + key = RemoveTaggedWeakTag(*key); + Jump(¬Weak); + } + Bind(¬Weak); + + GateRef hash = GetHash(*key); + GateRef bucket = HashToBucket(newTable, hash); + InsertNewEntry(newTable, bucket, *desEntry); + GateRef desIndex = EntryToIndex(newTable, *desEntry); + + Label loopHead1(env); + Label loopEnd1(env); + Label next1(env); + Label loopExit1(env); + DEFVARIABLE(j, VariableType::INT32(), Int32(0)); + Jump(&loopHead1); + LoopBegin(&loopHead1); + { + Branch(Int32LessThan(*j, Int32(LinkedHashTableObject::ENTRY_SIZE)), &next1, &loopExit1); + Bind(&next1); + GateRef ele = GetElement(linkedTable, Int32Add(fromIndex, *j)); + SetElement(newTable, Int32Add(desIndex, *j), ele); + Jump(&loopEnd1); + } + Bind(&loopEnd1); + j = Int32Add(*j, Int32(1)); + LoopEnd(&loopHead1); + Bind(&loopExit1); + desEntry = Int32Add(*desEntry, Int32(1)); + Jump(&loopEnd); + } + } + Bind(&loopEnd); + i = Int32Add(*i, 
Int32(1)); + LoopEnd(&loopHead); + Bind(&loopExit); + + SetNumberOfElements(newTable, GetNumberOfElements(linkedTable)); + SetNumberOfDeletedElements(newTable, Int32(0)); + env->SubCfgExit(); +} + +template +GateRef LinkedHashTableStubBuilder::GrowCapacity( + GateRef linkedTable, GateRef numberOfAddedElements) +{ + auto env = GetEnvironment(); + Label entryLabel(env); + env->SubCfgEntry(&entryLabel); + Label exit(env); + DEFVARIABLE(res, VariableType::JS_ANY(), linkedTable); + + GateRef hasSufficient = HasSufficientCapacity(linkedTable, numberOfAddedElements); + Label grow(env); + Branch(hasSufficient, &exit, &grow); + Bind(&grow); + { + GateRef newCapacity = ComputeCapacity(Int32Add(GetNumberOfElements(linkedTable), numberOfAddedElements)); + GateRef newTable = Create(newCapacity); + Rehash(linkedTable, newTable); + res = newTable; + Jump(&exit); + } + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +template +GateRef LinkedHashTableStubBuilder::ComputeCapacity( + GateRef atLeastSpaceFor) +{ + if constexpr (std::is_same_v) { + return TaggedGetInt(CallRuntime(glue_, RTSTUB_ID(LinkedHashMapComputeCapacity), { + IntToTaggedInt(atLeastSpaceFor) })); + } else { + return TaggedGetInt(CallRuntime(glue_, RTSTUB_ID(LinkedHashSetComputeCapacity), { + IntToTaggedInt(atLeastSpaceFor) })); + } +} + +template +void LinkedHashTableStubBuilder::RemoveEntry( + GateRef linkedTable, GateRef entry) +{ + auto env = GetEnvironment(); + Label entryLabel(env); + Label exit(env); + env->SubCfgEntry(&entryLabel); + DEFVARIABLE(i, VariableType::INT32(), Int32(0)); + + Label loopHead(env); + Label loopEnd(env); + Label next(env); + Label loopExit(env); + GateRef index = EntryToIndex(linkedTable, entry); + Jump(&loopHead); + LoopBegin(&loopHead); + { + Branch(Int32LessThan(*i, Int32(LinkedHashTableObject::ENTRY_SIZE)), &next, &loopExit); + Bind(&next); + + GateRef idx = Int32Add(index, *i); + SetElement(linkedTable, idx, Hole()); + Jump(&loopEnd); + } + Bind(&loopEnd); + i = Int32Add(*i, Int32(1)); + LoopEnd(&loopHead); + Bind(&loopExit); + + GateRef newNofe = Int32Sub(GetNumberOfElements(linkedTable), Int32(1)); + SetNumberOfElements(linkedTable, newNofe); + GateRef newNofd = Int32Add(GetNumberOfDeletedElements(linkedTable), Int32(1)); + SetNumberOfDeletedElements(linkedTable, newNofd); + env->SubCfgExit(); +} + +template +GateRef LinkedHashTableStubBuilder::HasSufficientCapacity( + GateRef linkedTable, GateRef numOfAddElements) +{ + auto env = GetEnvironment(); + Label entryLabel(env); + Label exit(env); + env->SubCfgEntry(&entryLabel); + DEFVARIABLE(res, VariableType::BOOL(), False()); + + GateRef numberOfElements = GetNumberOfElements(linkedTable); + GateRef numOfDelElements = GetNumberOfDeletedElements(linkedTable); + GateRef nof = Int32Add(numberOfElements, numOfAddElements); + GateRef capacity = GetCapacity(linkedTable); + GateRef less = Int32LessThan(nof, capacity); + GateRef half = Int32Div(Int32Sub(capacity, nof), Int32(2)); + GateRef lessHalf = Int32LessThanOrEqual(numOfDelElements, half); + + Label lessLable(env); + Branch(BoolAnd(less, lessHalf), &lessLable, &exit); + Bind(&lessLable); + { + Label need(env); + Branch(Int32LessThanOrEqual(Int32Add(nof, Int32Div(nof, Int32(2))), capacity), &need, &exit); + Bind(&need); + { + res = True(); + Jump(&exit); + } + } + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +template +GateRef LinkedHashTableStubBuilder::GetHash(GateRef key) +{ + auto env = GetEnvironment(); + Label entryLabel(env); + Label exit(env); + 
env->SubCfgEntry(&entryLabel);
+    DEFVARIABLE(res, VariableType::INT32(), Int32(0));
+
+    Label symbolKey(env);
+    Label stringCheck(env);
+    Branch(TaggedIsSymbol(key), &symbolKey, &stringCheck);
+    Bind(&symbolKey);
+    {
+        res = Load(VariableType::INT32(), key, IntPtr(JSSymbol::HASHFIELD_OFFSET));
+        Jump(&exit);
+    }
+    Bind(&stringCheck);
+    Label stringKey(env);
+    Label slowGetHash(env);
+    Branch(TaggedIsString(key), &stringKey, &slowGetHash);
+    Bind(&stringKey);
+    {
+        res = GetHashcodeFromString(glue_, key);
+        Jump(&exit);
+    }
+    Bind(&slowGetHash);
+    {
+        // neither a symbol nor a string: compute the hash in the runtime
+        GateRef hash = CallRuntime(glue_, RTSTUB_ID(GetLinkedHash), { key });
+        res = GetInt32OfTInt(hash);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::HashObjectIsMatch(
+    GateRef key, GateRef other)
+{
+    return SameValueZero(glue_, key, other);
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::FindElement(
+    GateRef linkedTable, GateRef key)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+
+    DEFVARIABLE(res, VariableType::INT32(), Int32(-1));
+    Label exit(env);
+    Label isKey(env);
+    Branch(IsKey(key), &isKey, &exit);
+    Bind(&isKey);
+    {
+        GateRef hash = GetHash(key);
+        GateRef bucket = HashToBucket(linkedTable, hash);
+        GateRef index = BucketToIndex(bucket);
+        DEFVARIABLE(entry, VariableType::JS_ANY(), GetElement(linkedTable, index));
+        Label loopHead(env);
+        Label loopEnd(env);
+        Label next(env);
+        Label loopExit(env);
+
+        Jump(&loopHead);
+        LoopBegin(&loopHead);
+        {
+            Branch(TaggedIsHole(*entry), &loopExit, &next);
+            Bind(&next);
+
+            DEFVARIABLE(element, VariableType::JS_ANY(), GetKey(linkedTable, TaggedGetInt(*entry)));
+            Label notHole(env);
+            Branch(TaggedIsHole(*element), &loopEnd, &notHole);
+            Bind(&notHole);
+            {
+                Label weak(env);
+                Label notWeak(env);
+                Branch(TaggedIsWeak(*element), &weak, &notWeak);
+                Bind(&weak);
+                {
+                    element = RemoveTaggedWeakTag(*element);
+                    Jump(&notWeak);
+                }
+                Bind(&notWeak);
+                Label match(env);
+                Branch(HashObjectIsMatch(key, *element), &match, &loopEnd);
+                Bind(&match);
+                {
+                    res = TaggedGetInt(*entry);
+                    Jump(&loopExit);
+                }
+            }
+        }
+        Bind(&loopEnd);
+        entry = GetNextEntry(linkedTable, TaggedGetInt(*entry));
+        LoopEnd(&loopHead);
+        Bind(&loopExit);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::GetDeletedElementsAt(
+    GateRef linkedTable, GateRef entry)
+{
+    auto env = GetEnvironment();
+    Label entryLabel(env);
+    env->SubCfgEntry(&entryLabel);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::INT32(), Int32(0));
+    DEFVARIABLE(currentEntry, VariableType::INT32(), Int32Sub(entry, Int32(1)));
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32GreaterThanOrEqual(*currentEntry, Int32(0)), &next, &loopExit);
+        Bind(&next);
+        GateRef key = GetKey(linkedTable, *currentEntry);
+        Label hole(env);
+        Branch(TaggedIsHole(key), &hole, &loopEnd);
+        Bind(&hole);
+        {
+            GateRef deletedNum = GetDeletedNum(linkedTable, *currentEntry);
+            res = deletedNum;
+            Jump(&exit);
+        }
+    }
+    Bind(&loopEnd);
+    currentEntry = Int32Sub(*currentEntry, Int32(1));
+    LoopEnd(&loopHead);
+    Bind(&loopExit);
+    Jump(&exit);
+    Bind(&exit);
+    auto ret = *res;
+    env->SubCfgExit();
+    return ret;
+}
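// Editor's note (illustrative sketch, not part of the patch): FindElement above
// and EntryToIndex in linked_hashtable_stub_builder.h below assume one flat
// TaggedArray holding a few header slots, then `capacity` bucket heads, then
// per-entry records of ENTRY_SIZE payload slots plus one next-entry link. The
// Int32 arithmetic the stub emits corresponds to this host-side helper; the
// parameter values come from LinkedHashTableType/LinkedHashTableObject, they
// are not constants invented here.
static int SketchEntryToIndex(int elementsStartIndex, int capacity, int entry, int entrySize)
{
    // record `entry` begins right after the bucket-head area
    return elementsStartIndex + capacity + entry * (entrySize + 1);
}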
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Create(GateRef numberOfElements)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+
+    // new LinkedHashTable
+    GateRef length = CalNewTaggedArrayLength(numberOfElements);
+    NewObjectStubBuilder newBuilder(this);
+    GateRef array = newBuilder.NewTaggedArray(glue_, length);
+
+    Label noException(env);
+    Branch(TaggedIsException(array), &exit, &noException);
+    Bind(&noException);
+    {
+        // SetNumberOfElements
+        SetNumberOfElements(array, Int32(0));
+        // SetNumberOfDeletedElements
+        SetNumberOfDeletedElements(array, Int32(0));
+        // SetCapacity
+        SetCapacity(array, numberOfElements);
+        Jump(&exit);
+    }
+    Bind(&exit);
+    env->SubCfgExit();
+    return array;
+}
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::Clear(GateRef linkedTable)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    Label setLinked(env);
+
+    GateRef newTable = Create(Int32(LinkedHashTableType::MIN_CAPACITY));
+    Label noException(env);
+    Branch(TaggedIsException(newTable), &exit, &noException);
+    Bind(&noException);
+
+    GateRef cap = GetCapacity(linkedTable);
+    Label capGreaterZero(env);
+    Branch(Int32GreaterThan(cap, Int32(0)), &capGreaterZero, &exit);
+    Bind(&capGreaterZero);
+    {
+        // NextTable
+        SetNextTable(linkedTable, newTable);
+        // SetNumberOfDeletedElements
+        SetNumberOfDeletedElements(linkedTable, Int32(-1));
+        Jump(&exit);
+    }
+
+    Bind(&exit);
+    env->SubCfgExit();
+    return newTable;
+}
+
+template GateRef LinkedHashTableStubBuilder<LinkedHashMap, LinkedHashMapObject>::Clear(GateRef);
+template GateRef LinkedHashTableStubBuilder<LinkedHashSet, LinkedHashSetObject>::Clear(GateRef);
+
+template <typename LinkedHashTableType, typename LinkedHashTableObject>
+GateRef LinkedHashTableStubBuilder<LinkedHashTableType, LinkedHashTableObject>::ForEach(GateRef thisValue,
+    GateRef srcLinkedTable, GateRef numArgs)
+{
+    auto env = GetEnvironment();
+    Label entry(env);
+    env->SubCfgEntry(&entry);
+    Label exit(env);
+    DEFVARIABLE(res, VariableType::JS_ANY(), Undefined());
+
+    // the caller has already checked that callbackFnHandle is callable
+    GateRef callbackFnHandle = GetCallArg0(numArgs);
+    GateRef thisArg = GetCallArg1(numArgs);
+    DEFVARIABLE(linkedTable, VariableType::JS_ANY(), srcLinkedTable);
+
+    GateRef numberOfElements = GetNumberOfElements(*linkedTable);
+    GateRef numberOfDeletedElements = GetNumberOfDeletedElements(*linkedTable);
+    GateRef tmpTotalElements = Int32Add(numberOfElements, numberOfDeletedElements);
+    DEFVARIABLE(totalElements, VariableType::INT32(), tmpTotalElements);
+    DEFVARIABLE(index, VariableType::INT32(), Int32(0));
+
+    Label loopHead(env);
+    Label loopEnd(env);
+    Label next(env);
+    Label loopExit(env);
+    Jump(&loopHead);
+    LoopBegin(&loopHead);
+    {
+        Branch(Int32LessThan(*index, *totalElements), &next, &loopExit);
+        Bind(&next);
+        GateRef valueIndex = *index;
+
+        GateRef key = GetKey(*linkedTable, *index);
+        index = Int32Add(*index, Int32(1));
+        Label keyNotHole(env);
+        Branch(TaggedIsHole(key), &loopEnd, &keyNotHole);
+        Bind(&keyNotHole);
+
+        GateRef value = key;
+        if constexpr (std::is_same_v<LinkedHashTableType, LinkedHashMap>) {
+            value = GetValue(*linkedTable, valueIndex);
+        }
+        Label hasException(env);
+        Label notHasException(env);
+        GateRef retValue = JSCallDispatch(glue_, callbackFnHandle, Int32(NUM_MANDATORY_JSFUNC_ARGS), 0,
+            Circuit::NullGate(), JSCallMode::CALL_THIS_ARG3_WITH_RETURN, { thisArg, value, key, thisValue });
+        Branch(HasPendingException(glue_), &hasException, &notHasException);
+        Bind(&hasException);
+        {
+            res = retValue;
+            Jump(&exit);
+        }
+        Bind(&notHasException);
+        {
+            // the callback may have inserted or deleted entries and triggered a
+            // rehash, so follow the next-table chain to the live table
+            GateRef tmpNextTable = GetNextTable(*linkedTable);
+            DEFVARIABLE(nextTable, VariableType::JS_ANY(), tmpNextTable);
+            Label loopHead1(env);
+            Label loopEnd1(env);
+            Label next1(env);
+            
Label loopExit1(env); + Jump(&loopHead1); + LoopBegin(&loopHead1); + { + Branch(TaggedIsHole(*nextTable), &loopExit1, &next1); + Bind(&next1); + GateRef deleted = GetDeletedElementsAt(*linkedTable, *index); + index = Int32Sub(*index, deleted); + linkedTable = *nextTable; + nextTable = GetNextTable(*linkedTable); + Jump(&loopEnd1); + } + Bind(&loopEnd1); + LoopEnd(&loopHead1); + Bind(&loopExit1); + // update totalElements + GateRef numberOfEle = GetNumberOfElements(*linkedTable); + GateRef numberOfDeletedEle = GetNumberOfDeletedElements(*linkedTable); + totalElements = Int32Add(numberOfEle, numberOfDeletedEle); + Jump(&loopEnd); + } + } + Bind(&loopEnd); + LoopEnd(&loopHead); + Bind(&loopExit); + Jump(&exit); + + Bind(&exit); + env->SubCfgExit(); + return *res; +} + +template GateRef LinkedHashTableStubBuilder::ForEach(GateRef thisValue, + GateRef linkedTable, GateRef numArgs); +template GateRef LinkedHashTableStubBuilder::ForEach(GateRef thisValue, + GateRef linkedTable, GateRef numArgs); + +template +GateRef LinkedHashTableStubBuilder::Insert( + GateRef linkedTable, GateRef key, GateRef value) +{ + auto env = GetEnvironment(); + Label cfgEntry(env); + env->SubCfgEntry(&cfgEntry); + Label exit(env); + DEFVARIABLE(res, VariableType::JS_ANY(), linkedTable); + GateRef entry = FindElement(linkedTable, key); + Label findEntry(env); + Label notFind(env); + Branch(Int32Equal(entry, Int32(-1)), ¬Find, &findEntry); + Bind(&findEntry); + { + SetValue(linkedTable, entry, value); + Jump(&exit); + } + Bind(¬Find); + { + GateRef newTable = GrowCapacity(linkedTable, Int32(1)); + res = newTable; + GateRef hash = GetHash(key); + GateRef bucket = HashToBucket(newTable, hash); + GateRef numberOfElements = GetNumberOfElements(newTable); + + GateRef newEntry = Int32Add(numberOfElements, GetNumberOfDeletedElements(newTable)); + InsertNewEntry(newTable, bucket, newEntry); + SetKey(newTable, newEntry, key); + SetValue(newTable, newEntry, value); + GateRef newNumberOfElements = Int32Add(numberOfElements, Int32(1)); + SetNumberOfElements(newTable, newNumberOfElements); + Jump(&exit); + } + + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +template GateRef LinkedHashTableStubBuilder::Insert( + GateRef linkedTable, GateRef key, GateRef value); +template GateRef LinkedHashTableStubBuilder::Insert( + GateRef linkedTable, GateRef key, GateRef value); + +template +GateRef LinkedHashTableStubBuilder::Delete( + GateRef linkedTable, GateRef key) +{ + auto env = GetEnvironment(); + Label cfgEntry(env); + env->SubCfgEntry(&cfgEntry); + Label exit(env); + DEFVARIABLE(res, VariableType::JS_ANY(), TaggedFalse()); + GateRef entry = FindElement(linkedTable, key); + Label findEntry(env); + Branch(Int32Equal(entry, Int32(-1)), &exit, &findEntry); + Bind(&findEntry); + { + RemoveEntry(linkedTable, entry); + res = TaggedTrue(); + Jump(&exit); + } + + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +template GateRef LinkedHashTableStubBuilder::Delete( + GateRef linkedTable, GateRef key); +template GateRef LinkedHashTableStubBuilder::Delete( + GateRef linkedTable, GateRef key); + +template +GateRef LinkedHashTableStubBuilder::Has( + GateRef linkedTable, GateRef key) +{ + auto env = GetEnvironment(); + Label cfgEntry(env); + env->SubCfgEntry(&cfgEntry); + Label exit(env); + DEFVARIABLE(res, VariableType::JS_ANY(), TaggedFalse()); + GateRef entry = FindElement(linkedTable, key); + Label findEntry(env); + Branch(Int32Equal(entry, Int32(-1)), &exit, &findEntry); + Bind(&findEntry); + { + res 
= TaggedTrue(); + Jump(&exit); + } + + Bind(&exit); + auto ret = *res; + env->SubCfgExit(); + return ret; +} + +template GateRef LinkedHashTableStubBuilder::Has( + GateRef linkedTable, GateRef key); +template GateRef LinkedHashTableStubBuilder::Has( + GateRef linkedTable, GateRef key); +} // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..74d60f3b52d48cc7b22decd44fdd8e018caf8d23 --- /dev/null +++ b/ecmascript/compiler/builtins/linked_hashtable_stub_builder.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2022 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H +#define ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H +#include "ecmascript/compiler/stub_builder-inl.h" + +namespace panda::ecmascript::kungfu { + +template +class LinkedHashTableStubBuilder : public BuiltinsStubBuilder { +public: + explicit LinkedHashTableStubBuilder(BuiltinsStubBuilder *parent, GateRef glue) + : BuiltinsStubBuilder(parent), glue_(glue) {} + ~LinkedHashTableStubBuilder() override = default; + NO_MOVE_SEMANTIC(LinkedHashTableStubBuilder); + NO_COPY_SEMANTIC(LinkedHashTableStubBuilder); + void GenerateCircuit() override {} + + GateRef Create(GateRef numberOfElements); + GateRef Clear(GateRef linkedTable); + GateRef ForEach(GateRef thisValue, GateRef linkedTable, GateRef numArgs); + GateRef Insert(GateRef linkedTable, GateRef key, GateRef value); + GateRef Delete(GateRef linkedTable, GateRef key); + GateRef Has(GateRef linkedTable, GateRef key); + +private: + GateRef IsKey(GateRef key) + { + return TaggedIsNotHole(key); + } + + GateRef HashToBucket(GateRef linkedTable, GateRef hash) + { + GateRef cap = GetCapacity(linkedTable); + return Int32And(hash, Int32Sub(cap, Int32(1))); + } + + GateRef BucketToIndex(GateRef bucket) + { + return Int32Add(bucket, Int32(LinkedHashTableType::ELEMENTS_START_INDEX)); + } + + GateRef GetHash(GateRef key); + GateRef HashObjectIsMatch(GateRef key, GateRef other); + GateRef FindElement(GateRef linkedTable, GateRef key); + GateRef GetKey(GateRef linkedTable, GateRef entry) + { + GateRef index = EntryToIndex(linkedTable, entry); + return GetElement(linkedTable, index); + } + + void SetKey(GateRef linkedTable, GateRef entry, GateRef key) + { + GateRef index = EntryToIndex(linkedTable, entry); + SetElement(linkedTable, index, key); + } + + GateRef GetValue(GateRef linkedTable, GateRef entry) + { + GateRef index = EntryToIndex(linkedTable, entry); + GateRef valueIndex = Int32(LinkedHashTableObject::ENTRY_VALUE_INDEX); + return GetElement(linkedTable, Int32Add(index, valueIndex)); + } + + void SetValue(GateRef linkedTable, GateRef entry, GateRef value) + { + GateRef index = EntryToIndex(linkedTable, entry); + GateRef valueIndex = Int32(LinkedHashTableObject::ENTRY_VALUE_INDEX); + 
SetElement(linkedTable, Int32Add(index, valueIndex), value); + } + + GateRef EntryToIndex(GateRef linkedTable, GateRef entry) + { + int32_t startIndex = LinkedHashTableType::ELEMENTS_START_INDEX; + int32_t entrySize = LinkedHashTableObject::ENTRY_SIZE; + GateRef sumEntrySize = Int32Mul(entry, Int32Add(Int32(entrySize), Int32(1))); + return Int32Add(Int32(startIndex), Int32Add(GetCapacity(linkedTable), sumEntrySize)); + } + + GateRef GetElement(GateRef linkedTable, GateRef index) + { + return GetValueFromTaggedArray(linkedTable, index); + } + + void SetElement(GateRef linkedTable, GateRef index, GateRef value) + { + SetValueToTaggedArray(VariableType::JS_ANY(), glue_, linkedTable, index, value); + } + + GateRef GetDeletedNum(GateRef linkedTable, GateRef entry) + { + return TaggedGetInt(GetNextEntry(linkedTable, entry)); + } + + void SetDeletedNum(GateRef linkedTable, GateRef entry, GateRef num) + { + SetNextEntry(linkedTable, entry, IntToTaggedInt(num)); + } + + GateRef GetNextEntry(GateRef linkedTable, GateRef entry) + { + GateRef entryIndex = EntryToIndex(linkedTable, entry); + return GetElement(linkedTable, Int32Add(entryIndex, Int32(LinkedHashTableObject::ENTRY_SIZE))); + } + + void SetNextEntry(GateRef linkedTable, GateRef entry, GateRef nextEntry) + { + GateRef entryIndex = EntryToIndex(linkedTable, entry); + SetElement(linkedTable, Int32Add(entryIndex, Int32(LinkedHashTableObject::ENTRY_SIZE)), nextEntry); + } + + GateRef GetCapacity(GateRef linkedTable) + { + GateRef capacityIndex = Int32(LinkedHashTableType::CAPACITY_INDEX); + GateRef capacity = GetValueFromTaggedArray(linkedTable, capacityIndex); + return TaggedGetInt(capacity); + } + + void SetCapacity(GateRef linkedTable, GateRef numberOfElements) + { + GateRef capacityIndex = Int32(LinkedHashTableType::CAPACITY_INDEX); + SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, capacityIndex, + IntToTaggedInt(numberOfElements)); + } + + GateRef GetNumberOfElements(GateRef linkedTable) + { + int32_t elementsIndex = LinkedHashTableType::NUMBER_OF_ELEMENTS_INDEX; + GateRef tmpNumberOfElements = GetValueFromTaggedArray(linkedTable, Int32(elementsIndex)); + return TaggedGetInt(tmpNumberOfElements); + } + + void SetNumberOfElements(GateRef linkedTable, GateRef num) + { + int32_t elementsIndex = LinkedHashTableType::NUMBER_OF_ELEMENTS_INDEX; + SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, Int32(elementsIndex), + IntToTaggedInt(num)); + } + + GateRef GetNumberOfDeletedElements(GateRef linkedTable) + { + GateRef deletedIndex = Int32(LinkedHashTableType::NUMBER_OF_DELETED_ELEMENTS_INDEX); + GateRef tmpNumberOfDeletedElements = GetValueFromTaggedArray(linkedTable, deletedIndex); + return TaggedGetInt(tmpNumberOfDeletedElements); + } + + void SetNumberOfDeletedElements(GateRef linkedTable, GateRef num) + { + GateRef deletedIndex = Int32(LinkedHashTableType::NUMBER_OF_DELETED_ELEMENTS_INDEX); + SetValueToTaggedArray(VariableType::JS_NOT_POINTER(), glue_, linkedTable, deletedIndex, IntToTaggedInt(num)); + } + + GateRef GetNextTable(GateRef linkedTable) + { + GateRef nextTableIndex = Int32(LinkedHashTableType::NEXT_TABLE_INDEX); + return GetValueFromTaggedArray(linkedTable, nextTableIndex); + } + + void SetNextTable(GateRef linkedTable, GateRef nexTable) + { + GateRef nextTableIndex = Int32(LinkedHashTableType::NEXT_TABLE_INDEX); + SetValueToTaggedArray(VariableType::JS_POINTER(), glue_, linkedTable, nextTableIndex, nexTable); + } + + GateRef CalNewTaggedArrayLength(GateRef numberOfElements) + { + 
GateRef startIndex = Int32(LinkedHashTableType::ELEMENTS_START_INDEX); + GateRef entrySize = Int32(LinkedHashTableObject::ENTRY_SIZE); + GateRef nEntrySize = Int32Mul(numberOfElements, Int32Add(entrySize, Int32(1))); + GateRef length = Int32Add(startIndex, Int32Add(numberOfElements, nEntrySize)); + return length; + } + + void InsertNewEntry(GateRef linkedTable, GateRef bucket, GateRef entry) + { + GateRef bucketIndex = BucketToIndex(bucket); + GateRef previousEntry = GetElement(linkedTable, bucketIndex); + SetNextEntry(linkedTable, entry, previousEntry); + SetElement(linkedTable, bucketIndex, IntToTaggedInt(entry)); + } + + GateRef GetDeletedElementsAt(GateRef linkedTable, GateRef entry); + GateRef GrowCapacity(GateRef linkedTable, GateRef numberOfAddedElements); + GateRef HasSufficientCapacity(GateRef linkedTable, GateRef numOfAddElements); + void Rehash(GateRef linkedTable, GateRef newTable); + GateRef ComputeCapacity(GateRef atLeastSpaceFor); + void RemoveEntry(GateRef linkedTable, GateRef entry); + + GateRef glue_; +}; +} // namespace panda::ecmascript::kungfu +#endif // ECMASCRIPT_COMPILER_BUILTINS_LINKED_HASHTABLE_STUB_BUILDER_H diff --git a/ecmascript/compiler/builtins_lowering.cpp b/ecmascript/compiler/builtins_lowering.cpp index 7fdd479d346b906adaadd4836359c0ab2dc1867b..5dbd6af01601eaf0cb99e277f991658f22694d8c 100644 --- a/ecmascript/compiler/builtins_lowering.cpp +++ b/ecmascript/compiler/builtins_lowering.cpp @@ -15,6 +15,8 @@ #include "ecmascript/compiler/builtins_lowering.h" +#include "ecmascript/global_env.h" + namespace panda::ecmascript::kungfu { void BuiltinLowering::LowerTypedCallBuitin(GateRef gate) { @@ -36,6 +38,25 @@ void BuiltinLowering::LowerTypedCallBuitin(GateRef gate) case BUILTINS_STUB_ID(LocaleCompare): LowerTypedLocaleCompare(gate); break; + case BUILTINS_STUB_ID(SORT): + LowerTypedArraySort(gate); + break; + case BUILTINS_STUB_ID(STRINGIFY): + LowerTypedStringify(gate); + break; + case BUILTINS_STUB_ID(MAP_PROTO_ITERATOR): + case BUILTINS_STUB_ID(SET_PROTO_ITERATOR): + case BUILTINS_STUB_ID(STRING_PROTO_ITERATOR): + case BUILTINS_STUB_ID(ARRAY_PROTO_ITERATOR): + case BUILTINS_STUB_ID(TYPED_ARRAY_PROTO_ITERATOR): + LowerBuiltinIterator(gate, id); + break; + case BUILTINS_STUB_ID(MAP_ITERATOR_PROTO_NEXT): + case BUILTINS_STUB_ID(SET_ITERATOR_PROTO_NEXT): + case BUILTINS_STUB_ID(STRING_ITERATOR_PROTO_NEXT): + case BUILTINS_STUB_ID(ARRAY_ITERATOR_PROTO_NEXT): + LowerIteratorNext(gate, id); + break; default: break; } @@ -58,7 +79,7 @@ GateRef BuiltinLowering::TypedTrigonometric(GateRef gate, BuiltinsStubCSigns::ID Label exit(&builder_); GateRef para1 = acc_.GetValueIn(gate, 0); - DEFVAlUE(result, (&builder_), VariableType::JS_ANY(), builder_.HoleConstant()); + DEFVALUE(result, (&builder_), VariableType::JS_ANY(), builder_.HoleConstant()); builder_.Branch(builder_.TaggedIsNumber(para1), &numberBranch, ¬NumberBranch); builder_.Bind(&numberBranch); @@ -151,7 +172,7 @@ GateRef BuiltinLowering::TypedAbs(GateRef gate) Label exit(&builder_); GateRef para1 = acc_.GetValueIn(gate, 0); - DEFVAlUE(result, (&builder_), VariableType::JS_ANY(), builder_.HoleConstant()); + DEFVALUE(result, (&builder_), VariableType::JS_ANY(), builder_.HoleConstant()); Label isInt(&builder_); Label notInt(&builder_); @@ -237,17 +258,84 @@ void BuiltinLowering::LowerTypedLocaleCompare(GateRef gate) ReplaceHirWithValue(gate, result); } +void BuiltinLowering::LowerTypedArraySort(GateRef gate) +{ + GateRef glue = acc_.GetGlueFromArgList(); + GateRef thisObj = acc_.GetValueIn(gate, 0); + GateRef 
result = LowerCallRuntime(glue, gate, RTSTUB_ID(ArraySort), { thisObj }); + ReplaceHirWithValue(gate, result); +} + GateRef BuiltinLowering::LowerCallTargetCheck(Environment *env, GateRef gate) { builder_.SetEnvironment(env); GateRef idGate = acc_.GetValueIn(gate, 1); BuiltinsStubCSigns::ID id = static_cast(acc_.GetConstantValue(idGate)); - GateRef constantFunction = builder_.GetGlobalConstantValue(GET_TYPED_CONSTANT_INDEX(id)); + switch (id) { + case BuiltinsStubCSigns::ID::MAP_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::SET_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::STRING_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::ARRAY_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::TYPED_ARRAY_PROTO_ITERATOR: { + return LowerCallTargetCheckWithDetector(gate, id); + } + default: { + return LowerCallTargetCheckDefault(gate, id); + } + } +} +GateRef BuiltinLowering::LowerCallTargetCheckDefault(GateRef gate, BuiltinsStubCSigns::ID id) +{ + GateRef constantFunction = builder_.GetGlobalConstantValue(GET_TYPED_CONSTANT_INDEX(id)); GateRef function = acc_.GetValueIn(gate, 0); // 0: function return builder_.Equal(function, constantFunction); } +GateRef BuiltinLowering::LowerCallTargetCheckWithDetector(GateRef gate, BuiltinsStubCSigns::ID id) +{ + JSType expectType = JSType::INVALID; + uint8_t detectorIndex = 0; + switch (id) { + case BuiltinsStubCSigns::ID::MAP_PROTO_ITERATOR: { + expectType = JSType::JS_MAP; + detectorIndex = GlobalEnv::MAP_ITERATOR_DETECTOR_INDEX; + break; + } + case BuiltinsStubCSigns::ID::SET_PROTO_ITERATOR: { + expectType = JSType::JS_SET; + detectorIndex = GlobalEnv::SET_ITERATOR_DETECTOR_INDEX; + break; + } + case BuiltinsStubCSigns::ID::STRING_PROTO_ITERATOR: { + expectType = JSType::STRING_FIRST; + detectorIndex = GlobalEnv::STRING_ITERATOR_DETECTOR_INDEX; + break; + } + case BuiltinsStubCSigns::ID::ARRAY_PROTO_ITERATOR: { + expectType = JSType::JS_ARRAY; + detectorIndex = GlobalEnv::ARRAY_ITERATOR_DETECTOR_INDEX; + break; + } + case BuiltinsStubCSigns::ID::TYPED_ARRAY_PROTO_ITERATOR: { + expectType = JSType::JS_TYPED_ARRAY_FIRST; + detectorIndex = GlobalEnv::TYPED_ARRAY_ITERATOR_DETECTOR_INDEX; + break; + } + default: { + LOG_COMPILER(FATAL) << "this branch is unreachable"; + UNREACHABLE(); + } + } + GateRef obj = acc_.GetValueIn(gate, 2); // 2: iterator obj + GateRef check1 = builder_.BoolAnd( + builder_.TaggedIsHeapObjectOp(obj), builder_.IsSpecificObjectType(obj, expectType)); + GateRef glueGlobalEnv = builder_.GetGlobalEnv(); + GateRef markerCell = builder_.GetGlobalEnvObj(glueGlobalEnv, detectorIndex); + GateRef check2 = builder_.BoolAnd(check1, builder_.IsMarkerCellValid(markerCell)); + return check2; +} + GateRef BuiltinLowering::CheckPara(GateRef gate, GateRef funcCheck) { GateRef idGate = acc_.GetValueIn(gate, 1); @@ -266,10 +354,94 @@ GateRef BuiltinLowering::CheckPara(GateRef gate, GateRef funcCheck) case BuiltinsStubCSigns::ID::SQRT: // NumberSpeculativeRetype is checked return funcCheck; + case BuiltinsStubCSigns::ID::LocaleCompare: + case BuiltinsStubCSigns::ID::SORT: + case BuiltinsStubCSigns::ID::STRINGIFY: + case BuiltinsStubCSigns::ID::MAP_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::SET_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::STRING_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::ARRAY_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::TYPED_ARRAY_PROTO_ITERATOR: + case BuiltinsStubCSigns::ID::MAP_ITERATOR_PROTO_NEXT: + case BuiltinsStubCSigns::ID::SET_ITERATOR_PROTO_NEXT: + case BuiltinsStubCSigns::ID::STRING_ITERATOR_PROTO_NEXT: + case 
BuiltinsStubCSigns::ID::ARRAY_ITERATOR_PROTO_NEXT: + // These builtins don't need a parameter check + return funcCheck; default: { LOG_COMPILER(FATAL) << "this branch is unreachable"; UNREACHABLE(); } } } + +void BuiltinLowering::LowerTypedStringify(GateRef gate) +{ + GateRef glue = acc_.GetGlueFromArgList(); + GateRef value = acc_.GetValueIn(gate, 0); + std::vector<GateRef> args; + args.emplace_back(value); + GateRef result = LowerCallRuntime(glue, gate, RTSTUB_ID(FastStringify), args); + ReplaceHirWithValue(gate, result); +} + +void BuiltinLowering::LowerBuiltinIterator(GateRef gate, BuiltinsStubCSigns::ID id) +{ + GateRef glue = acc_.GetGlueFromArgList(); + GateRef obj = acc_.GetValueIn(gate, 0); + GateRef result = Circuit::NullGate(); + switch (id) { + case BUILTINS_STUB_ID(MAP_PROTO_ITERATOR): { + result = builder_.CallStub(glue, gate, CommonStubCSigns::CreateJSMapIterator, { glue, obj }); + break; + } + case BUILTINS_STUB_ID(SET_PROTO_ITERATOR): { + result = builder_.CallStub(glue, gate, CommonStubCSigns::CreateJSSetIterator, { glue, obj }); + break; + } + case BUILTINS_STUB_ID(STRING_PROTO_ITERATOR): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(CreateStringIterator), { obj }, true); + break; + } + case BUILTINS_STUB_ID(ARRAY_PROTO_ITERATOR): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(NewJSArrayIterator), { obj }, true); + break; + } + case BUILTINS_STUB_ID(TYPED_ARRAY_PROTO_ITERATOR): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(NewJSTypedArrayIterator), { obj }, true); + break; + } + default: + UNREACHABLE(); + } + ReplaceHirWithValue(gate, result); +} + +void BuiltinLowering::LowerIteratorNext(GateRef gate, BuiltinsStubCSigns::ID id) +{ + GateRef glue = acc_.GetGlueFromArgList(); + GateRef thisObj = acc_.GetValueIn(gate, 0); + GateRef result = Circuit::NullGate(); + switch (id) { + case BUILTINS_STUB_ID(MAP_ITERATOR_PROTO_NEXT): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(MapIteratorNext), { thisObj }, true); + break; + } + case BUILTINS_STUB_ID(SET_ITERATOR_PROTO_NEXT): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(SetIteratorNext), { thisObj }, true); + break; + } + case BUILTINS_STUB_ID(STRING_ITERATOR_PROTO_NEXT): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(StringIteratorNext), { thisObj }, true); + break; + } + case BUILTINS_STUB_ID(ARRAY_ITERATOR_PROTO_NEXT): { + result = LowerCallRuntime(glue, gate, RTSTUB_ID(ArrayIteratorNext), { thisObj }, true); + break; + } + default: + UNREACHABLE(); + } + ReplaceHirWithValue(gate, result); +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/builtins_lowering.h b/ecmascript/compiler/builtins_lowering.h index 2853ed1950ac17e5a7eee6e7d84c3cfbb40a8e87..d4d9a71d5b20dba838fd9a746dc6f70f9d3421b5 100644 --- a/ecmascript/compiler/builtins_lowering.h +++ b/ecmascript/compiler/builtins_lowering.h @@ -31,6 +31,7 @@ public: void LowerTypedSqrt(GateRef gate); GateRef CheckPara(GateRef gate, GateRef funcCheck); void LowerTypedLocaleCompare(GateRef gate); + void LowerTypedArraySort(GateRef gate); private: void LowerTypedTrigonometric(GateRef gate, BuiltinsStubCSigns::ID id); @@ -41,6 +42,11 @@ private: GateRef LowerCallRuntime(GateRef glue, GateRef gate, int index, const std::vector<GateRef> &args, bool useLabel = false); void ReplaceHirWithValue(GateRef hirGate, GateRef value, bool noThrow = false); + GateRef LowerCallTargetCheckDefault(GateRef gate, BuiltinsStubCSigns::ID id); + GateRef LowerCallTargetCheckWithDetector(GateRef gate, BuiltinsStubCSigns::ID id); + void LowerTypedStringify(GateRef gate); + void 
LowerBuiltinIterator(GateRef gate, BuiltinsStubCSigns::ID id); + void LowerIteratorNext(GateRef gate, BuiltinsStubCSigns::ID id); Circuit *circuit_ {nullptr}; CircuitBuilder builder_; diff --git a/ecmascript/compiler/bytecode_circuit_builder.cpp b/ecmascript/compiler/bytecode_circuit_builder.cpp index b1dbee7c98ddb20cc21e3328f60bf7090abf920d..4a199a163b3c80ba2877de175396efdea5310c95 100644 --- a/ecmascript/compiler/bytecode_circuit_builder.cpp +++ b/ecmascript/compiler/bytecode_circuit_builder.cpp @@ -42,7 +42,8 @@ void BytecodeCircuitBuilder::BuildRegionInfo() infoData_.resize(size); byteCodeToJSGates_.resize(size, std::vector(0)); regionsInfo_.InsertHead(0); // 0: start pc - for (iterator.GotoStart(); !iterator.Done(); ++iterator) { + iterator.GotoStart(); + while (!iterator.Done()) { auto index = iterator.Index(); auto &info = infoData_[index]; auto pc = pcOffsets_[index]; @@ -50,6 +51,7 @@ void BytecodeCircuitBuilder::BuildRegionInfo() ASSERT(!info.metaData_.IsInvalid()); BytecodeInfo::InitBytecodeInfo(this, info, pc); CollectRegionInfo(index); + ++iterator; } } @@ -131,7 +133,7 @@ void BytecodeCircuitBuilder::CollectTryCatchBlockInfo(ExceptionInfo &byteCodeExc auto catchBlockBcIndex = FindBcIndexByPc(catchBlockPc); regionsInfo_.InsertHead(catchBlockBcIndex); // try block associate catch block - byteCodeException.back().catchs.emplace_back(catchBlockPc); + byteCodeException.back().catches.emplace_back(catchBlockPc); return true; }); return true; @@ -140,11 +142,11 @@ void BytecodeCircuitBuilder::CollectTryCatchBlockInfo(ExceptionInfo &byteCodeExc void BytecodeCircuitBuilder::BuildEntryBlock() { - BytecodeRegion &entryBlock = graph_[0]; - BytecodeRegion &nextBlock = graph_[1]; + BytecodeRegion &entryBlock = RegionAt(0); + BytecodeRegion &nextBlock = RegionAt(1); entryBlock.succs.emplace_back(&nextBlock); nextBlock.preds.emplace_back(&entryBlock); - entryBlock.bytecodeIterator_.Reset(this, INVALID_INDEX, INVALID_INDEX); + entryBlock.bytecodeIterator_.Reset(this, 0, BytecodeIterator::INVALID_INDEX); } void BytecodeCircuitBuilder::BuildRegions(const ExceptionInfo &byteCodeException) @@ -155,7 +157,10 @@ void BytecodeCircuitBuilder::BuildRegions(const ExceptionInfo &byteCodeException // 1 : entry block. if the loop head is in the first bb block, the variables used in the head cannot correctly // generate Phi nodes through the dominator-tree algorithm, resulting in an infinite loop. 
Therefore, an empty // BB block is generated as an entry block - graph_.resize(blockSize + 1); + graph_.resize(blockSize + 1, nullptr); + for (size_t i = 0; i < graph_.size(); i++) { + graph_[i] = circuit_->chunk()->New(circuit_->chunk()); + } // build entry block BuildEntryBlock(); @@ -167,7 +172,7 @@ void BytecodeCircuitBuilder::BuildRegions(const ExceptionInfo &byteCodeException curBlock.id = blockId; curBlock.start = item.GetStartBcIndex(); if (blockId != 1) { - auto &prevBlock = graph_[blockId - 1]; + auto &prevBlock = RegionAt(blockId - 1); prevBlock.end = curBlock.start - 1; prevBlock.bytecodeIterator_.Reset(this, prevBlock.start, prevBlock.end); // fall through @@ -178,7 +183,7 @@ void BytecodeCircuitBuilder::BuildRegions(const ExceptionInfo &byteCodeException } blockId++; } - auto &lastBlock = graph_[blockId - 1]; // 1: last block + auto &lastBlock = RegionAt(blockId - 1); // 1: last block lastBlock.end = GetLastBcIndex(); lastBlock.bytecodeIterator_.Reset(this, lastBlock.start, lastBlock.end); @@ -195,17 +200,22 @@ void BytecodeCircuitBuilder::BuildRegions(const ExceptionInfo &byteCodeException if (byteCodeException.size() != 0) { BuildCatchBlocks(byteCodeException); } + UpdateCFG(); + if (HasTryCatch()) { + CollectTryPredsInfo(); + } + RemoveUnreachableRegion(); if (IsLogEnabled()) { - PrintGraph("Build Basic Block"); + PrintGraph("Update CFG"); } - ComputeDominatorTree(); + BuildCircuit(); } void BytecodeCircuitBuilder::BuildCatchBlocks(const ExceptionInfo &byteCodeException) { // try catch block associate for (size_t i = 0; i < graph_.size(); i++) { - auto &bb = graph_[i]; + auto &bb = RegionAt(i); auto startIndex = bb.start; const auto pc = pcOffsets_[startIndex]; for (auto it = byteCodeException.cbegin(); it != byteCodeException.cend(); it++) { @@ -213,12 +223,12 @@ void BytecodeCircuitBuilder::BuildCatchBlocks(const ExceptionInfo &byteCodeExcep continue; } // try block interval - const auto &catchs = it->catchs; // catchs start pc + const auto &catches = it->catches; // catches start pc for (size_t j = i + 1; j < graph_.size(); j++) { - auto &catchBB = graph_[j]; + auto &catchBB = RegionAt(j); const auto catchStart = pcOffsets_[catchBB.start]; - if (std::find(catchs.cbegin(), catchs.cend(), catchStart) != catchs.cend()) { - bb.catchs.insert(bb.catchs.cbegin(), &catchBB); + if (std::find(catches.cbegin(), catches.cend(), catchStart) != catches.cend()) { + bb.catches.insert(bb.catches.cbegin(), &catchBB); bb.succs.emplace_back(&catchBB); catchBB.preds.emplace_back(&bb); } @@ -231,279 +241,126 @@ void BytecodeCircuitBuilder::BuildCatchBlocks(const ExceptionInfo &byteCodeExcep } } -void BytecodeCircuitBuilder::ComputeDominatorTree() +void BytecodeCircuitBuilder::CollectTryPredsInfo() { - // Construct graph backward order - std::unordered_map bbIdToDfsTimestamp; - std::unordered_map dfsFatherIdx; - std::unordered_map bbDfsTimestampToIdx; - std::vector basicBlockList; - size_t timestamp = 0; - std::deque pendingList; - std::vector visited(graph_.size(), 0); - auto basicBlockId = graph_[0].id; - visited[graph_[0].id] = 1; - pendingList.emplace_back(basicBlockId); - while (!pendingList.empty()) { - size_t curBlockId = pendingList.back(); - pendingList.pop_back(); - basicBlockList.emplace_back(curBlockId); - bbIdToDfsTimestamp[curBlockId] = timestamp++; - for (const auto &succBlock: graph_[curBlockId].succs) { - if (visited[succBlock->id] == 0) { - visited[succBlock->id] = 1; - pendingList.emplace_back(succBlock->id); - dfsFatherIdx[succBlock->id] = bbIdToDfsTimestamp[curBlockId]; + 
for (size_t i = 0; i < graph_.size(); i++) { + auto &bb = RegionAt(i); + if (bb.catches.empty()) { + continue; + } else if (bb.catches.size() > 1) { // 1: more than one catch block + for (auto it = bb.catches.begin() + 1; it != bb.catches.end();) { // 1: invalid catch bb + bb.EraseThisBlock((*it)->trys); + it = bb.catches.erase(it); + } + } - } - - for (size_t idx = 0; idx < basicBlockList.size(); idx++) { - bbDfsTimestampToIdx[basicBlockList[idx]] = idx; - } - RemoveDeadRegions(bbIdToDfsTimestamp); - std::vector<size_t> immDom(basicBlockList.size()); // immediate dominator with dfs order index - std::vector<size_t> semiDom(basicBlockList.size()); - std::vector<size_t> realImmDom(graph_.size()); // immediate dominator with real index - std::vector<std::vector<size_t> > semiDomTree(basicBlockList.size()); - { - std::vector<size_t> parent(basicBlockList.size()); - std::iota(parent.begin(), parent.end(), 0); - std::vector<size_t> minIdx(basicBlockList.size()); - std::function<size_t(size_t)> unionFind = [&] (size_t idx) -> size_t { - if (parent[idx] == idx) return idx; - size_t unionFindSetRoot = unionFind(parent[idx]); - if (semiDom[minIdx[idx]] > semiDom[minIdx[parent[idx]]]) { - minIdx[idx] = minIdx[parent[idx]]; - } - return parent[idx] = unionFindSetRoot; - }; - auto merge = [&] (size_t fatherIdx, size_t sonIdx) -> void { - size_t parentFatherIdx = unionFind(fatherIdx); - size_t parentSonIdx = unionFind(sonIdx); - parent[parentSonIdx] = parentFatherIdx; - }; - std::iota(semiDom.begin(), semiDom.end(), 0); - semiDom[0] = semiDom.size(); - for (size_t idx = basicBlockList.size() - 1; idx >= 1; idx--) { - for (const auto &preBlock : graph_[basicBlockList[idx]].preds) { - if (bbDfsTimestampToIdx[preBlock->id] < idx) { - semiDom[idx] = std::min(semiDom[idx], bbDfsTimestampToIdx[preBlock->id]); - } else { - unionFind(bbDfsTimestampToIdx[preBlock->id]); - semiDom[idx] = std::min(semiDom[idx], semiDom[minIdx[bbDfsTimestampToIdx[preBlock->id]]]); - } - } - for (const auto & succDomIdx : semiDomTree[idx]) { - unionFind(succDomIdx); - if (idx == semiDom[minIdx[succDomIdx]]) { - immDom[succDomIdx] = idx; - } else { - immDom[succDomIdx] = minIdx[succDomIdx]; + EnumerateBlock(bb, [&bb](const BytecodeInfo &bytecodeInfo) -> bool { + if (bytecodeInfo.IsGeneral()) { + // if a block that can throw exceptions has several catch blocks, + // only the innermost catch block is useful + ASSERT(bb.catches.size() == 1); // 1: only the innermost catch block remains + if (!bytecodeInfo.NoThrow()) { + bb.catches.at(0)->numOfStatePreds++; + } } - minIdx[idx] = idx; - merge(dfsFatherIdx[basicBlockList[idx]], idx); - semiDomTree[semiDom[idx]].emplace_back(idx); - } - for (size_t idx = 1; idx < basicBlockList.size(); idx++) { - if (immDom[idx] != semiDom[idx]) { - immDom[idx] = immDom[immDom[idx]]; - } - realImmDom[basicBlockList[idx]] = basicBlockList[immDom[idx]]; - } - semiDom[0] = 0; - } - - if (IsLogEnabled()) { - PrintGraph("Computed Dom Trees"); - } - - BuildImmediateDominator(realImmDom); -} - -void BytecodeCircuitBuilder::BuildImmediateDominator(const std::vector<size_t> &immDom) -{ - graph_[0].iDominator = &graph_[0]; - for (size_t i = 1; i < immDom.size(); i++) { - auto dominatedBlock = &graph_[i]; - if (dominatedBlock->isDead) { - continue; - } - auto immDomBlock = &graph_[immDom[i]]; - dominatedBlock->iDominator = immDomBlock; - } - - for (auto &block : graph_) { - if (block.isDead) { - continue; - } - if (block.iDominator->id != block.id) { - block.iDominator->immDomBlocks.emplace_back(&block); - } + return true; + }); } -} - 
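// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: CollectTryPredsInfo above adds one
// state predecessor to the (single, innermost) catch region for every
// potentially throwing bytecode in a try region, because each such bytecode
// later becomes its own IfException edge into the catch block's merge. The same
// counting over a simplified stand-in for BytecodeRegion:
#include <cstddef>
#include <vector>

struct Region {
    std::vector<bool> canThrow;    // one flag per bytecode in this region
    Region* catchRegion {nullptr}; // innermost catch region, if any
    size_t numOfStatePreds {0};
};

void CountExceptionPreds(Region& r)
{
    if (r.catchRegion == nullptr) {
        return;
    }
    for (bool mayThrow : r.canThrow) {
        if (mayThrow) {
            r.catchRegion->numOfStatePreds++; // one merge input per throwing bytecode
        }
    }
}
// ---------------------------------------------------------------------------
-void 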
BytecodeCircuitBuilder::ComputeDomFrontiers(const std::vector &immDom) +void BytecodeCircuitBuilder::RemoveUnusedPredsInfo(BytecodeRegion& bb) { - std::vector> domFrontiers(immDom.size()); - for (auto &bb : graph_) { - if (bb.isDead) { - continue; - } - if (bb.preds.size() < 2) { // 2: pred num - continue; - } - for (size_t i = 0; i < bb.preds.size(); i++) { - auto runner = bb.preds[i]; - while (runner->id != immDom[bb.id]) { - domFrontiers[runner->id].insert(&bb); - runner = &graph_[immDom[runner->id]]; + EnumerateBlock(bb, [&bb](const BytecodeInfo &bytecodeInfo) -> bool { + if (bytecodeInfo.IsGeneral()) { + ASSERT(bb.catches.size() == 1); // 1: cache size + if (!bytecodeInfo.NoThrow()) { + bb.catches.at(0)->numOfStatePreds--; } } - } - - for (size_t i = 0; i < domFrontiers.size(); i++) { - for (auto iter = domFrontiers[i].cbegin(); iter != domFrontiers[i].cend(); iter++) { - graph_[i].domFrontiers.emplace_back(*iter); - } - } + return true; + }); } -void BytecodeCircuitBuilder::RemoveDeadRegions(const std::unordered_map &bbIdToDfsTimestamp) +void BytecodeCircuitBuilder::ClearUnreachableRegion(ChunkVector& pendingList) { - for (auto &block: graph_) { - std::vector newPreds; - for (auto &bb : block.preds) { - if (bbIdToDfsTimestamp.count(bb->id)) { - newPreds.emplace_back(bb); - } + auto bb = pendingList.back(); + pendingList.pop_back(); + for (auto it = bb->preds.begin(); it != bb->preds.end(); it++) { + if ((*it)->numOfStatePreds != 0) { + bb->EraseThisBlock((*it)->succs); } - block.preds = newPreds; } - - for (auto &block : graph_) { - block.isDead = !bbIdToDfsTimestamp.count(block.id); - if (block.isDead) { - block.succs.clear(); + for (auto it = bb->succs.begin(); it != bb->succs.end(); it++) { + auto bbNext = *it; + if (bbNext->numOfStatePreds != 0) { + bb->EraseThisBlock(bbNext->preds); + bbNext->numOfStatePreds--; + if (bbNext->numOfStatePreds == 0) { + pendingList.emplace_back(bbNext); + } } } -} - -void BytecodeCircuitBuilder::InsertPhi() -{ - std::unordered_map> defsitesInfo; // - for (auto &bb : graph_) { - if (bb.isDead) { - continue; + for (auto it = bb->trys.begin(); it != bb->trys.end(); it++) { + if ((*it)->numOfStatePreds != 0) { + bb->EraseThisBlock((*it)->catches); } - EnumerateBlock(bb, [this, &defsitesInfo, &bb] - (const BytecodeInfo &bytecodeInfo) -> bool { - if (bytecodeInfo.IsBc(EcmaOpcode::RESUMEGENERATOR)) { - auto numVRegs = GetNumberVRegsWithEnv(); - for (size_t i = 0; i < numVRegs; i++) { - defsitesInfo[i].insert(bb.id); - } - } - for (const auto &vreg: bytecodeInfo.vregOut) { - defsitesInfo[vreg].insert(bb.id); - } - return true; - }); } - - // handle phi generated from multiple control flow in the same source block - InsertExceptionPhi(defsitesInfo); - - for (const auto&[variable, defsites] : defsitesInfo) { - std::queue workList; - for (auto blockId: defsites) { - workList.push(blockId); - } - while (!workList.empty()) { - auto currentId = workList.front(); - workList.pop(); - for (auto &block : graph_[currentId].domFrontiers) { - if (!block->phi.count(variable)) { - block->phi.insert(variable); - if (!defsitesInfo[variable].count(block->id)) { - workList.push(block->id); - } - } + for (auto it = bb->catches.begin(); it != bb->catches.end(); it++) { + auto bbNext = *it; + if (bbNext->numOfStatePreds != 0) { + RemoveUnusedPredsInfo(*bb); + bb->EraseThisBlock(bbNext->trys); + if (bbNext->numOfStatePreds == 0) { + pendingList.emplace_back(bbNext); } } } - - if (IsLogEnabled()) { - PrintGraph("Inserted Phis"); - } + bb->preds.clear(); + bb->succs.clear(); + 
bb->trys.clear(); + bb->catches.clear(); + numOfLiveBB_--; } -void BytecodeCircuitBuilder::InsertExceptionPhi(std::unordered_map> &defsitesInfo) +void BytecodeCircuitBuilder::RemoveUnreachableRegion() { - // handle try catch defsite - for (auto &bb : graph_) { - if (bb.isDead) { - continue; - } - if (bb.catchs.size() == 0) { - continue; - } - std::set vregs; - EnumerateBlock(bb, [this, &vregs] - (const BytecodeInfo &bytecodeInfo) -> bool { - if (bytecodeInfo.IsBc(EcmaOpcode::RESUMEGENERATOR)) { - auto numVRegs = GetNumberVRegsWithEnv(); - for (size_t i = 0; i < numVRegs; i++) { - vregs.insert(i); - } - return false; - } - for (const auto &vreg: bytecodeInfo.vregOut) { - vregs.insert(vreg); - } - return true; - }); - - for (auto &vreg : vregs) { - defsitesInfo[vreg].insert(bb.catchs.at(0)->id); - bb.catchs.at(0)->phi.insert(vreg); + numOfLiveBB_ = graph_.size(); + ChunkVector pendingList(circuit_->chunk()); + for (size_t i = 1; i < graph_.size(); i++) { // 1: skip entry bb + auto &bb = RegionAt(i); + if (bb.numOfStatePreds == 0) { + pendingList.emplace_back(&bb); } } + while (!pendingList.empty()) { + ClearUnreachableRegion(pendingList); + } } // Update CFG's predecessor, successor and try catch associations void BytecodeCircuitBuilder::UpdateCFG() { - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } + for (size_t i = 0; i < graph_.size(); i++) { + auto &bb = RegionAt(i); bb.preds.clear(); bb.trys.clear(); - std::vector newSuccs; + ChunkVector newSuccs(circuit_->chunk()); for (const auto &succ: bb.succs) { - if (std::count(bb.catchs.cbegin(), bb.catchs.cend(), succ)) { + if (std::count(bb.catches.cbegin(), bb.catches.cend(), succ)) { continue; } newSuccs.emplace_back(succ); } - bb.succs = newSuccs; + bb.succs.clear(); + bb.succs.insert(bb.succs.end(), newSuccs.begin(), newSuccs.end()); } - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } + for (size_t i = 0; i < graph_.size(); i++) { + auto &bb = RegionAt(i); for (auto &succ: bb.succs) { succ->preds.emplace_back(&bb); + succ->numOfStatePreds++; } - for (auto &catchBlock: bb.catchs) { + for (auto &catchBlock: bb.catches) { catchBlock->trys.emplace_back(&bb); } } @@ -538,163 +395,20 @@ void BytecodeCircuitBuilder::BuildCircuitArgs() void BytecodeCircuitBuilder::BuildFrameArgs() { - auto metaData = circuit_->FrameArgs(); - size_t numArgs = static_cast(FrameArgIdx::NUM_OF_ARGS); + UInt32PairAccessor accessor(0, 0); + auto metaData = circuit_->FrameArgs(accessor.ToValue()); + size_t numArgs = metaData->GetNumIns(); std::vector args(numArgs, Circuit::NullGate()); size_t idx = 0; args[idx++] = argAcc_.GetCommonArgGate(CommonArgIdx::FUNC); args[idx++] = argAcc_.GetCommonArgGate(CommonArgIdx::NEW_TARGET); args[idx++] = argAcc_.GetCommonArgGate(CommonArgIdx::THIS_OBJECT); args[idx++] = argAcc_.GetCommonArgGate(CommonArgIdx::ACTUAL_ARGC); + args[idx++] = GetPreFrameArgs(); GateRef frameArgs = circuit_->NewGate(metaData, args); argAcc_.SetFrameArgs(frameArgs); } -bool BytecodeCircuitBuilder::ShouldBeDead(BytecodeRegion &curBlock) -{ - if (curBlock.iDominator->isDead) { - return true; - } - auto isDead = false; - for (auto bbPred : curBlock.preds) { - if (!bbPred->isDead) { - return false; - } - isDead = true; - } - for (auto bbTry : curBlock.trys) { - if (!bbTry->isDead) { - return false; - } - isDead = true; - } - return isDead; -} - -void BytecodeCircuitBuilder::CollectPredsInfo() -{ - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } - bb.numOfStatePreds = 0; - } - // get number of expanded state predicates of each 
block - // one block-level try catch edge may correspond to multiple bytecode-level edges - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } - if (ShouldBeDead(bb)) { - bb.UpdateTryCatchInfoForDeadBlock(); - bb.isDead = true; - continue; - } - bool noThrow = true; - EnumerateBlock(bb, [&noThrow, &bb] - (const BytecodeInfo &bytecodeInfo) -> bool { - if (bytecodeInfo.IsGeneral()) { - if (!bb.catchs.empty() && !bytecodeInfo.NoThrow()) { - noThrow = false; - bb.catchs.at(0)->numOfStatePreds++; - } - } - if (bytecodeInfo.IsCondJump() && bb.succs.size() == 1) { - ASSERT(bb.succs[0]->id == bb.id + 1); - bb.succs[0]->numOfStatePreds++; - } - return true; - }); - bb.UpdateRedundantTryCatchInfo(noThrow); - bb.UpdateTryCatchInfoIfNoThrow(noThrow); - for (auto &succ: bb.succs) { - succ->numOfStatePreds++; - } - } - - CollectLoopBack(); - if (EnableLoopOptimization()) { - for (auto &head : loopHeads_) { - loopSize_ = 0; - ComputeLoopDepth(head.second); - head.first = loopSize_; - } - sort(loopHeads_.begin(), loopHeads_.end()); - } - - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } - bb.phiAcc = (bb.numOfStatePreds > 1) || (!bb.trys.empty()); - } -} - -void BytecodeCircuitBuilder::NewMerge(GateRef &state, GateRef &depend, size_t numOfIns) -{ - state = circuit_->NewGate(circuit_->Merge(numOfIns), - std::vector(numOfIns, Circuit::NullGate())); - depend = circuit_->NewGate(circuit_->DependSelector(numOfIns), - std::vector(numOfIns + 1, Circuit::NullGate())); - gateAcc_.NewIn(depend, 0, state); -} - -void BytecodeCircuitBuilder::NewLoopBegin(BytecodeRegion &bb, GateRef &state, GateRef &depend) -{ - if (bb.numOfLoopBacks > 1) { - NewMerge(bb.loopBackStateMerge, bb.loopBackDependMerge, bb.numOfLoopBacks); - } - auto loopBack = circuit_->NewGate(circuit_->LoopBack(), { Circuit::NullGate() }); - auto loopBegin = circuit_->NewGate(circuit_->LoopBegin(), { Circuit::NullGate(), loopBack }); - // 2: the number of depend inputs and it is in accord with LOOP_BEGIN - auto loopDepend = circuit_->NewGate(circuit_->DependSelector(2), - { loopBegin, Circuit::NullGate(), Circuit::NullGate() }); - if (state == circuit_->GetStateRoot()) { - ASSERT(depend == circuit_->GetDependRoot()); - gateAcc_.NewIn(loopBegin, 0, state); - gateAcc_.NewIn(loopDepend, 1, depend); - } - state = loopBegin; - depend = loopDepend; -} - -void BytecodeCircuitBuilder::NewLoopExit(GateRef &state, GateRef &depend) -{ - auto loopExit = circuit_->NewGate(circuit_->LoopExit(), - { state }); - depend = circuit_->NewGate(circuit_->LoopExitDepend(), - { loopExit, depend }); - state = loopExit; -} - -void BytecodeCircuitBuilder::TryInsertLoopExit(BytecodeRegion &bb, BytecodeRegion &bbNext, - GateRef &state, GateRef &depend) -{ - size_t diff = LoopExitCount(bb.id, bbNext.id); - for (size_t i = 0; i < diff; ++i) { - NewLoopExit(state, depend); - } -} - -void BytecodeCircuitBuilder::BuildBlockCircuitHead(BytecodeRegion &bb, GateRef &state, GateRef &depend) -{ - auto mergeCount = bb.numOfStatePreds - bb.numOfLoopBacks; - if (mergeCount == 0) { - state = circuit_->GetStateRoot(); - depend = circuit_->GetDependRoot(); - } - - if (mergeCount > 1) { - NewMerge(bb.stateMerge, bb.dependMerge, mergeCount); - state = bb.stateMerge; - depend = bb.dependMerge; - } - - if (bb.numOfLoopBacks > 0) { - NewLoopBegin(bb, state, depend); - } -} - std::vector BytecodeCircuitBuilder::CreateGateInList( const BytecodeInfo &info, const GateMetaData *meta) { @@ -730,86 +444,6 @@ std::vector BytecodeCircuitBuilder::CreateGateInList( return inList; } -void 
BytecodeCircuitBuilder::SetLoopBlockPred(BytecodeRegion &bb, BytecodeRegion &bbNext, - GateRef &state, GateRef &depend) -{ - ASSERT(bbNext.numOfLoopBacks > 0); - ASSERT(gateAcc_.GetOpCode(bbNext.stateCurrent) == OpCode::LOOP_BEGIN); - ASSERT(gateAcc_.GetOpCode(bbNext.dependCurrent) == OpCode::DEPEND_SELECTOR); - // loop back - if (bbNext.loopbackBlocks.count(bb.id)) { - if (bbNext.loopBackStateMerge != Circuit::NullGate()) { - ASSERT(bbNext.loopBackDependMerge != Circuit::NullGate()); - gateAcc_.NewIn(bbNext.loopBackStateMerge, bbNext.loopBackIndex, state); - gateAcc_.NewIn(bbNext.loopBackDependMerge, bbNext.loopBackIndex + 1, depend); - state = bbNext.loopBackStateMerge; - depend = bbNext.loopBackDependMerge; - } - if (bbNext.loopBackIndex == 0) { - auto loopBack = gateAcc_.GetState(bbNext.stateCurrent, 1); // 1: LoopBack - gateAcc_.NewIn(loopBack, 0, state); - gateAcc_.NewIn(bbNext.dependCurrent, 2, depend); // 2: loopback depend - } - bbNext.loopBackIndex++; - ASSERT(bbNext.loopBackIndex <= bbNext.numOfLoopBacks); - } else { - if (bbNext.stateMerge != Circuit::NullGate()) { - ASSERT(bbNext.dependMerge != Circuit::NullGate()); - gateAcc_.NewIn(bbNext.stateMerge, bbNext.forwardIndex, state); - gateAcc_.NewIn(bbNext.dependMerge, bbNext.forwardIndex + 1, depend); - state = bbNext.stateMerge; - depend = bbNext.dependMerge; - } - if (bbNext.forwardIndex == 0) { - gateAcc_.NewIn(bbNext.stateCurrent, 0, state); - gateAcc_.NewIn(bbNext.dependCurrent, 1, depend); // 1: first depend - } - bbNext.forwardIndex++; - ASSERT(bbNext.forwardIndex <= bbNext.numOfStatePreds - bbNext.numOfLoopBacks); - } -} - -void BytecodeCircuitBuilder::SetBlockPred(BytecodeRegion &bb, BytecodeRegion &bbNext, - const GateRef &state, const GateRef &depend) -{ - auto stateCur = state; - auto dependCur = depend; - - if (EnableLoopOptimization()) { - TryInsertLoopExit(bb, bbNext, stateCur, dependCur); - } - - // Init block head if not exists - if (bbNext.stateCurrent == Circuit::NullGate()) { - ASSERT(bbNext.dependCurrent == Circuit::NullGate()); - BuildBlockCircuitHead(bbNext, bbNext.stateCurrent, bbNext.dependCurrent); - // no loop head, no merge bb - if (bbNext.stateCurrent == Circuit::NullGate()) { - ASSERT(bbNext.dependCurrent == Circuit::NullGate()); - bbNext.stateCurrent = stateCur; - bbNext.dependCurrent = dependCur; - bbNext.statePredIndex++; - return; - } - } - - // loop bb - if (bbNext.numOfLoopBacks > 0) { - SetLoopBlockPred(bb, bbNext, stateCur, dependCur); - bbNext.statePredIndex++; - return; - } - - // merge bb - if (bbNext.stateMerge != Circuit::NullGate()) { - ASSERT(bbNext.dependMerge != Circuit::NullGate()); - gateAcc_.NewIn(bbNext.stateMerge, bbNext.statePredIndex, stateCur); - gateAcc_.NewIn(bbNext.dependMerge, bbNext.statePredIndex + 1, dependCur); // 1: skip state - } - bbNext.statePredIndex++; - ASSERT(bbNext.statePredIndex <= bbNext.numOfStatePreds); -} - GateRef BytecodeCircuitBuilder::NewConst(const BytecodeInfo &info) { auto opcode = info.GetOpcode(); @@ -876,14 +510,56 @@ GateRef BytecodeCircuitBuilder::NewConst(const BytecodeInfo &info) return gate; } -void BytecodeCircuitBuilder::NewJSGate(BytecodeRegion &bb, GateRef &state, GateRef &depend) +void BytecodeCircuitBuilder::MergeThrowGate(BytecodeRegion &bb, uint32_t bcIndex) +{ + auto state = frameStateBuilder_.GetCurrentState(); + auto depend = frameStateBuilder_.GetCurrentDepend(); + if (!bb.catches.empty()) { + auto ifSuccess = circuit_->NewGate(circuit_->IfSuccess(), {state}); + auto ifException = circuit_->NewGate(circuit_->IfException(), 
{state, depend}); + frameStateBuilder_.UpdateStateDepend(ifException, ifException); + ASSERT(bb.catches.size() == 1); // 1: one catch + auto bbNext = bb.catches.at(0); + frameStateBuilder_.MergeIntoSuccessor(bb, *bbNext); + bbNext->expandedPreds.push_back({bb.id, bcIndex, true}); + state = ifSuccess; + } + auto constant = circuit_->GetConstantGate(MachineType::I64, + JSTaggedValue::VALUE_EXCEPTION, + GateType::TaggedValue()); + circuit_->NewGate(circuit_->Return(), + { state, depend, constant, circuit_->GetReturnRoot() }); +} + +void BytecodeCircuitBuilder::MergeExceptionGete(BytecodeRegion &bb, + const BytecodeInfo& bytecodeInfo, uint32_t bcIndex) +{ + auto state = frameStateBuilder_.GetCurrentState(); + auto depend = frameStateBuilder_.GetCurrentDepend(); + auto ifSuccess = circuit_->NewGate(circuit_->IfSuccess(), {state}); + ASSERT(bb.catches.size() == 1); // 1: one catch + auto bbNext = bb.catches.at(0); + auto ifException = circuit_->NewGate(circuit_->IfException(), {state, depend}); + frameStateBuilder_.UpdateStateDepend(ifException, ifException); + frameStateBuilder_.MergeIntoSuccessor(bb, *bbNext); + if (bytecodeInfo.GetOpcode() == EcmaOpcode::CREATEASYNCGENERATOROBJ_V8) { + bbNext->expandedPreds.push_back({bb.id, bcIndex + 1, true}); // 1: next pc + } else { + bbNext->expandedPreds.push_back({bb.id, bcIndex, true}); + } + frameStateBuilder_.UpdateStateDepend(ifSuccess, depend); +} + +void BytecodeCircuitBuilder::NewJSGate(BytecodeRegion &bb) { auto &iterator = bb.GetBytecodeIterator(); const BytecodeInfo& bytecodeInfo = iterator.GetBytecodeInfo(); + GateRef state = frameStateBuilder_.GetCurrentState(); + GateRef depend = frameStateBuilder_.GetCurrentDepend(); size_t numValueInputs = bytecodeInfo.ComputeValueInputCount(); GateRef gate = 0; bool writable = !bytecodeInfo.NoSideEffects(); - bool hasFrameState = bytecodeInfo.HasFrameArgs(); + bool hasFrameState = bytecodeInfo.HasFrameState(); size_t pcOffset = GetPcOffset(iterator.Index()); auto meta = circuit_->JSBytecode(numValueInputs, bytecodeInfo.GetOpcode(), pcOffset, writable, hasFrameState); std::vector inList = CreateGateInList(bytecodeInfo, meta); @@ -898,113 +574,74 @@ void BytecodeCircuitBuilder::NewJSGate(BytecodeRegion &bb, GateRef &state, GateR jsGatesToByteCode_[gate] = iterator.Index(); gateAcc_.NewIn(gate, 0, state); gateAcc_.NewIn(gate, 1, depend); - state = gate; + frameStateBuilder_.UpdateStateDepend(gate, gate); + frameStateBuilder_.UpdateFrameValues(bytecodeInfo, iterator.Index(), gate); if (bytecodeInfo.IsThrow()) { - depend = gate; - - if (!bb.catchs.empty()) { - auto &bbNext = bb.catchs.at(0); - SetBlockPred(bb, *bbNext, gate, gate); - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), true}); - } else { - auto constant = circuit_->GetConstantGate(MachineType::I64, - JSTaggedValue::VALUE_EXCEPTION, - GateType::TaggedValue()); - circuit_->NewGate(circuit_->Return(), - { state, depend, constant, circuit_->GetReturnRoot() }); - } + MergeThrowGate(bb, iterator.Index()); return; } - if (!bb.catchs.empty() && !bytecodeInfo.NoThrow()) { - auto ifSuccess = circuit_->NewGate(circuit_->IfSuccess(), {gate}); - auto ifException = circuit_->NewGate(circuit_->IfException(), {gate, gate}); - auto &bbNext = bb.catchs.at(0); - SetBlockPred(bb, *bbNext, ifException, ifException); - if (bytecodeInfo.GetOpcode() == EcmaOpcode::CREATEASYNCGENERATOROBJ_V8) { - bbNext->expandedPreds.push_back({bb.id, iterator.Index() + 1, true}); // 1: next pc - } else { - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), true}); - } 
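// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: MergeThrowGate and
// MergeExceptionGete above split a potentially throwing bytecode into two
// control projections - IfSuccess continues in the current region, IfException
// feeds the catch region's merge. A toy gate model (types invented here):
#include <string>
#include <utility>
#include <vector>

struct Gate {
    std::string op;
    std::vector<Gate*> ins;
};

// Returns {ifSuccess, ifException}, the two state successors of a throwing gate.
std::pair<Gate, Gate> SplitExceptionEdge(Gate* call)
{
    Gate ifSuccess{"IF_SUCCESS", {call}};     // normal fall-through path
    Gate ifException{"IF_EXCEPTION", {call}}; // routed to the catch merge
    return {ifSuccess, ifException};
}
// ---------------------------------------------------------------------------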
- state = ifSuccess; + if (!bb.catches.empty() && !bytecodeInfo.NoThrow()) { + MergeExceptionGete(bb, bytecodeInfo, iterator.Index()); } if (bytecodeInfo.IsGeneratorRelative()) { - //exclude... - if (bytecodeInfo.GetOpcode() == EcmaOpcode::SUSPENDGENERATOR_V8 || - bytecodeInfo.GetOpcode() == EcmaOpcode::ASYNCGENERATORRESOLVE_V8_V8_V8 || - bytecodeInfo.GetOpcode() == EcmaOpcode::CREATEOBJECTWITHEXCLUDEDKEYS_IMM8_V8_V8) { - auto hole = circuit_->GetConstantGate(MachineType::I64, - JSTaggedValue::VALUE_HOLE, - GateType::TaggedValue()); - uint32_t numRegs = GetNumberVRegsWithEnv(); - std::vector vec(numRegs + 1, hole); - vec[0] = depend; - GateRef saveRegs = - circuit_->NewGate(circuit_->SaveRegister(numRegs), vec); - gateAcc_.ReplaceDependIn(gate, saveRegs); - } suspendAndResumeGates_.emplace_back(gate); } - depend = gate; } -void BytecodeCircuitBuilder::NewJump(BytecodeRegion &bb, GateRef &state, GateRef &depend) +void BytecodeCircuitBuilder::NewJump(BytecodeRegion &bb) { auto &iterator = bb.GetBytecodeIterator(); const BytecodeInfo& bytecodeInfo = iterator.GetBytecodeInfo(); + GateRef state = frameStateBuilder_.GetCurrentState(); + GateRef depend = frameStateBuilder_.GetCurrentDepend(); size_t numValueInputs = bytecodeInfo.ComputeValueInputCount(); - if (bytecodeInfo.IsCondJump()) { + if (bytecodeInfo.IsCondJump() && bb.succs.size() == 2) { // 2: two succ size_t pcOffset = GetPcOffset(iterator.Index()); auto meta = circuit_->JSBytecode(numValueInputs, bytecodeInfo.GetOpcode(), pcOffset, false, false); auto numValues = meta->GetNumIns(); GateRef gate = circuit_->NewGate(meta, std::vector(numValues, Circuit::NullGate())); gateAcc_.NewIn(gate, 0, state); gateAcc_.NewIn(gate, 1, depend); + frameStateBuilder_.UpdateStateDepend(gate, gate); + frameStateBuilder_.UpdateFrameValues(bytecodeInfo, iterator.Index(), gate); + auto ifTrue = circuit_->NewGate(circuit_->IfTrue(), {gate}); auto trueRelay = circuit_->NewGate(circuit_->DependRelay(), {ifTrue, gate}); auto ifFalse = circuit_->NewGate(circuit_->IfFalse(), {gate}); auto falseRelay = circuit_->NewGate(circuit_->DependRelay(), {ifFalse, gate}); - if (bb.succs.size() == 1) { - auto &bbNext = bb.succs[0]; - ASSERT(bbNext->id == bb.id + 1); - SetBlockPred(bb, *bbNext, ifFalse, falseRelay); - SetBlockPred(bb, *bbNext, ifTrue, trueRelay); - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); - } else { - ASSERT(bb.succs.size() == 2); // 2 : 2 num of successors - [[maybe_unused]] uint32_t bitSet = 0; - for (auto &bbNext: bb.succs) { - if (bbNext->id == bb.id + 1) { - SetBlockPred(bb, *bbNext, ifFalse, falseRelay); - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); - bitSet |= 1; - } else { - SetBlockPred(bb, *bbNext, ifTrue, trueRelay); - bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); - bitSet |= 2; // 2:verify - } + for (auto &bbNext: bb.succs) { + if (bbNext->id == bb.id + 1) { + frameStateBuilder_.UpdateStateDepend(ifFalse, falseRelay); + frameStateBuilder_.MergeIntoSuccessor(bb, *bbNext); + bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); + } else { + frameStateBuilder_.UpdateStateDepend(ifTrue, trueRelay); + frameStateBuilder_.MergeIntoSuccessor(bb, *bbNext); + bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); } - ASSERT(bitSet == 3); // 3:Verify the number of successor blocks } byteCodeToJSGates_[iterator.Index()].emplace_back(gate); jsGatesToByteCode_[gate] = iterator.Index(); } else { 
ASSERT(bb.succs.size() == 1); auto &bbNext = bb.succs.at(0); - SetBlockPred(bb, *bbNext, state, depend); + frameStateBuilder_.MergeIntoSuccessor(bb, *bbNext); bbNext->expandedPreds.push_back({bb.id, iterator.Index(), false}); } } -void BytecodeCircuitBuilder::NewReturn(BytecodeRegion &bb, GateRef &state, GateRef &depend) +GateRef BytecodeCircuitBuilder::NewReturn(BytecodeRegion &bb) { ASSERT(bb.succs.empty()); auto &iterator = bb.GetBytecodeIterator(); const BytecodeInfo& bytecodeInfo = iterator.GetBytecodeInfo(); + GateRef state = frameStateBuilder_.GetCurrentState(); + GateRef depend = frameStateBuilder_.GetCurrentDepend(); + GateRef gate = Circuit::NullGate(); if (bytecodeInfo.GetOpcode() == EcmaOpcode::RETURN) { // handle return.dyn bytecode - auto gate = circuit_->NewGate(circuit_->Return(), + gate = circuit_->NewGate(circuit_->Return(), { state, depend, Circuit::NullGate(), circuit_->GetReturnRoot() }); byteCodeToJSGates_[iterator.Index()].emplace_back(gate); jsGatesToByteCode_[gate] = iterator.Index(); @@ -1013,464 +650,111 @@ void BytecodeCircuitBuilder::NewReturn(BytecodeRegion &bb, GateRef &state, GateR auto constant = circuit_->GetConstantGate(MachineType::I64, JSTaggedValue::VALUE_UNDEFINED, GateType::TaggedValue()); - auto gate = circuit_->NewGate(circuit_->Return(), + gate = circuit_->NewGate(circuit_->Return(), { state, depend, constant, circuit_->GetReturnRoot() }); byteCodeToJSGates_[iterator.Index()].emplace_back(gate); jsGatesToByteCode_[gate] = iterator.Index(); } + return gate; } -void BytecodeCircuitBuilder::NewByteCode(BytecodeRegion &bb, GateRef &state, GateRef &depend) +void BytecodeCircuitBuilder::NewByteCode(BytecodeRegion &bb) { auto &iterator = bb.GetBytecodeIterator(); const BytecodeInfo& bytecodeInfo = iterator.GetBytecodeInfo(); + FrameLiveOut* liveout; + auto bcId = iterator.Index(); + if (iterator.IsInRange(bcId - 1)) { + liveout = frameStateBuilder_.GetOrOCreateBCEndLiveOut(bcId - 1); + } else { + liveout = frameStateBuilder_.GetOrOCreateBBLiveOut(bb.id); + } + frameStateBuilder_.AdvanceToNextBc(bytecodeInfo, liveout, bcId); + GateRef gate = Circuit::NullGate(); if (bytecodeInfo.IsSetConstant()) { // handle bytecode command to get constants - GateRef gate = NewConst(bytecodeInfo); + gate = NewConst(bytecodeInfo); byteCodeToJSGates_[iterator.Index()].emplace_back(gate); jsGatesToByteCode_[gate] = iterator.Index(); } else if (bytecodeInfo.IsGeneral()) { // handle general ecma.* bytecodes - NewJSGate(bb, state, depend); + NewJSGate(bb); } else if (bytecodeInfo.IsJump()) { // handle conditional jump and unconditional jump bytecodes - NewJump(bb, state, depend); + NewJump(bb); } else if (bytecodeInfo.IsReturn()) { // handle return.dyn and returnundefined bytecodes - NewReturn(bb, state, depend); - } else if (!bytecodeInfo.IsDiscarded() && !bytecodeInfo.IsMov()) { + gate = NewReturn(bb); + } else if (bytecodeInfo.IsMov()) { + frameStateBuilder_.UpdateMoveValues(bytecodeInfo, iterator.Index()); + } else if (!bytecodeInfo.IsDiscarded()) { LOG_ECMA(FATAL) << "this branch is unreachable"; UNREACHABLE(); } + if (gate != Circuit::NullGate()) { + frameStateBuilder_.UpdateFrameValues(bytecodeInfo, iterator.Index(), gate); + } } void BytecodeCircuitBuilder::BuildSubCircuit() { - auto &entryBlock = graph_[0]; - BuildBlockCircuitHead(entryBlock, entryBlock.stateCurrent, entryBlock.dependCurrent); - for (auto &bbId: dfsList_) { - auto &bb = graph_[bbId]; - auto stateCur = bb.stateCurrent; - auto dependCur = bb.dependCurrent; - ASSERT(stateCur != Circuit::NullGate()); - 
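// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: NewByteCode above first looks up
// the live-out snapshot recorded at the end of the previous bytecode (falling
// back to the enclosing region's entry state when there is none) and advances
// the frame values before emitting the gate. Schematically, with invented
// helper types:
#include <cstdint>
#include <map>

struct LiveOut { /* frame values known at one program point */ };

struct FrameStates {
    std::map<uint32_t, LiveOut> bcEndLiveOuts; // keyed by bytecode index
    LiveOut entryLiveOut;                      // state at region entry

    // Mirrors the bcId - 1 lookup above.
    const LiveOut& LiveOutBefore(uint32_t bcId) const
    {
        auto it = (bcId == 0) ? bcEndLiveOuts.end() : bcEndLiveOuts.find(bcId - 1);
        return it == bcEndLiveOuts.end() ? entryLiveOut : it->second;
    }
};
// ---------------------------------------------------------------------------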
ASSERT(dependCur != Circuit::NullGate()); + auto &entryBlock = RegionAt(0); + frameStateBuilder_.InitEntryBB(entryBlock); + auto& rpoList = frameStateBuilder_.GetRpoList(); + for (auto &bbId: rpoList) { + auto &bb = RegionAt(bbId); + frameStateBuilder_.AdvanceToNextBB(bb); if (IsEntryBlock(bb.id)) { - if (!isInline_) { - stateCur = circuit_->NewGate(circuit_->UpdateHotness(), {stateCur, dependCur}); - dependCur = stateCur; - } - auto &bbNext = graph_[bb.id + 1]; - SetBlockPred(bb, bbNext, stateCur, dependCur); + if (NeedCheckSafePointAndStackOver()) { + GateRef state = frameStateBuilder_.GetCurrentState(); + GateRef depend = frameStateBuilder_.GetCurrentDepend(); + auto stackCheck = circuit_->NewGate(circuit_->CheckSafePointAndStackOver(), {state, depend}); + bb.dependCache = stackCheck; + frameStateBuilder_.UpdateStateDepend(stackCheck, stackCheck); + } + auto &bbNext = RegionAt(bb.id + 1); + frameStateBuilder_.MergeIntoSuccessor(bb, bbNext); bbNext.expandedPreds.push_back({bb.id, bb.end, false}); continue; } if (!bb.trys.empty()) { - dependCur = circuit_->NewGate(circuit_->GetException(), - MachineType::I64, {stateCur, dependCur}, GateType::AnyType()); - bb.dependCurrent = dependCur; - } - EnumerateBlock(bb, [this, &stateCur, &dependCur, &bb] + GateRef state = frameStateBuilder_.GetCurrentState(); + GateRef depend = frameStateBuilder_.GetCurrentDepend(); + auto getException = circuit_->NewGate(circuit_->GetException(), + MachineType::I64, {state, depend}, GateType::AnyType()); + frameStateBuilder_.UpdateAccumulator(getException); + frameStateBuilder_.UpdateStateDepend(state, getException); + } + EnumerateBlock(bb, [this, &bb] (const BytecodeInfo &bytecodeInfo) -> bool { - NewByteCode(bb, stateCur, dependCur); + NewByteCode(bb); if (bytecodeInfo.IsJump() || bytecodeInfo.IsThrow()) { return false; } return true; }); - const BytecodeInfo& bytecodeInfo = GetBytecodeInfo(bb.end); - if (bytecodeInfo.needFallThrough()) { - auto &bbNext = graph_[bb.id + 1]; - SetBlockPred(bb, bbNext, stateCur, dependCur); + bool needFallThrough = true; + if (!bb.IsEmptryBlock()) { + const BytecodeInfo& bytecodeInfo = GetBytecodeInfo(bb.end); + needFallThrough = bytecodeInfo.needFallThrough(); + } + // fallThrough or empty merge bb + if (needFallThrough) { + ASSERT(bb.succs.size() == 1); // 1: fall through + auto &bbNext = RegionAt(bb.succs[0]->id); + frameStateBuilder_.MergeIntoSuccessor(bb, bbNext); bbNext.expandedPreds.push_back({bb.id, bb.end, false}); } } } -GateRef BytecodeCircuitBuilder::NewLoopBackPhi(BytecodeRegion &bb, uint16_t reg, bool acc) -{ - if (bb.numOfLoopBacks == 1) { - for (size_t i = 0; i < bb.numOfStatePreds; ++i) { - auto &[predId, predBcIdx, isException] = bb.expandedPreds.at(i); - if (bb.loopbackBlocks.count(predId)) { - return NewValueFromPredBB(bb, i, gateAcc_.GetState(bb.stateCurrent, 1), reg, acc); - } - } - UNREACHABLE(); - LOG_COMPILER(FATAL) << "this branch is unreachable"; - } - auto inList = std::vector(1 + bb.numOfLoopBacks, Circuit::NullGate()); - auto loopBackValue = circuit_->NewGate(circuit_->ValueSelector(bb.numOfLoopBacks), - MachineType::I64, inList.size(), inList.data(), GateType::AnyType()); - gateAcc_.NewIn(loopBackValue, 0, bb.loopBackStateMerge); - size_t loopBackIndex = 1; // 1: start index of value inputs - for (size_t i = 0; i < bb.numOfStatePreds; ++i) { - auto &[predId, predBcIdx, isException] = bb.expandedPreds.at(i); - if (bb.loopbackBlocks.count(predId)) { - GateRef ans = NewValueFromPredBB(bb, i, gateAcc_.GetState(bb.loopBackStateMerge, loopBackIndex - 1), - 
reg, acc); - gateAcc_.NewIn(loopBackValue, loopBackIndex++, ans); - } - } - return loopBackValue; -} - -size_t BytecodeCircuitBuilder::LoopExitCount(size_t from, size_t to) -{ - if (!EnableLoopOptimization()) { - return 0; - } - const auto &bb = GetBasicBlockById(from); - const auto &bbNext = GetBasicBlockById(to); - size_t headDep = ((bbNext.numOfLoopBacks > 0) && (bbNext.loopbackBlocks.count(bb.id) == 0)) ? 1 : 0; - ASSERT(bbNext.loopDepth >= headDep); - size_t nextDep = bbNext.loopDepth - headDep; - ASSERT(bb.loopDepth >= nextDep); - return bb.loopDepth > nextDep; -} - -GateRef BytecodeCircuitBuilder::NewValueFromPredBB(BytecodeRegion &bb, size_t idx, - GateRef exit, uint16_t reg, bool acc) -{ - auto &[predId, predBcIdx, isException] = bb.expandedPreds.at(idx); - if (LoopExitCount(predId, bb.id) == 0) { - return ResolveDef(predId, predBcIdx, reg, acc); - } - while (gateAcc_.GetOpCode(exit) != OpCode::LOOP_EXIT) { - exit = gateAcc_.GetState(exit); - } - if (IsLoopExitValueExists(exit, reg, acc)) { - return GetLoopExitValue(exit, reg, acc); - } - GateRef res = ResolveDef(predId, predBcIdx, reg, acc); - return NewLoopExitValue(exit, reg, acc, res); -} - -GateRef BytecodeCircuitBuilder::NewLoopForwardPhi(BytecodeRegion &bb, uint16_t reg, bool acc) -{ - auto mergeCount = bb.numOfStatePreds - bb.numOfLoopBacks; - if (mergeCount == 1) { - for (size_t i = 0; i < bb.numOfStatePreds; ++i) { - auto &[predId, predBcIdx, isException] = bb.expandedPreds.at(i); - if (!bb.loopbackBlocks.count(predId)) { - return NewValueFromPredBB(bb, i, gateAcc_.GetState(bb.stateCurrent, 0), reg, acc); - } - } - UNREACHABLE(); - LOG_COMPILER(FATAL) << "this branch is unreachable"; - } - auto inList = std::vector(1 + mergeCount, Circuit::NullGate()); - auto forwardValue = circuit_->NewGate( - circuit_->ValueSelector(mergeCount), MachineType::I64, - inList.size(), inList.data(), GateType::AnyType()); - gateAcc_.NewIn(forwardValue, 0, bb.stateMerge); - size_t forwardIndex = 1; // 1: start index of value inputs - for (size_t i = 0; i < bb.numOfStatePreds; ++i) { - auto &[predId, predBcIdx, isException] = bb.expandedPreds.at(i); - if (!bb.loopbackBlocks.count(predId)) { - GateRef ans = NewValueFromPredBB(bb, i, gateAcc_.GetState(bb.stateMerge, forwardIndex - 1), reg, acc); - gateAcc_.NewIn(forwardValue, forwardIndex++, ans); - } - } - return forwardValue; -} - -void BytecodeCircuitBuilder::NewPhi(BytecodeRegion &bb, uint16_t reg, bool acc, GateRef ¤tPhi) -{ - if (bb.numOfLoopBacks == 0) { - if (bb.numOfStatePreds == 1) { - currentPhi = NewValueFromPredBB(bb, 0, bb.stateCurrent, reg, acc); - ASSERT(currentPhi != 0); - return; - } - ASSERT(bb.stateMerge != Circuit::NullGate()); - auto inList = std::vector(1 + bb.numOfStatePreds, Circuit::NullGate()); - currentPhi = - circuit_->NewGate(circuit_->ValueSelector(bb.numOfStatePreds), MachineType::I64, - inList.size(), inList.data(), GateType::AnyType()); - gateAcc_.NewIn(currentPhi, 0, bb.stateMerge); - for (size_t i = 0; i < bb.numOfStatePreds; ++i) { - GateRef ans = NewValueFromPredBB(bb, i, gateAcc_.GetIn(bb.stateMerge, i), reg, acc); - gateAcc_.NewIn(currentPhi, i + 1, ans); - } - } else { - ASSERT(gateAcc_.GetOpCode(bb.stateCurrent) == OpCode::LOOP_BEGIN); - // 2: the number of value inputs and it is in accord with LOOP_BEGIN - currentPhi = circuit_->NewGate(circuit_->ValueSelector(2), MachineType::I64, - {bb.stateCurrent, Circuit::NullGate(), Circuit::NullGate()}, GateType::AnyType()); - auto loopBackValue = NewLoopBackPhi(bb, reg, acc); - auto forwardValue = 
NewLoopForwardPhi(bb, reg, acc); - gateAcc_.NewIn(currentPhi, 1, forwardValue); // 1: index of forward value input - gateAcc_.NewIn(currentPhi, 2, loopBackValue); // 2: index of loop-back value input - } - bb.phiGate.insert(currentPhi); -} - -bool BytecodeCircuitBuilder::IsLoopExitValueExists(GateRef loopExit, uint16_t reg, bool acc) -{ - if (acc) { - return loopExitToAccGate_.count(loopExit) > 0; - } else { - return loopExitToVregGate_.count(std::make_pair(loopExit, reg)) > 0; - } -} - -GateRef BytecodeCircuitBuilder::GetLoopExitValue(GateRef loopExit, uint16_t reg, bool acc) -{ - if (acc) { - return loopExitToAccGate_.at(loopExit); - } else { - return loopExitToVregGate_.at(std::make_pair(loopExit, reg)); - } -} - -GateRef BytecodeCircuitBuilder::CreateLoopExitValue(GateRef loopExit, uint16_t reg, bool acc, GateRef value) -{ - GateRef newPhi = circuit_->NewGate(circuit_->LoopExitValue(), gateAcc_.GetMachineType(value), - {loopExit, value}, gateAcc_.GetGateType(value)); - if (acc) { - return loopExitToAccGate_[loopExit] = newPhi; - } else { - auto key = std::make_pair(loopExit, reg); - return loopExitToVregGate_[key] = newPhi; - } -} - -GateRef BytecodeCircuitBuilder::NewLoopExitValue(GateRef loopExit, uint16_t reg, bool acc, GateRef value) -{ - ASSERT(gateAcc_.GetOpCode(loopExit) == OpCode::LOOP_EXIT); - ChunkVector exitList(circuit_->chunk()); - while (gateAcc_.GetOpCode(loopExit) == OpCode::LOOP_EXIT) { - exitList.push_back(loopExit); - loopExit = gateAcc_.GetState(loopExit); - } - while (!exitList.empty()) { - GateRef exit = exitList.back(); - value = CreateLoopExitValue(exit, reg, acc, value); - exitList.pop_back(); - } - return value; -} - -GateRef BytecodeCircuitBuilder::ResolveDef(const BytecodeRegion &bb, int32_t bcId, const uint16_t reg, const bool acc) -{ - // Ensure that bcId is not negative - if (bcId == 0) { - return ResolveDef(bb.id, bcId, reg, acc, false); - } - return ResolveDef(bb.id, bcId - 1, reg, acc); -} - -// recursive variables renaming algorithm -GateRef BytecodeCircuitBuilder::ResolveDef(const size_t bbId, int32_t bcId, - const uint16_t reg, const bool acc, bool needIter) -{ - auto tmpReg = reg; - // find def-site in bytecodes of basic block - auto ans = Circuit::NullGate(); - auto &bb = graph_.at(bbId); - GateType type = GateType::AnyType(); - auto tmpAcc = acc; - - if (needIter) { - BytecodeIterator iterator(this, bb.start, bcId); - for (iterator.Goto(bcId); !iterator.Done(); --iterator) { - const BytecodeInfo& curInfo = iterator.GetBytecodeInfo(); - // original bc use acc as input && current bc use acc as output - bool isTransByAcc = tmpAcc && curInfo.AccOut(); - // 0 : the index in vreg-out list - bool isTransByVreg = (!tmpAcc && curInfo.IsOut(tmpReg, 0)); - if (isTransByAcc || isTransByVreg) { - if (curInfo.IsMov()) { - tmpAcc = curInfo.AccIn(); - if (!curInfo.inputs.empty()) { - ASSERT(!tmpAcc); - ASSERT(curInfo.inputs.size() == 1); - tmpReg = std::get(curInfo.inputs.at(0)).GetId(); - } - if (HasTypes()) { - type = typeRecorder_.UpdateType(iterator.Index(), type); - } - } else { - ans = byteCodeToJSGates_.at(iterator.Index()).at(0); - auto oldType = gateAcc_.GetGateType(ans); - if (!type.IsAnyType() && oldType.IsAnyType()) { - typeRecorder_.GetOrUpdatePGOType(tsManager_, gateAcc_.TryGetPcOffset(ans), type); - gateAcc_.SetGateType(ans, type); - } - break; - } - } - if (curInfo.GetOpcode() != EcmaOpcode::RESUMEGENERATOR) { - continue; - } - // New RESTORE_REGISTER HIR, used to restore the register content when processing resume instruction. 
- // New SAVE_REGISTER HIR, used to save register content when processing suspend instruction. - auto resumeGate = byteCodeToJSGates_.at(iterator.Index()).at(0); - ans = GetExistingRestore(resumeGate, tmpReg); - if (ans != Circuit::NullGate()) { - break; - } - ans = circuit_->NewGate(circuit_->RestoreRegister(tmpReg), MachineType::I64, - { resumeGate }, GateType::AnyType()); - SetExistingRestore(resumeGate, tmpReg, ans); - auto saveRegGate = ResolveDef(bbId, iterator.Index() - 1, tmpReg, tmpAcc); - [[maybe_unused]] EcmaOpcode opcode = Bytecodes::GetOpcode(iterator.PeekPrevPc(2)); // 2: prev bc - ASSERT(opcode == EcmaOpcode::SUSPENDGENERATOR_V8 || opcode == EcmaOpcode::ASYNCGENERATORRESOLVE_V8_V8_V8); - GateRef suspendGate = byteCodeToJSGates_.at(iterator.Index() - 2).at(0); // 2: prev bc - GateRef saveRegs = gateAcc_.GetDep(suspendGate); - gateAcc_.ReplaceValueIn(saveRegs, saveRegGate, tmpReg); - break; - } - } - // find GET_EXCEPTION gate if this is a catch block - if (ans == Circuit::NullGate() && tmpAcc) { - if (!bb.trys.empty()) { - GateRef getExceptionGate = bb.dependCurrent; - ASSERT(gateAcc_.GetOpCode(getExceptionGate) == OpCode::GET_EXCEPTION); - ASSERT(getExceptionGate != Circuit::NullGate()); - ans = getExceptionGate; - } - } - // find def-site in value selectors of vregs - if (ans == Circuit::NullGate() && !tmpAcc && bb.phi.count(tmpReg)) { - if (!bb.vregToValueGate.count(tmpReg)) { - NewPhi(bb, tmpReg, tmpAcc, bb.vregToValueGate[tmpReg]); - } - ans = bb.vregToValueGate.at(tmpReg); - } - // find def-site in value selectors of acc - if (ans == Circuit::NullGate() && tmpAcc && bb.phiAcc) { - if (bb.valueSelectorAccGate == Circuit::NullGate()) { - NewPhi(bb, tmpReg, tmpAcc, bb.valueSelectorAccGate); - } - ans = bb.valueSelectorAccGate; - } - if (ans == Circuit::NullGate() && IsEntryBlock(bbId)) { // entry block - // find def-site in function args - ASSERT(!tmpAcc); - if (tmpReg == GetEnvVregIdx()) { - ans = gateAcc_.GetInitialEnvGate(argAcc_.GetCommonArgGate(CommonArgIdx::FUNC)); - } else if (argAcc_.ArgGateNotExisted(tmpReg)) { - // when GetArgGate fail, return hole - ans = circuit_->GetConstantGate(MachineType::I64, - JSTaggedValue::VALUE_HOLE, - GateType::TaggedValue()); - } else { - ans = argAcc_.GetArgGate(tmpReg); - } - return ans; - } - if (EnableLoopOptimization()) { - // find def-site in value selectors of vregs - if (ans == Circuit::NullGate() && !tmpAcc) { - if (!bb.vregToValueGate.count(tmpReg)) { - bb.vregToValueGate[tmpReg] = Circuit::NullGate(); - NewPhi(bb, tmpReg, tmpAcc, bb.vregToValueGate[tmpReg]); - } else if (bb.vregToValueGate.at(tmpReg) == Circuit::NullGate()) { - NewPhi(bb, tmpReg, tmpAcc, bb.vregToValueGate[tmpReg]); - } - ans = bb.vregToValueGate.at(tmpReg); - } - // find def-site in value selectors of acc - if (ans == Circuit::NullGate() && tmpAcc) { - if (bb.valueSelectorAccGate == Circuit::NullGate()) { - NewPhi(bb, tmpReg, tmpAcc, bb.valueSelectorAccGate); - } - ans = bb.valueSelectorAccGate; - } - } - if (ans == Circuit::NullGate()) { - // recursively find def-site in dominator block - GateRef res = ResolveDef(bb.iDominator->id, bb.iDominator->end, tmpReg, tmpAcc); - return res; - } else { - // def-site already found - return ans; - } -} - void BytecodeCircuitBuilder::BuildCircuit() { // create arg gates array BuildCircuitArgs(); - CollectPredsInfo(); + frameStateBuilder_.DoBytecodeAnalysis(); // build states sub-circuit of each block BuildSubCircuit(); - // verification of soundness of CFG - for (auto &bb: graph_) { - if (bb.isDead) { - continue; - } 
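// ---------------------------------------------------------------------------
// Illustrative aside, not part of the patch: RemoveUnreachableRegion and
// ClearUnreachableRegion earlier in this file implement a standard worklist
// pass - seed with every non-entry region whose numOfStatePreds is zero, then
// unlink each dead region and enqueue any successor whose predecessor count
// drops to zero. The same idea over a generic graph:
#include <cstddef>
#include <vector>

struct Node {
    std::vector<Node*> succs;
    size_t preds {0};   // in-degree
    bool dead {false};
};

void RemoveUnreachable(std::vector<Node*>& worklist)
{
    while (!worklist.empty()) {
        Node* n = worklist.back();
        worklist.pop_back();
        n->dead = true;
        for (Node* s : n->succs) {
            if (!s->dead && --s->preds == 0) {
                worklist.push_back(s); // became unreachable in turn
            }
        }
        n->succs.clear();
    }
}
// ---------------------------------------------------------------------------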
- ASSERT(bb.statePredIndex == bb.numOfStatePreds); - ASSERT(bb.loopBackIndex == bb.numOfLoopBacks); - if (bb.numOfLoopBacks) { - ASSERT(bb.forwardIndex == bb.numOfStatePreds - bb.numOfLoopBacks); - } - // resolve def-site of virtual regs and set all value inputs - EnumerateBlock(bb, [&](const BytecodeInfo &bytecodeInfo) -> bool { - auto &iterator = bb.GetBytecodeIterator(); - const auto bcIndex = iterator.Index(); - const auto bbIndex = bb.id; - GateRef gate = GetGateByBcIndex(bcIndex); - if (gate == Circuit::NullGate()) { - return true; - } - if (gateAcc_.IsConstant(gate)) { - return true; - } - - auto type = typeRecorder_.GetType(bcIndex); - if (HasValidType(type)) { - gateAcc_.SetGateType(gate, type); - } - auto pgoType = typeRecorder_.GetOrUpdatePGOType(tsManager_, gateAcc_.TryGetPcOffset(gate), type); - gateAcc_.TrySetPGOType(gate, pgoType); - - auto valueCount = gateAcc_.GetInValueCount(gate); - [[maybe_unused]] size_t numValueInputs = bytecodeInfo.ComputeValueInputCount(); - [[maybe_unused]] size_t numValueOutputs = bytecodeInfo.ComputeOutCount(); - // RETURNUNDEFINED has value input, but not from acc - ASSERT(numValueInputs == valueCount || bytecodeInfo.GetOpcode() == EcmaOpcode::RETURNUNDEFINED); - ASSERT(numValueOutputs <= 1 + (bytecodeInfo.EnvOut() ? 1 : 0)); - auto valueStarts = gateAcc_.GetInValueStarts(gate); - for (size_t valueIdx = 0; valueIdx < valueCount; valueIdx++) { - auto inIdx = valueIdx + valueStarts; - if (!gateAcc_.IsInGateNull(gate, inIdx)) { - continue; - } - if (bytecodeInfo.GetOpcode() == EcmaOpcode::CREATEOBJECTWITHEXCLUDEDKEYS_IMM8_V8_V8) { - GateRef depIn = gateAcc_.GetDep(gate); - size_t depCount = gateAcc_.GetNumValueIn(depIn); - GateRef defVreg = Circuit::NullGate(); - for (size_t idx = 0; idx < depCount; idx++) { - defVreg = ResolveDef(bb, bcIndex, idx, false); - gateAcc_.ReplaceValueIn(depIn, defVreg, idx); - } - } - if (valueIdx < bytecodeInfo.inputs.size()) { - auto vregId = std::get(bytecodeInfo.inputs.at(valueIdx)).GetId(); - GateRef defVreg = Circuit::NullGate(); - if (IsFirstBCEnvIn(bbIndex, bcIndex, vregId)) { - defVreg = gateAcc_.GetInitialEnvGate(argAcc_.GetCommonArgGate(CommonArgIdx::FUNC)); - } else { - defVreg = ResolveDef(bb, bcIndex, vregId, false); - } - gateAcc_.NewIn(gate, inIdx, defVreg); - } else { - GateRef defAcc = ResolveDef(bb, bcIndex, 0, true); - gateAcc_.NewIn(gate, inIdx, defAcc); - } - } - return true; - }); - } - - if (IsTypeLoweringEnabled()) { - frameStateBuilder_.BuildFrameState(); - } - - gateAcc_.EliminateRedundantPhi(); - if (IsLogEnabled()) { PrintGraph("Bytecode2Gate"); LOG_COMPILER(INFO) << "\033[34m" << "============= " @@ -1482,115 +766,12 @@ void BytecodeCircuitBuilder::BuildCircuit() } } -GateRef BytecodeCircuitBuilder::GetExistingRestore(GateRef resumeGate, uint16_t tmpReg) const -{ - auto pr = std::make_pair(resumeGate, tmpReg); - if (resumeRegToRestore_.count(pr)) { - return resumeRegToRestore_.at(pr); - } - return Circuit::NullGate(); -} - -void BytecodeCircuitBuilder::SetExistingRestore(GateRef resumeGate, uint16_t tmpReg, GateRef restoreGate) -{ - auto pr = std::make_pair(resumeGate, tmpReg); - resumeRegToRestore_[pr] = restoreGate; -} - -void BytecodeCircuitBuilder::CollectLoopBack() -{ - auto size = GetBasicBlockCount(); - ChunkVector workList(circuit_->chunk()); - ChunkVector visitState(circuit_->chunk()); - visitState.resize(size, VisitState::UNVISITED); - size_t entryId = 0; // entry id - workList.emplace_back(entryId); - while (!workList.empty()) { - size_t bbId = workList.back(); - auto &bb = 
GetBasicBlockById(bbId); - if (visitState[bbId] == VisitState::UNVISITED) { - dfsList_.emplace_back(bbId); - visitState[bbId] = VisitState::PENDING; - } - bool allVisited = true; - - for (const auto &succBlock: bb.succs) { - size_t succId = succBlock->id; - if (visitState[succId] == VisitState::UNVISITED) { - // dfs - workList.emplace_back(succId); - allVisited = false; - break; - } else if (visitState[succId] == VisitState::PENDING) { - // back edge - CountLoopBackEdge(bbId, succId); - } - } - - for (const auto &succBlock: bb.catchs) { - size_t succId = succBlock->id; - if (visitState[succId] == VisitState::UNVISITED) { - // dfs - workList.emplace_back(succId); - allVisited = false; - break; - } else if (visitState[succId] == VisitState::PENDING) { - // back edge - CountLoopBackEdge(bbId, succId); - } - } - if (allVisited) { - workList.pop_back(); - visitState[bbId] = VisitState::VISITED; - } - } -} - -void BytecodeCircuitBuilder::CountLoopBackEdge(size_t fromId, size_t toId) -{ - auto &toBlock = GetBasicBlockById(toId); - if (toBlock.numOfLoopBacks == 0) { - loopHeads_.emplace_back(std::make_pair(0, toId)); - } - toBlock.loopbackBlocks.insert(fromId); - toBlock.numOfLoopBacks = toBlock.loopbackBlocks.size(); -} - -void BytecodeCircuitBuilder::ComputeLoopDepth(size_t loopHead) -{ - ChunkSet visited (circuit_->chunk()); - ChunkQueue workList (circuit_->chunk()); - visited.insert(loopHead); - auto &headBB = GetBasicBlockById(loopHead); - headBB.loopDepth++; - for (auto loopBack : headBB.loopbackBlocks) { - workList.push(loopBack); - } - while (!workList.empty()) { - size_t cur = workList.front(); - workList.pop(); - if (visited.count(cur) > 0) { - continue; - } - visited.insert(cur); - auto &curBB = GetBasicBlockById(cur); - curBB.loopDepth++; - for (const auto& pred : curBB.preds) { - workList.push(pred->id); - } - for (const auto& pred : curBB.trys) { - workList.push(pred->id); - } - } - loopSize_ = visited.size(); -} - void BytecodeCircuitBuilder::PrintGraph(const char* title) { LOG_COMPILER(INFO) << "======================== " << title << " ========================"; for (size_t i = 0; i < graph_.size(); i++) { - BytecodeRegion& bb = graph_[i]; - if (bb.isDead) { + BytecodeRegion& bb = RegionAt(i); + if (!IsEntryBlock(bb.id) && bb.numOfStatePreds == 0) { LOG_COMPILER(INFO) << "B" << bb.id << ": ;preds= invalid BB"; LOG_COMPILER(INFO) << "\tBytecodePC: [" << std::to_string(bb.start) << ", " << std::to_string(bb.end) << ")"; @@ -1614,9 +795,9 @@ void BytecodeCircuitBuilder::PrintGraph(const char* title) } LOG_COMPILER(INFO) << log1; - for (size_t j = 0; j < bb.catchs.size(); j++) { - LOG_COMPILER(INFO) << "\tcatch [: " << std::to_string(bb.catchs[j]->start) << ", " - << std::to_string(bb.catchs[j]->end) << ")"; + for (size_t j = 0; j < bb.catches.size(); j++) { + LOG_COMPILER(INFO) << "\tcatch [: " << std::to_string(bb.catches[j]->start) << ", " + << std::to_string(bb.catches[j]->end) << ")"; } std::string log2("\tTrys: "); @@ -1625,32 +806,6 @@ void BytecodeCircuitBuilder::PrintGraph(const char* title) } LOG_COMPILER(INFO) << log2; - std::string log3 = "\tDom: "; - for (size_t j = 0; j < bb.immDomBlocks.size(); j++) { - log3 += "B" + std::to_string(bb.immDomBlocks[j]->id) + std::string(", "); - } - LOG_COMPILER(INFO) << log3; - - if (bb.iDominator) { - LOG_COMPILER(INFO) << "\tIDom B" << bb.iDominator->id; - } - - std::string log4("\tDom Frontiers: "); - for (const auto &frontier: bb.domFrontiers) { - log4 += std::to_string(frontier->id) + " , "; - } - LOG_COMPILER(INFO) << log4; - - 
std::string log5("\tPhi: "); - for (auto variable: bb.phi) { - log5 += std::to_string(variable) + " , "; - } - LOG_COMPILER(INFO) << log5; - - std::string log6("\tLoop Depth: "); - log6 += std::to_string(bb.loopDepth); - LOG_COMPILER(INFO) << log6; - PrintBytecodeInfo(bb); LOG_COMPILER(INFO) << ""; } @@ -1658,9 +813,6 @@ void BytecodeCircuitBuilder::PrintGraph(const char* title) void BytecodeCircuitBuilder::PrintBytecodeInfo(BytecodeRegion& bb) { - if (bb.isDead) { - return; - } if (IsEntryBlock(bb.id)) { LOG_COMPILER(INFO) << "\tBytecode[] = Empty"; return; diff --git a/ecmascript/compiler/bytecode_circuit_builder.h b/ecmascript/compiler/bytecode_circuit_builder.h index 5e2e8ad5de43fe552a5ba97020988bbc99ccf899..86a7ddcf98ce2e7c09155489f8ef73a9671a5f5d 100644 --- a/ecmascript/compiler/bytecode_circuit_builder.h +++ b/ecmascript/compiler/bytecode_circuit_builder.h @@ -42,10 +42,10 @@ namespace panda::ecmascript::kungfu { struct ExceptionItem { uint8_t* startPc; uint8_t* endPc; - std::vector catchs; + std::vector catches; - ExceptionItem(uint8_t* startPc, uint8_t* endPc, std::vector catchs) - : startPc(startPc), endPc(endPc), catchs(catchs) {} + ExceptionItem(uint8_t* startPc, uint8_t* endPc, std::vector catches) + : startPc(startPc), endPc(endPc), catches(catches) {} }; using ExceptionInfo = std::vector; @@ -160,37 +160,22 @@ struct BytecodeRegion { size_t id {0}; uint32_t start {0}; uint32_t end {0}; - std::vector preds {}; // List of predessesor blocks - std::vector succs {}; // List of successors blocks - std::vector trys {}; // List of trys blocks - std::vector catchs {}; // List of catches blocks - std::vector immDomBlocks {}; // List of dominated blocks - BytecodeRegion *iDominator {nullptr}; // Block that dominates the current block - std::vector domFrontiers {}; // List of dominace frontiers - std::set loopbackBlocks {}; // List of loopback block ids - bool isDead {false}; - bool phiAcc {false}; - size_t loopDepth {0}; - std::set phi {}; // phi node - std::set phiGate {}; // phi gate + ChunkVector preds; // List of predessesor blocks + ChunkVector succs; // List of successors blocks + ChunkVector trys; // List of trys blocks + ChunkVector catches; // List of catches blocks size_t numOfStatePreds {0}; - size_t numOfLoopBacks {0}; - size_t statePredIndex {0}; - size_t forwardIndex {0}; - size_t loopBackIndex {0}; - std::vector> expandedPreds {}; - GateRef loopExitState {Circuit::NullGate()}; - GateRef loopExitDepend {Circuit::NullGate()}; - GateRef stateCurrent {Circuit::NullGate()}; - GateRef dependCurrent {Circuit::NullGate()}; - GateRef stateMerge {Circuit::NullGate()}; - GateRef dependMerge {Circuit::NullGate()}; - GateRef loopBackStateMerge {Circuit::NullGate()}; - GateRef loopBackDependMerge {Circuit::NullGate()}; - std::unordered_map vregToValueGate {}; // corresponding value gates of vregs - GateRef valueSelectorAccGate {Circuit::NullGate()}; + size_t loopNumber {0}; + size_t loopIndex {0}; + ChunkVector> expandedPreds; + GateRef dependCache {Circuit::NullGate()}; BytecodeIterator bytecodeIterator_ {}; + BytecodeRegion(Chunk* chunk) : preds(chunk), succs(chunk), + trys(chunk), catches(chunk), expandedPreds(chunk) + { + } + BytecodeIterator &GetBytecodeIterator() { return bytecodeIterator_; } @@ -202,54 +187,28 @@ struct BytecodeRegion { void SortCatches() { - if (catchs.size() > 1) { - std::sort(catchs.begin(), catchs.end(), [](BytecodeRegion *first, BytecodeRegion *second) { + if (catches.size() > 1) { + std::sort(catches.begin(), catches.end(), [](BytecodeRegion *first, 
BytecodeRegion *second) { return first->start < second->start; }); } } - void UpdateTryCatchInfoForDeadBlock() - { - // Try-Catch infos of dead block should be cleared - UpdateTryCatchInfo(); - isDead = true; - } - - void UpdateRedundantTryCatchInfo(bool noThrow) - { - // if block which can throw exception has serval catchs block, only the innermost catch block is useful - if (!noThrow && catchs.size() > 1) { - size_t innerMostIndex = 1; - UpdateTryCatchInfo(innerMostIndex); - } - } - - void UpdateTryCatchInfoIfNoThrow(bool noThrow) + void EraseThisBlock(ChunkVector &blocks) { - // if block has no general insts, try-catch infos of it should be cleared - if (noThrow && !catchs.empty()) { - UpdateTryCatchInfo(); + auto it = std::find(blocks.begin(), blocks.end(), this); + if (it != blocks.end()) { + blocks.erase(it); } } -private: - void UpdateTryCatchInfo(size_t index = 0) + bool IsEmptryBlock() const { - for (auto catchBlock = catchs.begin() + index; catchBlock != catchs.end(); catchBlock++) { - auto tryBlock = std::find((*catchBlock)->trys.begin(), (*catchBlock)->trys.end(), this); - if (tryBlock != (*catchBlock)->trys.end()) { - (*catchBlock)->trys.erase(tryBlock); - } - if ((*catchBlock)->trys.size() == 0) { - (*catchBlock)->isDead = true; - } - } - catchs.erase(catchs.begin() + index, catchs.end()); + return end == static_cast(BytecodeIterator::INVALID_INDEX); } }; -using BytecodeGraph = std::vector; +using BytecodeGraph = ChunkVector; class BytecodeCircuitBuilder { public: @@ -265,19 +224,20 @@ public: std::string name, const CString &recordName, PGOProfilerDecoder *decoder, - bool isInline) - : tsManager_(tsManager), circuit_(circuit), file_(jsPandaFile), + bool isInline, + bool enableOptTrackField) + : tsManager_(tsManager), circuit_(circuit), graph_(circuit->chunk()), file_(jsPandaFile), method_(methodLiteral), gateAcc_(circuit), argAcc_(circuit, method_), - typeRecorder_(jsPandaFile, method_, tsManager, recordName, decoder, methodPCInfo, bytecodes), + typeRecorder_(jsPandaFile, method_, tsManager, recordName, decoder, methodPCInfo, bytecodes, + enableOptTrackField), hasTypes_(hasTypes), enableLog_(enableLog), enableTypeLowering_(enableTypeLowering), pcOffsets_(methodPCInfo.pcOffsets), frameStateBuilder_(this, circuit, methodLiteral), methodName_(name), recordName_(recordName), bytecodes_(bytecodes), - dfsList_(circuit->chunk()), - loopExitToVregGate_(circuit->chunk()), - loopExitToAccGate_(circuit->chunk()), + loopHeaderGates_(circuit->chunk()), preFrameState_(circuit_->GetRoot()), + preFrameArgs_(circuit_->GetRoot()), isInline_(isInline) { } @@ -286,8 +246,6 @@ public: NO_MOVE_SEMANTIC(BytecodeCircuitBuilder); void PUBLIC_API BytecodeToCircuit(); void CollectRegionInfo(uint32_t bcIndex); - GateRef ResolveDef(const size_t bbId, int32_t bcId, const uint16_t reg, const bool acc, bool needIter = true); - GateRef ResolveDef(const BytecodeRegion &bb, int32_t bcId, const uint16_t reg, const bool acc); [[nodiscard]] Circuit* GetCircuit() const { @@ -315,6 +273,19 @@ public: return jsGatesToByteCode_.at(gate); } + bool IsBcIndexByGate(GateRef gate) const + { + if (jsGatesToByteCode_.find(gate) == jsGatesToByteCode_.end()) { + return false; + } + return true; + } + + bool NeedCheckSafePointAndStackOver() const + { + return !isInline_ && !method_->IsNoGC(); + } + void UpdateBcIndexGate(GateRef gate, uint32_t bcIndex) { ASSERT(gateAcc_.GetOpCode(gate) == OpCode::JS_BYTECODE); @@ -323,11 +294,6 @@ public: jsGatesToByteCode_[gate] = bcIndex; } - const std::vector>& GetLoopHeads() const - { - 
return loopHeads_; - } - [[nodiscard]] const MethodLiteral* GetMethod() const { return method_; @@ -371,24 +337,27 @@ public: template void EnumerateBlock(BytecodeRegion &bb, const Callback &cb) { - // Entry block is a empty block - if (IsEntryBlock(bb.id)) { - return; - } auto &iterator = bb.GetBytecodeIterator(); - for (iterator.GotoStart(); !iterator.Done(); ++iterator) { + iterator.GotoStart(); + while (!iterator.Done()) { auto &bytecodeInfo = iterator.GetBytecodeInfo(); bool ret = cb(bytecodeInfo); if (!ret) { break; } + ++iterator; } } BytecodeRegion &GetBasicBlockById(size_t id) { ASSERT(id < graph_.size()); - return graph_[id]; + return RegionAt(id); + } + + void AddBasicBlock(BytecodeRegion* region) + { + graph_.emplace_back(region); } size_t GetBasicBlockCount() const @@ -417,6 +386,16 @@ public: return typeRecorder_.GetRwOpType(GetPcOffsetByGate(gate)); } + std::vector LoadElementsKinds(GateRef gate) const + { + return typeRecorder_.LoadElementsKinds(GetPcOffsetByGate(gate)); + } + + ElementsKind GetArrayElementsKind(GateRef gate) const + { + return typeRecorder_.GetElementsKind(gateAcc_.TryGetPcOffset(gate)); + } + bool ShouldPGOTypeInfer(GateRef gate) const { return jsGatesToByteCode_.find(gate) != jsGatesToByteCode_.end(); @@ -485,9 +464,11 @@ public: bool EnableLoopOptimization() const { - return (!HasTryCatch()) && (loopHeads_.size() != 0); + return (!HasTryCatch()) && frameStateBuilder_.HasLoop(); } + void RemoveUnreachableRegion(); + GateRef GetFrameArgs() const { return argAcc_.GetFrameArgs(); @@ -503,9 +484,14 @@ public: preFrameState_ = gate; } - const ChunkVector& GetDfsList() const + GateRef GetPreFrameArgs() const { - return dfsList_; + return preFrameArgs_; + } + + void SetPreFrameArgs(GateRef gate) + { + preFrameArgs_ = gate; } inline bool IsEntryBlock(const size_t bbId) const @@ -518,49 +504,58 @@ public: return bbId == 1; } + TSManager *GetTSManager() const + { + return tsManager_; + } + + const TypeRecorder *GetTypeRecorder() const + { + return &typeRecorder_; + } + + GateRef GetArgGate(const size_t currentVreg) const + { + return argAcc_.GetArgGate(currentVreg); + } + + GateRef ArgGateNotExisted(const size_t currentVreg) + { + return argAcc_.ArgGateNotExisted(currentVreg); + } + + ChunkVector& GetLoopHeaderGates() + { + return loopHeaderGates_; + } + + size_t NumberOfLiveBlock() const + { + return numOfLiveBB_; + } + private: void CollectTryCatchBlockInfo(ExceptionInfo &Exception); void BuildCatchBlocks(const ExceptionInfo &Exception); void BuildEntryBlock(); void BuildRegions(const ExceptionInfo &Exception); - void ComputeDominatorTree(); - void BuildImmediateDominator(const std::vector &immDom); - void ComputeDomFrontiers(const std::vector &immDom); - void RemoveDeadRegions(const std::unordered_map &bbIdToDfsTimestamp); - void InsertPhi(); - void InsertExceptionPhi(std::unordered_map> &defsitesInfo); - void UpdateCFG(); - bool ShouldBeDead(BytecodeRegion &curBlock); // build circuit void BuildCircuitArgs(); - void CollectPredsInfo(); - void NewMerge(GateRef &state, GateRef &depend, size_t numOfIns); - void NewLoopBegin(BytecodeRegion &bb, GateRef &state, GateRef &depend); - void NewLoopExit(GateRef &state, GateRef &depend); - void TryInsertLoopExit(BytecodeRegion &bb, BytecodeRegion &bbNext, GateRef &state, GateRef &depend); - void BuildBlockCircuitHead(BytecodeRegion &bb, GateRef &state, GateRef &depend); std::vector CreateGateInList(const BytecodeInfo &info, const GateMetaData *meta); - void SetBlockPred(BytecodeRegion &bb, BytecodeRegion &bbNext, const 
GateRef &state, const GateRef &depend); - void SetLoopBlockPred(BytecodeRegion &bb, BytecodeRegion &bbNext, - GateRef &state, GateRef &depend); GateRef NewConst(const BytecodeInfo &info); - void NewJSGate(BytecodeRegion &bb, GateRef &state, GateRef &depend); - void NewJump(BytecodeRegion &bb, GateRef &state, GateRef &depend); - void NewReturn(BytecodeRegion &bb, GateRef &state, GateRef &depend); - void NewByteCode(BytecodeRegion &bb, GateRef &state, GateRef &depend); + void NewJSGate(BytecodeRegion &bb); + void NewJump(BytecodeRegion &bbd); + GateRef NewReturn(BytecodeRegion &bb); + void NewByteCode(BytecodeRegion &bb); + void MergeThrowGate(BytecodeRegion &bb, uint32_t bcIndex); + void MergeExceptionGete(BytecodeRegion &bb, const BytecodeInfo& bytecodeInfo, uint32_t bcIndex); void BuildSubCircuit(); - void NewPhi(BytecodeRegion &bb, uint16_t reg, bool acc, GateRef ¤tPhi); - GateRef NewLoopBackPhi(BytecodeRegion &bb, uint16_t reg, bool acc); - GateRef NewLoopForwardPhi(BytecodeRegion &bb, uint16_t reg, bool acc); - bool IsLoopExitValueExists(GateRef loopExit, uint16_t reg, bool acc); - GateRef GetLoopExitValue(GateRef loopExit, uint16_t reg, bool acc); - GateRef CreateLoopExitValue(GateRef loopExit, uint16_t reg, bool acc, GateRef value); - GateRef NewLoopExitValue(GateRef loopExit, uint16_t reg, bool acc, GateRef value); - GateRef NewValueFromPredBB(BytecodeRegion &bb, size_t idx, GateRef exit, uint16_t reg, bool acc); + void UpdateCFG(); + void CollectTryPredsInfo(); + void ClearUnreachableRegion(ChunkVector& pendingList); + void RemoveUnusedPredsInfo(BytecodeRegion& bb); void BuildCircuit(); - GateRef GetExistingRestore(GateRef resumeGate, uint16_t tmpReg) const; - void SetExistingRestore(GateRef resumeGate, uint16_t tmpReg, GateRef restoreGate); void PrintGraph(); void PrintBBInfo(); void PrintGraph(const char* title); @@ -568,19 +563,9 @@ private: void PrintDefsitesInfo(const std::unordered_map> &defsitesInfo); void BuildRegionInfo(); void BuildFrameArgs(); - size_t LoopExitCount(size_t from, size_t to); - void CollectLoopBack(); - void ComputeLoopDepth(size_t loopHead); - void CountLoopBackEdge(size_t fromId, size_t toId); - - inline bool IsFirstBCEnvIn(const size_t bbId, const size_t bcIndex, const uint16_t reg) const - { - return (IsFirstBasicBlock(bbId) && bcIndex == 0 && reg == GetNumberVRegs()); - } - - inline bool HasValidType(GateType type) + BytecodeRegion &RegionAt(size_t i) { - return HasTypes() && !type.IsAnyType() && !tsManager_->IsPGOGT(type.GetGTRef()); + return *graph_[i]; } TSManager *tsManager_; @@ -598,7 +583,6 @@ private: bool enableTypeLowering_ {false}; std::vector suspendAndResumeGates_ {}; std::vector pcOffsets_; - std::map, kungfu::GateRef> resumeRegToRestore_ {}; FrameStateBuilder frameStateBuilder_; std::string methodName_; const CString &recordName_; @@ -606,13 +590,11 @@ private: RegionsInfo regionsInfo_{}; std::vector infoData_ {}; bool hasTryCatch_ {false}; - std::vector> loopHeads_; - size_t loopSize_{0}; - ChunkVector dfsList_; - ChunkMap, GateRef> loopExitToVregGate_; - ChunkMap loopExitToAccGate_; + ChunkVector loopHeaderGates_; GateRef preFrameState_ {Circuit::NullGate()}; + GateRef preFrameArgs_ {Circuit::NullGate()}; + size_t numOfLiveBB_; bool isInline_ {false}; }; } // namespace panda::ecmascript::kungfu -#endif // ECMASCRIPT_CLASS_LINKER_BYTECODE_CIRCUIT_IR_BUILDER_H +#endif // ECMASCRIPT_CLASS_LINKER_BYTECODE_CIRCUIT_IR_BUILDER_H \ No newline at end of file diff --git a/ecmascript/compiler/bytecode_info_collector.cpp 
b/ecmascript/compiler/bytecode_info_collector.cpp index 6a03a7059c6b01ff083a3be4767c5beae59a943a..aec03b359303c6c93d6bc87c5cacf47d05e63cda 100644 --- a/ecmascript/compiler/bytecode_info_collector.cpp +++ b/ecmascript/compiler/bytecode_info_collector.cpp @@ -15,10 +15,10 @@ #include "ecmascript/compiler/bytecode_info_collector.h" -#include "ecmascript/base/path_helper.h" #include "ecmascript/compiler/type_recorder.h" #include "ecmascript/interpreter/interpreter-inl.h" #include "ecmascript/jspandafile/type_literal_extractor.h" +#include "ecmascript/module/module_path_helper.h" #include "ecmascript/pgo_profiler/pgo_profiler_decoder.h" #include "ecmascript/ts_types/ts_type_parser.h" #include "libpandafile/code_data_accessor.h" @@ -36,6 +36,7 @@ BytecodeInfoCollector::BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFil jsPandaFile_(jsPandaFile), bytecodeInfo_(maxAotMethodSize), pfDecoder_(pfDecoder), + snapshotCPData_(vm, jsPandaFile), enableCollectLiteralInfo_(enableCollectLiteralInfo) { vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->SetBytecodeInfoCollector(this); @@ -43,6 +44,20 @@ BytecodeInfoCollector::BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFil ProcessEnvs(); } +BytecodeInfoCollector::BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFile, JSHandle &jsFunction, + PGOProfilerDecoder &pfDecoder, bool enableCollectLiteralInfo) + : vm_(vm), + jsPandaFile_(jsPandaFile), + bytecodeInfo_(1), + pfDecoder_(pfDecoder), + snapshotCPData_(vm, jsPandaFile), + enableCollectLiteralInfo_(enableCollectLiteralInfo) +{ + vm_->GetJSThread()->GetCurrentEcmaContext()->GetTSManager()->SetBytecodeInfoCollector(this); + ProcessMethod(jsFunction); + ProcessEnvs(); +} + BytecodeInfoCollector::~BytecodeInfoCollector() { if (envManager_ != nullptr) { @@ -67,7 +82,7 @@ void BytecodeInfoCollector::ProcessClasses() MethodLiteral *methods = jsPandaFile_->GetMethodLiterals(); const panda_file::File *pf = jsPandaFile_->GetPandaFile(); size_t methodIdx = 0; - std::map> processedInsns; + std::map> processedMethod; Span classIndexes = jsPandaFile_->GetClasses(); auto &recordNames = bytecodeInfo_.GetRecordNames(); @@ -82,7 +97,7 @@ void BytecodeInfoCollector::ProcessClasses() panda_file::ClassDataAccessor cda(*pf, classId); CString desc = utf::Mutf8AsCString(cda.GetDescriptor()); const CString recordName = JSPandaFile::ParseEntryPoint(desc); - cda.EnumerateMethods([this, methods, &methodIdx, pf, &processedInsns, + cda.EnumerateMethods([this, methods, &methodIdx, pf, &processedMethod, &recordNames, &methodPcInfos, &recordName, &methodIndexes, &classConstructIndexes] (panda_file::MethodDataAccessor &mda) { auto methodId = mda.GetMethodId(); @@ -114,21 +129,21 @@ void BytecodeInfoCollector::ProcessClasses() panda_file::CodeDataAccessor codeDataAccessor(*pf, codeId.value()); uint32_t codeSize = codeDataAccessor.GetCodeSize(); const uint8_t *insns = codeDataAccessor.GetInstructions(); - auto it = processedInsns.find(insns); - if (it == processedInsns.end()) { + auto it = processedMethod.find(methodOffset); + if (it == processedMethod.end()) { std::vector classNameVec; CollectMethodPcsFromBC(codeSize, insns, methodLiteral, classNameVec, recordName, methodOffset, classConstructIndexes); - processedInsns[insns] = std::make_pair(methodPcInfos.size() - 1, methodOffset); + processedMethod[methodOffset] = std::make_pair(methodPcInfos.size() - 1, methodOffset); // collect className and literal offset for type infer if (EnableCollectLiteralInfo()) { CollectClassLiteralInfo(methodLiteral, classNameVec); 
} } - SetMethodPcInfoIndex(methodOffset, processedInsns[insns]); + SetMethodPcInfoIndex(methodOffset, processedMethod[methodOffset]); jsPandaFile_->SetMethodLiteralToMap(methodLiteral); - pfDecoder_.MatchAndMarkMethod(recordName, name.c_str(), methodId); + pfDecoder_.MatchAndMarkMethod(jsPandaFile_, recordName, name.c_str(), methodId); }); } // class Construct need to use new target, can not fastcall @@ -149,6 +164,61 @@ void BytecodeInfoCollector::ProcessClasses() << methodIdx; } +void BytecodeInfoCollector::ProcessMethod(JSHandle &jsFunction) +{ + (void)jsFunction; + auto &recordNames = bytecodeInfo_.GetRecordNames(); + auto &methodPcInfos = bytecodeInfo_.GetMethodPcInfos(); + + Method *method = Method::Cast(jsFunction->GetMethod().GetTaggedObject()); + const panda_file::File *pf = jsPandaFile_->GetPandaFile(); + panda_file::File::EntityId methodIdx = method->GetMethodId(); + panda_file::MethodDataAccessor mda(*pf, methodIdx); + panda_file::File::EntityId classIdx = panda_file::MethodDataAccessor::GetClassId(*pf, methodIdx); + panda_file::ClassDataAccessor cda(*pf, classIdx); + CString desc = utf::Mutf8AsCString(cda.GetDescriptor()); + const CString recordName = JSPandaFile::ParseEntryPoint(desc); + recordNames.emplace_back(recordName); + auto methodId = mda.GetMethodId(); + CollectFunctionTypeId(methodId); + + // Generate all constpool + [[maybe_unused]] JSTaggedValue constpool = vm_->GetJSThread()->GetCurrentEcmaContext()->FindConstpool(jsPandaFile_, methodId); + ASSERT(!constpool.IsHole()); + + auto methodOffset = methodId.GetOffset(); + CString name = reinterpret_cast(jsPandaFile_->GetStringData(mda.GetNameId()).data); + if (JSPandaFile::IsEntryOrPatch(name)) { + // ASSERT(0); + } + + // MethodLiteral *methodLiteral = method->GetMethodLiteral(); + MethodLiteral *methodLiteral = method->GetMethodLiteral(); + ASSERT(jsPandaFile_->IsNewVersion()); + + auto codeId = mda.GetCodeId(); + ASSERT(codeId.has_value()); + panda_file::CodeDataAccessor codeDataAccessor(*pf, codeId.value()); + uint32_t codeSize = codeDataAccessor.GetCodeSize(); + const uint8_t *insns = codeDataAccessor.GetInstructions(); + + std::map> processedMethod; + std::vector classConstructIndexes; + std::vector classNameVec; + + CollectMethodPcsFromBC(codeSize, insns, methodLiteral, classNameVec, + recordName, methodOffset, classConstructIndexes); + processedMethod[methodOffset] = std::make_pair(methodPcInfos.size() - 1, methodOffset); + // collect className and literal offset for type infer + if (EnableCollectLiteralInfo()) { + CollectClassLiteralInfo(methodLiteral, classNameVec); + } + + SetMethodPcInfoIndex(methodOffset, processedMethod[methodOffset]); +// jsPandaFile_->SetMethodLiteralToMap(methodLiteral); +// pfDecoder_.MatchAndMarkMethod(recordName, name.c_str(), methodId); +} + void BytecodeInfoCollector::CollectClassLiteralInfo(const MethodLiteral *method, const std::vector &classNameVec) { @@ -242,6 +312,8 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u auto &pcOffsets = methodPcInfos.back().pcOffsets; const uint8_t *curPc = bcIns.GetAddress(); bool canFastCall = true; + bool noGC = true; + bool debuggerStmt = false; while (bcIns.GetAddress() != bcInsLast.GetAddress()) { bool fastCallFlag = true; @@ -250,16 +322,24 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u canFastCall = false; } CollectModuleInfoFromBC(bcIns, method, recordName); - CollectConstantPoolIndexInfoFromBC(bcIns, method); + snapshotCPData_.Record(bcIns, bcIndex, recordName, 
method); pgoBCInfo_.Record(bcIns, bcIndex, recordName, method); + if (noGC && !bytecodes_.GetBytecodeMetaData(curPc).IsNoGC()) { + noGC = false; + } + if (!debuggerStmt && bytecodes_.GetBytecodeMetaData(curPc).HasDebuggerStmt()) { + debuggerStmt = true; + } curPc = bcIns.GetAddress(); auto nextInst = bcIns.GetNext(); bcIns = nextInst; pcOffsets.emplace_back(curPc); bcIndex++; } - bytecodeInfo_.SetMethodOffsetToCanFastCall(methodOffset, canFastCall); + bytecodeInfo_.SetMethodOffsetToFastCallInfo(methodOffset, canFastCall, noGC); method->SetIsFastCall(canFastCall); + method->SetNoGCBit(noGC); + method->SetHasDebuggerStmtBit(debuggerStmt); } void BytecodeInfoCollector::SetMethodPcInfoIndex(uint32_t methodOffset, @@ -620,7 +700,8 @@ void BytecodeInfoCollector::CollectRecordReferenceREL() { auto &recordNames = bytecodeInfo_.GetRecordNames(); for (auto &record : recordNames) { - if (jsPandaFile_->HasTSTypes(record) && jsPandaFile_->IsModule(vm_->GetJSThread(), record)) { + JSRecordInfo info = jsPandaFile_->FindRecordInfo(record); + if (jsPandaFile_->HasTSTypes(info)|| jsPandaFile_->IsModule(info)) { CollectRecordImportInfo(record); CollectRecordExportInfo(record); } @@ -650,7 +731,7 @@ void BytecodeInfoCollector::CollectRecordImportInfo(const CString &recordName) for (size_t index = 0; index < length; index++) { JSTaggedValue resolvedBinding = moduleArray->Get(index); // if resolvedBinding.IsHole(), means that importname is * or it belongs to empty Aot module. - if (resolvedBinding.IsHole()) { + if (!resolvedBinding.IsResolvedIndexBinding()) { continue; } ResolvedIndexBinding *binding = ResolvedIndexBinding::Cast(resolvedBinding.GetTaggedObject()); @@ -682,11 +763,11 @@ void BytecodeInfoCollector::CollectRecordExportInfo(const CString &recordName) starExportEntry.Update(starEntriesArray->Get(index)); JSTaggedValue moduleRequest = starExportEntry->GetModuleRequest(); CString moduleRequestName = ConvertToString(EcmaString::Cast(moduleRequest.GetTaggedObject())); - if (base::PathHelper::IsNativeModuleRequest(moduleRequestName)) { + if (ModulePathHelper::IsNativeModuleRequest(moduleRequestName)) { return; } CString baseFileName = jsPandaFile_->GetJSPandaFileDesc(); - CString entryPoint = base::PathHelper::ConcatFileNameWithMerge(thread, jsPandaFile_, + CString entryPoint = ModulePathHelper::ConcatFileNameWithMerge(thread, jsPandaFile_, baseFileName, recordName, moduleRequestName); if (jsPandaFile_->HasTypeSummaryOffset(entryPoint)) { bytecodeInfo_.AddStarExportToRecord(recordName, entryPoint); @@ -703,83 +784,6 @@ void BytecodeInfoCollector::RearrangeInnerMethods() } } -void BytecodeInfoCollector::CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns, - const MethodLiteral *method) -{ - BytecodeInstruction::Opcode opcode = static_cast(bcIns.GetOpcode()); - uint32_t methodOffset = method->GetMethodId().GetOffset(); - switch (opcode) { - case BytecodeInstruction::Opcode::LDA_STR_ID16: - case BytecodeInstruction::Opcode::STOWNBYNAME_IMM8_ID16_V8: - case BytecodeInstruction::Opcode::STOWNBYNAME_IMM16_ID16_V8: - case BytecodeInstruction::Opcode::CREATEREGEXPWITHLITERAL_IMM8_ID16_IMM8: - case BytecodeInstruction::Opcode::CREATEREGEXPWITHLITERAL_IMM16_ID16_IMM8: - case BytecodeInstruction::Opcode::STCONSTTOGLOBALRECORD_IMM16_ID16: - case BytecodeInstruction::Opcode::TRYLDGLOBALBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::TRYLDGLOBALBYNAME_IMM16_ID16: - case BytecodeInstruction::Opcode::TRYSTGLOBALBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::TRYSTGLOBALBYNAME_IMM16_ID16: - 
case BytecodeInstruction::Opcode::STTOGLOBALRECORD_IMM16_ID16: - case BytecodeInstruction::Opcode::STOWNBYNAMEWITHNAMESET_IMM8_ID16_V8: - case BytecodeInstruction::Opcode::STOWNBYNAMEWITHNAMESET_IMM16_ID16_V8: - case BytecodeInstruction::Opcode::LDTHISBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::LDTHISBYNAME_IMM16_ID16: - case BytecodeInstruction::Opcode::STTHISBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::STTHISBYNAME_IMM16_ID16: - case BytecodeInstruction::Opcode::LDGLOBALVAR_IMM16_ID16: - case BytecodeInstruction::Opcode::LDOBJBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::LDOBJBYNAME_IMM16_ID16: - case BytecodeInstruction::Opcode::STOBJBYNAME_IMM8_ID16_V8: - case BytecodeInstruction::Opcode::STOBJBYNAME_IMM16_ID16_V8: - case BytecodeInstruction::Opcode::LDSUPERBYNAME_IMM8_ID16: - case BytecodeInstruction::Opcode::LDSUPERBYNAME_IMM16_ID16: - case BytecodeInstruction::Opcode::STSUPERBYNAME_IMM8_ID16_V8: - case BytecodeInstruction::Opcode::STSUPERBYNAME_IMM16_ID16_V8: - case BytecodeInstruction::Opcode::STGLOBALVAR_IMM16_ID16: - case BytecodeInstruction::Opcode::LDBIGINT_ID16: { - auto index = bcIns.GetId().AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::STRING, index, methodOffset); - break; - } - case BytecodeInstruction::Opcode::DEFINEFUNC_IMM8_ID16_IMM8: - case BytecodeInstruction::Opcode::DEFINEFUNC_IMM16_ID16_IMM8: - case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM8_ID16_IMM8: - case BytecodeInstruction::Opcode::DEFINEMETHOD_IMM16_ID16_IMM8: { - auto index = bcIns.GetId().AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, index, methodOffset); - break; - } - case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM8_ID16: - case BytecodeInstruction::Opcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: { - auto index = bcIns.GetId().AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::OBJECT_LITERAL, index, methodOffset); - break; - } - case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM8_ID16: - case BytecodeInstruction::Opcode::CREATEARRAYWITHBUFFER_IMM16_ID16: { - auto index = bcIns.GetId().AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::ARRAY_LITERAL, index, methodOffset); - break; - } - case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: { - auto methodIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset); - auto literalIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, methodOffset); - break; - } - case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: { - auto methodIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::METHOD, methodIndex, methodOffset); - auto literalIndex = (bcIns.GetId ()).AsRawValue(); - AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType::CLASS_LITERAL, literalIndex, methodOffset); - break; - } - default: - break; - } -} - LexEnvManager::LexEnvManager(BCInfo &bcInfo) : lexEnvs_(bcInfo.GetMethodList().size()) { @@ -820,13 +824,4 @@ uint32_t LexEnvManager::GetTargetLexEnv(uint32_t methodId, uint32_t level) const } return offset; } - -void ConstantPoolInfo::AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset) -{ - Item &item = GetCPItem(type); - if (item.find(index) != item.end()) { - return; - } - item.insert({index, ItemData {index, methodOffset, nullptr}}); 
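// Editor's note: the per-opcode constant-pool bookkeeping removed above is taken over by the
// new SnapshotConstantPoolData component; CollectMethodPcsFromBC (earlier in this diff) now
// forwards each scanned instruction to it instead of switching on opcodes here. A minimal
// sketch of the replacement flow as visible in this patch -- the locals (bcIns, bcIndex,
// recordName, method) are the ones already defined in CollectMethodPcsFromBC:
//
//     snapshotCPData_.Record(bcIns, bcIndex, recordName, method);   // one call per
//     pgoBCInfo_.Record(bcIns, bcIndex, recordName, method);        // scanned instruction
//
// so the STRING / METHOD / *_LITERAL index classification presumably lives behind Record()
// rather than in this collector.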
-} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/bytecode_info_collector.h b/ecmascript/compiler/bytecode_info_collector.h index 153ae3ae7a71b226754480d26cec0a4d23aa97cf..aef5ee5f6ba0e31ea4637319b752a78f98b7bb91 100644 --- a/ecmascript/compiler/bytecode_info_collector.h +++ b/ecmascript/compiler/bytecode_info_collector.h @@ -16,9 +16,10 @@ #ifndef ECMASCRIPT_COMPILER_BYTECODE_INFO_COLLECTOR_H #define ECMASCRIPT_COMPILER_BYTECODE_INFO_COLLECTOR_H +#include "ecmascript/compiler/aot_snapshot/snapshot_constantpool_data.h" +#include "ecmascript/compiler/pgo_bc_info.h" #include "ecmascript/jspandafile/js_pandafile.h" #include "ecmascript/pgo_profiler/pgo_profiler_decoder.h" -#include "ecmascript/compiler/pgo_bc_info.h" #include "libpandafile/bytecode_instruction-inl.h" namespace panda::ecmascript::kungfu { @@ -54,6 +55,8 @@ namespace panda::ecmascript::kungfu { * of global and function f will be created when methods are executed. */ +using PGOProfilerDecoder = pgo::PGOProfilerDecoder; + enum class LexicalEnvStatus : uint8_t { VIRTUAL_LEXENV, REALITY_LEXENV @@ -389,41 +392,9 @@ private: bool isNamespace_ {false}; }; - -class ConstantPoolInfo { -public: - enum ItemType { - STRING = 0, - METHOD, - CLASS_LITERAL, - OBJECT_LITERAL, - ARRAY_LITERAL, - - ITEM_TYPE_NUM, - ITEM_TYPE_FIRST = STRING, - ITEM_TYPE_LAST = ARRAY_LITERAL, - }; - - struct ItemData { - uint32_t index {0}; - uint32_t outerMethodOffset {0}; - CString *recordName {nullptr}; - }; - - // key:constantpool index, value:ItemData - using Item = std::unordered_map; - - ConstantPoolInfo() : items_(ItemType::ITEM_TYPE_NUM, Item{}) {} - - Item& GetCPItem(ItemType type) - { - ASSERT(ItemType::ITEM_TYPE_FIRST <= type && type <= ItemType::ITEM_TYPE_LAST); - return items_[type]; - } - - void AddIndexToCPItem(ItemType type, uint32_t index, uint32_t methodOffset); -private: - std::vector items_; +struct FastCallInfo { + bool canFastCall_ {false}; + bool isNoGC_ {false}; }; class BCInfo { @@ -466,6 +437,11 @@ public: return true; } + const std::set& GetSkippedMethodSet() const + { + return skippedMethods_; + } + void AddSkippedMethod(uint32_t methodOffset) { skippedMethods_.insert(methodOffset); @@ -490,6 +466,11 @@ public: return recordNames_[index]; } + bool FindMethodOffsetToRecordName(uint32_t methodOffset) + { + return methodOffsetToRecordName_.find(methodOffset) != methodOffsetToRecordName_.end(); + } + void AddMethodOffsetToRecordName(uint32_t methodOffset, CString recordName) { methodOffsetToRecordName_.emplace(methodOffset, recordName); @@ -500,22 +481,6 @@ public: return skippedMethods_.size(); } - void AddIndexToCPInfo(ConstantPoolInfo::ItemType type, uint32_t index, uint32_t methodOffset) - { - cpInfo_.AddIndexToCPItem(type, index, methodOffset); - } - - template - void IterateConstantPoolInfo(ConstantPoolInfo::ItemType type, const Callback &cb) - { - auto &item = cpInfo_.GetCPItem(type); - for (auto &iter : item) { - ConstantPoolInfo::ItemData &data = iter.second; - data.recordName = &methodOffsetToRecordName_[data.outerMethodOffset]; - cb(data); - } - } - uint32_t GetDefineMethod(const uint32_t classLiteralOffset) const { return classTypeLOffsetToDefMethod_.at(classLiteralOffset); @@ -611,29 +576,34 @@ public: return recordToImportRecordsInfo_; } - bool IterateMethodOffsetToCanFastCall(uint32_t methodOffset, bool *isValid) + FastCallInfo IterateMethodOffsetToFastCallInfo(uint32_t methodOffset, bool *isValid) { - auto iter = methodOffsetToCanFastCall_.find(methodOffset); - if (iter != 
methodOffsetToCanFastCall_.end()) { + auto iter = methodOffsetToFastCallInfos_.find(methodOffset); + if (iter != methodOffsetToFastCallInfos_.end()) { *isValid = true; return iter->second; } *isValid = false; - return false; + return FastCallInfo(); } - void SetMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall) + void SetMethodOffsetToFastCallInfo(uint32_t methodOffset, bool canFastCall, bool noGC) { - if (methodOffsetToCanFastCall_.find(methodOffset) == methodOffsetToCanFastCall_.end()) { - methodOffsetToCanFastCall_.emplace(methodOffset, canFastCall); + if (methodOffsetToFastCallInfos_.find(methodOffset) == methodOffsetToFastCallInfos_.end()) { + methodOffsetToFastCallInfos_.emplace(methodOffset, FastCallInfo { canFastCall, noGC }); } } void ModifyMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall) { - methodOffsetToCanFastCall_.erase(methodOffset); - if (methodOffsetToCanFastCall_.find(methodOffset) == methodOffsetToCanFastCall_.end()) { - methodOffsetToCanFastCall_.emplace(methodOffset, canFastCall); + auto iter = methodOffsetToFastCallInfos_.find(methodOffset); + bool isNoGC = false; + if (iter != methodOffsetToFastCallInfos_.end()) { + isNoGC = iter->second.isNoGC_; + } + methodOffsetToFastCallInfos_.erase(methodOffset); + if (methodOffsetToFastCallInfos_.find(methodOffset) == methodOffsetToFastCallInfos_.end()) { + methodOffsetToFastCallInfos_.emplace(methodOffset, FastCallInfo { canFastCall, isNoGC }); } } private: @@ -643,13 +613,12 @@ private: std::unordered_map methodList_ {}; std::unordered_map methodOffsetToRecordName_ {}; std::set skippedMethods_ {}; - ConstantPoolInfo cpInfo_; size_t maxMethodSize_; std::unordered_map classTypeLOffsetToDefMethod_ {}; std::unordered_map functionTypeIdToMethodOffset_ {}; std::unordered_map recordNameToExportInfo_ {}; std::unordered_map recordToImportRecordsInfo_ {}; - std::unordered_map methodOffsetToCanFastCall_ {}; + std::unordered_map methodOffsetToFastCallInfos_ {}; }; class LexEnvManager { @@ -687,6 +656,10 @@ class BytecodeInfoCollector { public: BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFile, PGOProfilerDecoder &pfDecoder, size_t maxAotMethodSize, bool enableCollectLiteralInfo); + + BytecodeInfoCollector(EcmaVM *vm, JSPandaFile *jsPandaFile, JSHandle &jsFunction, + PGOProfilerDecoder &pfDecoder, bool enableCollectLiteralInfo); + ~BytecodeInfoCollector(); NO_COPY_SEMANTIC(BytecodeInfoCollector); NO_MOVE_SEMANTIC(BytecodeInfoCollector); @@ -696,6 +669,11 @@ public: return enableCollectLiteralInfo_; } + Bytecodes* GetByteCodes() + { + return &bytecodes_; + } + BCInfo& GetBytecodeInfo() { return bytecodeInfo_; @@ -706,11 +684,21 @@ public: return &bytecodeInfo_; } - PGOBCInfo* GetPGOBCInfo() + const PGOBCInfo* GetPGOBCInfo() const { return &pgoBCInfo_; } + void StoreDataToGlobalData(SnapshotGlobalData &snapshotData) + { + snapshotCPData_.StoreDataToGlobalData(snapshotData, GetSkippedMethodSet()); + } + + const std::set& GetSkippedMethodSet() const + { + return bytecodeInfo_.GetSkippedMethodSet(); + } + bool IsSkippedMethod(uint32_t methodOffset) const { return bytecodeInfo_.IsSkippedMethod(methodOffset); @@ -732,9 +720,13 @@ public: } template - void IterateConstantPoolInfo(ConstantPoolInfo::ItemType type, const Callback &cb) + void IterateAllMethods(const Callback &cb) { - bytecodeInfo_.IterateConstantPoolInfo(type, cb); + auto &methodList = bytecodeInfo_.GetMethodList(); + for (const auto &method : methodList) { + uint32_t methodOffset = method.first; + cb(methodOffset); + } } private: @@ -745,12 
+737,6 @@ private: return methodInfoIndex_++; } - void AddConstantPoolIndexToBCInfo(ConstantPoolInfo::ItemType type, - uint32_t index, uint32_t methodOffset) - { - bytecodeInfo_.AddIndexToCPInfo(type, index, methodOffset); - } - inline std::string GetClassName(const EntityId entityId) { std::string className(MethodLiteral::GetMethodName(jsPandaFile_, entityId)); @@ -763,6 +749,7 @@ private: const CString GetEntryFunName(const std::string_view &entryPoint) const; void ProcessClasses(); + void ProcessMethod(JSHandle &jsFunction); void RearrangeInnerMethods(); void CollectMethodPcsFromBC(const uint32_t insSz, const uint8_t *insArr, MethodLiteral *method, std::vector &classNameVec, const CString &recordName, @@ -780,7 +767,6 @@ private: bool *canFastCall); void CollectModuleInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method, const CString &recordName); - void CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method); void IterateLiteral(const MethodLiteral *method, std::vector &classOffsetVector); void StoreClassTypeOffset(const uint32_t typeOffset, std::vector &classOffsetVector); void CollectClassLiteralInfo(const MethodLiteral *method, const std::vector &classNameVec); @@ -798,10 +784,12 @@ private: BCInfo bytecodeInfo_; PGOProfilerDecoder &pfDecoder_; PGOBCInfo pgoBCInfo_ {}; + SnapshotConstantPoolData snapshotCPData_; size_t methodInfoIndex_ {0}; bool enableCollectLiteralInfo_ {false}; std::set classDefBCIndexes_ {}; LexEnvManager* envManager_ {nullptr}; + Bytecodes bytecodes_; }; } // namespace panda::ecmascript::kungfu #endif // ECMASCRIPT_COMPILER_BYTECODE_INFO_COLLECTOR_H diff --git a/ecmascript/compiler/bytecodes.cpp b/ecmascript/compiler/bytecodes.cpp index 77692ec1ec5d5c8a1cb508d6b82a9bbb8480e528..b4bdc7359696a1b52114c61eb764206c17a1d594 100644 --- a/ecmascript/compiler/bytecodes.cpp +++ b/ecmascript/compiler/bytecodes.cpp @@ -60,17 +60,84 @@ BytecodeMetaData BytecodeMetaData::InitBytecodeMetaData(const uint8_t *pc) } switch (inst.GetOpcode()) { + case EcmaOpcode::MOV_V4_V4: + case EcmaOpcode::MOV_V8_V8: + case EcmaOpcode::MOV_V16_V16: + case EcmaOpcode::STA_V8: + case EcmaOpcode::LDA_V8: + case EcmaOpcode::LDHOLE: + case EcmaOpcode::LDAI_IMM32: + case EcmaOpcode::FLDAI_IMM64: + case EcmaOpcode::LDFUNCTION: + case EcmaOpcode::TYPEOF_IMM8: + case EcmaOpcode::TYPEOF_IMM16: + case EcmaOpcode::LDNAN: + case EcmaOpcode::LDINFINITY: + case EcmaOpcode::LDUNDEFINED: + case EcmaOpcode::LDNULL: + case EcmaOpcode::LDTRUE: + case EcmaOpcode::LDFALSE: + case EcmaOpcode::LDSYMBOL: + case EcmaOpcode::LDGLOBAL: + case EcmaOpcode::LDBIGINT_ID16: + case EcmaOpcode::LDLEXVAR_IMM4_IMM4: + case EcmaOpcode::LDLEXVAR_IMM8_IMM8: + case EcmaOpcode::WIDE_LDLEXVAR_PREF_IMM16_IMM16: + case EcmaOpcode::WIDE_LDPATCHVAR_PREF_IMM16: + case EcmaOpcode::LDA_STR_ID16: + case EcmaOpcode::RETURN: + case EcmaOpcode::RETURNUNDEFINED: + flags |= BytecodeFlags::NO_GC; + break; + default: + break; + } + + switch (inst.GetOpcode()) { + case EcmaOpcode::MOV_V4_V4: + case EcmaOpcode::MOV_V8_V8: + case EcmaOpcode::MOV_V16_V16: + case EcmaOpcode::STA_V8: + case EcmaOpcode::LDA_V8: + case EcmaOpcode::LDNAN: + case EcmaOpcode::LDINFINITY: + case EcmaOpcode::LDUNDEFINED: + case EcmaOpcode::LDNULL: + case EcmaOpcode::LDTRUE: + case EcmaOpcode::LDFALSE: + case EcmaOpcode::LDHOLE: + case EcmaOpcode::LDAI_IMM32: + case EcmaOpcode::FLDAI_IMM64: + case EcmaOpcode::LDFUNCTION: + case EcmaOpcode::LDA_STR_ID16: case EcmaOpcode::TYPEOF_IMM8: case EcmaOpcode::TYPEOF_IMM16: case 
EcmaOpcode::ISTRUE: case EcmaOpcode::ISFALSE: + case EcmaOpcode::JEQZ_IMM8: + case EcmaOpcode::JEQZ_IMM16: + case EcmaOpcode::JEQZ_IMM32: + case EcmaOpcode::JNEZ_IMM8: + case EcmaOpcode::JNEZ_IMM16: + case EcmaOpcode::JNEZ_IMM32: + case EcmaOpcode::JMP_IMM8: + case EcmaOpcode::JMP_IMM16: + case EcmaOpcode::JMP_IMM32: case EcmaOpcode::STMODULEVAR_IMM8: case EcmaOpcode::WIDE_STMODULEVAR_PREF_IMM16: + case EcmaOpcode::LDEXTERNALMODULEVAR_IMM8: + case EcmaOpcode::WIDE_LDEXTERNALMODULEVAR_PREF_IMM16: + case EcmaOpcode::NEWLEXENV_IMM8: + case EcmaOpcode::WIDE_NEWLEXENV_PREF_IMM16: case EcmaOpcode::POPLEXENV: case EcmaOpcode::NEWLEXENVWITHNAME_IMM8_ID16: case EcmaOpcode::WIDE_NEWLEXENVWITHNAME_PREF_IMM16_ID16: + case EcmaOpcode::ASYNCFUNCTIONENTER: + case EcmaOpcode::SETGENERATORSTATE_IMM8: case EcmaOpcode::GETRESUMEMODE: case EcmaOpcode::RESUMEGENERATOR: + case EcmaOpcode::RETURN: + case EcmaOpcode::RETURNUNDEFINED: case EcmaOpcode::LDLEXVAR_IMM4_IMM4: case EcmaOpcode::LDLEXVAR_IMM8_IMM8: case EcmaOpcode::WIDE_LDLEXVAR_PREF_IMM16_IMM16: @@ -84,7 +151,14 @@ BytecodeMetaData BytecodeMetaData::InitBytecodeMetaData(const uint8_t *pc) case EcmaOpcode::CREATEEMPTYOBJECT: case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: - case EcmaOpcode::SETGENERATORSTATE_IMM8: + case EcmaOpcode::CREATEITERRESULTOBJ_V8_V8: + case EcmaOpcode::DEFINEFUNC_IMM8_ID16_IMM8: + case EcmaOpcode::DEFINEFUNC_IMM16_ID16_IMM8: + case EcmaOpcode::DEFINEMETHOD_IMM8_ID16_IMM8: + case EcmaOpcode::DEFINEMETHOD_IMM16_ID16_IMM8: + case EcmaOpcode::GETUNMAPPEDARGS: + case EcmaOpcode::DEBUGGER: + case EcmaOpcode::NOP: flags |= BytecodeFlags::NO_THROW; break; default: @@ -198,10 +272,11 @@ BytecodeMetaData BytecodeMetaData::InitBytecodeMetaData(const uint8_t *pc) kind = BytecodeKind::SUSPEND; break; case EcmaOpcode::RESUMEGENERATOR: - case EcmaOpcode::CREATEOBJECTWITHEXCLUDEDKEYS_IMM8_V8_V8: kind = BytecodeKind::RESUME; break; case EcmaOpcode::DEBUGGER: + flags |= BytecodeFlags::DEBUGGER_STMT; + break; case EcmaOpcode::NOP: kind = BytecodeKind::DISCARDED; flags |= BytecodeFlags::NO_SIDE_EFFECTS; @@ -1053,18 +1128,20 @@ void BytecodeInfo::InitBytecodeInfo(BytecodeCircuitBuilder *builder, uint8_t numKeys = READ_INST_8_0(); uint16_t v0 = READ_INST_8_1(); uint16_t firstArgRegIdx = READ_INST_8_2(); - info.inputs.emplace_back(Immediate(numKeys)); info.inputs.emplace_back(VirtualRegister(v0)); - info.inputs.emplace_back(Immediate(firstArgRegIdx)); + for (int i = 0; i <= numKeys; i++) { + info.inputs.emplace_back(VirtualRegister(firstArgRegIdx + i)); + } break; } case EcmaOpcode::WIDE_CREATEOBJECTWITHEXCLUDEDKEYS_PREF_IMM16_V8_V8: { uint16_t numKeys = READ_INST_16_1(); uint16_t v0 = READ_INST_8_3(); uint16_t firstArgRegIdx = READ_INST_8_4(); - info.inputs.emplace_back(Immediate(numKeys)); info.inputs.emplace_back(VirtualRegister(v0)); - info.inputs.emplace_back(Immediate(firstArgRegIdx)); + for (int i = 0; i <= numKeys; i++) { + info.inputs.emplace_back(VirtualRegister(firstArgRegIdx + i)); + } break; } case EcmaOpcode::COPYRESTARGS_IMM8: { @@ -1566,13 +1643,13 @@ const BytecodeInfo &BytecodeIterator::GetBytecodeInfo() const const uint8_t *BytecodeIterator::PeekNextPc(size_t i) const { - ASSERT(index_ + i <= end_); - return builder_->GetPCByIndex(index_ + i); + ASSERT(index_ + static_cast(i) <= end_); + return builder_->GetPCByIndex(static_cast(index_ + i)); } const uint8_t *BytecodeIterator::PeekPrevPc(size_t i) const { - ASSERT(index_ - i >= start_); - return builder_->GetPCByIndex(index_ - i); 
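// Editor's note: the Peek* assertions here only become meaningful with this patch's switch of
// BytecodeIterator's start_/end_/index_ from uint32_t to int32_t (with INVALID_INDEX = -1,
// see bytecodes.h below). With unsigned fields, `index_ - i` wraps around instead of going
// negative, so the old lower-bound assert could never fire when start_ == 0. A self-contained
// illustration (assumed values) of the failure mode the casts guard against:
//
//     uint32_t index = 0;
//     uint32_t bad = index - 1;                        // wraps to 4294967295; bad >= 0 holds
//     int32_t good = static_cast<int32_t>(index) - 1;  // -1; a range check can reject it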
+ ASSERT(index_ - static_cast(i) >= start_); + return builder_->GetPCByIndex(static_cast(index_ - i)); } } // panda::ecmascript::kungfu diff --git a/ecmascript/compiler/bytecodes.h b/ecmascript/compiler/bytecodes.h index 618e756600fe9900b18a4179bf75227b650864dd..e143bdb57574a1e9af27c0268d5b4bbe669ae7ab 100644 --- a/ecmascript/compiler/bytecodes.h +++ b/ecmascript/compiler/bytecodes.h @@ -49,6 +49,8 @@ enum BytecodeFlags : uint32_t { READ_FUNC = 1 << 9, READ_NEWTARGET = 1 << 10, READ_ARGC = 1 << 11, + NO_GC = 1 << 12, + DEBUGGER_STMT = 1 << 13, }; enum BytecodeKind : uint32_t { @@ -70,7 +72,7 @@ class BytecodeMetaData { public: static constexpr uint32_t MAX_OPCODE_SIZE = 16; static constexpr uint32_t MAX_SIZE_BITS = 4; - static constexpr uint32_t BYTECODE_FLAGS_SIZE = 12; + static constexpr uint32_t BYTECODE_FLAGS_SIZE = 14; static constexpr uint32_t BYTECODE_KIND_SIZE = 4; using OpcodeField = panda::BitField; @@ -113,6 +115,11 @@ public: return HasFlag(BytecodeFlags::WRITE_ENV); } + bool IsNoGC() const + { + return HasFlag(BytecodeFlags::NO_GC); + } + bool IsMov() const { return GetKind() == BytecodeKind::MOV; @@ -215,6 +222,11 @@ public: return GetKind() == BytecodeKind::CALL_BC; } + bool HasDebuggerStmt() const + { + return HasFlag(BytecodeFlags::DEBUGGER_STMT); + } + private: BytecodeMetaData() = default; DEFAULT_NOEXCEPT_MOVE_SEMANTIC(BytecodeMetaData); @@ -308,6 +320,71 @@ public: return bytecodes_[primary]; } + static bool IsCallOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CALLARG0_IMM8: + case EcmaOpcode::CALLARG1_IMM8_V8: + case EcmaOpcode::CALLARGS2_IMM8_V8_V8: + case EcmaOpcode::CALLARGS3_IMM8_V8_V8_V8: + case EcmaOpcode::CALLRANGE_IMM8_IMM8_V8: + case EcmaOpcode::WIDE_CALLRANGE_PREF_IMM16_V8: + case EcmaOpcode::CALLTHIS0_IMM8_V8: + case EcmaOpcode::CALLTHIS1_IMM8_V8_V8: + case EcmaOpcode::CALLTHIS2_IMM8_V8_V8_V8: + case EcmaOpcode::CALLTHIS3_IMM8_V8_V8_V8_V8: + case EcmaOpcode::CALLTHISRANGE_IMM8_IMM8_V8: + case EcmaOpcode::WIDE_CALLTHISRANGE_PREF_IMM16_V8: + return true; + default: + return false; + } + } + + static bool IsCreateObjectWithBufferOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CREATEOBJECTWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEOBJECTWITHBUFFER_IMM16_ID16: + return true; + default: + return false; + } + } + + static bool IsCreateEmptyArrayOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CREATEEMPTYARRAY_IMM8: + case EcmaOpcode::CREATEEMPTYARRAY_IMM16: + return true; + default: + return false; + } + } + + static bool IsCreateArrayWithBufferOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM8_ID16: + case EcmaOpcode::CREATEARRAYWITHBUFFER_IMM16_ID16: + return true; + default: + return false; + } + } + + static bool IsDefineClassWithBufferOp(EcmaOpcode opcode) + { + switch (opcode) { + case EcmaOpcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8: + case EcmaOpcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: + return true; + default: + return false; + } + } + private: static uint8_t ReadByte(const uint8_t *pc) { @@ -636,11 +713,21 @@ public: return HasFuncIn() || HasNewTargetIn() || ThisObjectIn() || HasArgcIn(); } + bool HasFrameState() const + { + return HasFrameArgs() || !NoThrow(); + } + bool IsCall() const { return metaData_.IsCall(); } + bool HasDebuggerStmt() const + { + return metaData_.HasDebuggerStmt(); + } + inline EcmaOpcode GetOpcode() const { return metaData_.GetOpcode(); @@ -656,6 +743,7 @@ private: class BytecodeIterator { public: + static 
constexpr int INVALID_INDEX = -1; BytecodeIterator() = default; BytecodeIterator(BytecodeCircuitBuilder *builder, uint32_t start, uint32_t end) @@ -664,8 +752,8 @@ public: uint32_t start, uint32_t end) { builder_ = builder; - start_ = start; - end_ = end; + start_ = static_cast(start); + end_ = static_cast(end); } BytecodeIterator& operator++() @@ -685,13 +773,12 @@ public: void Goto(uint32_t i) { - index_ = i; + index_ = static_cast(i); } void GotoStart() { index_ = start_; - ASSERT(InRange()); } void GotoEnd() @@ -700,6 +787,11 @@ public: ASSERT(InRange()); } + bool IsInRange(int idx) const + { + return (idx <= end_) && (idx >= start_); + } + bool InRange() const { return (index_ <= end_) && (index_ >= start_); @@ -712,7 +804,7 @@ public: uint32_t Index() const { - return index_; + return static_cast(index_); } const BytecodeInfo &GetBytecodeInfo() const; @@ -721,9 +813,9 @@ public: private: BytecodeCircuitBuilder *builder_ {nullptr}; - uint32_t start_ {0}; - uint32_t end_ {0}; - uint32_t index_{ INVALID_INDEX }; + int32_t start_ {0}; + int32_t end_ {0}; + int32_t index_{ INVALID_INDEX }; }; class BytecodeCallArgc { diff --git a/ecmascript/compiler/call_signature.cpp b/ecmascript/compiler/call_signature.cpp index fd21f5f9a63213ad198d5863d165c5240dd0d8bd..5cb35158d0feacfbab5c7a0bc2e05373a29abc2a 100644 --- a/ecmascript/compiler/call_signature.cpp +++ b/ecmascript/compiler/call_signature.cpp @@ -14,6 +14,7 @@ */ #include "ecmascript/compiler/call_signature.h" +#include "ecmascript/compiler/variable_type.h" #if defined(__clang__) #pragma clang diagnostic push @@ -644,13 +645,17 @@ DEF_CALL_SIGNATURE(ConstructorCheck) DEF_CALL_SIGNATURE(CreateEmptyArray) { - // 1 : 1 input parameters - CallSignature signature("CreateEmptyArray", 0, 1, + // 5 : 5 input parameters + CallSignature signature("CreateEmptyArray", 0, 5, ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); *callSign = signature; - // 1 : 1 input parameters - std::array params = { + // 5 : 5 input parameters + std::array params = { VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // jsFunc + VariableType::JS_ANY(), // pc + VariableType::INT32(), // profileTypeInfo + VariableType::INT32(), // slotId }; callSign->SetParameters(params.data()); callSign->SetCallConv(CallSignature::CallConv::CCallConv); @@ -658,15 +663,18 @@ DEF_CALL_SIGNATURE(CreateEmptyArray) DEF_CALL_SIGNATURE(CreateArrayWithBuffer) { - // 3 : 3 input parameters - CallSignature signature("CreateArrayWithBuffer", 0, 3, + // 6 : 6 input parameters + CallSignature signature("CreateArrayWithBuffer", 0, 6, ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); *callSign = signature; - // 3 : 3 input parameters - std::array params = { + // 6 : 6 input parameters + std::array params = { VariableType::NATIVE_POINTER(), // glue VariableType::INT32(), // index VariableType::JS_ANY(), // jsFunc + VariableType::JS_ANY(), // pc + VariableType::INT32(), // profileTypeInfo + VariableType::INT32(), // slotId }; callSign->SetParameters(params.data()); callSign->SetCallConv(CallSignature::CallConv::CCallConv); @@ -1120,6 +1128,27 @@ DEF_CALL_SIGNATURE(ResumeUncaughtFrameAndReturn) callSign->SetCallConv(CallSignature::CallConv::GHCCallConv); } +DEF_CALL_SIGNATURE(ResumeRspAndRollback) +{ + // 8 : 8 input parameters + CallSignature resumeRspAndRollback("ResumeRspAndRollback", 0, 8, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = resumeRspAndRollback; + std::array params = { // 8 : 8 input parameters + VariableType::NATIVE_POINTER(), + 
VariableType::NATIVE_POINTER(), + VariableType::NATIVE_POINTER(), + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + VariableType::JS_ANY(), + VariableType::INT32(), + VariableType::NATIVE_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); + callSign->SetCallConv(CallSignature::CallConv::GHCCallConv); +} + DEF_CALL_SIGNATURE(StringsAreEquals) { // 2 : 2 input parameters @@ -1135,6 +1164,36 @@ DEF_CALL_SIGNATURE(StringsAreEquals) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(JSHClassFindProtoTransitions) +{ + // 3 : 3 input parameters + CallSignature bigIntSameValueZero("JSHClassFindProtoTransitions", 0, 3, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = bigIntSameValueZero; + std::array params = { // 3 : 3 input parameters + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + +DEF_CALL_SIGNATURE(NumberHelperStringToDouble) +{ + // 1 : 1 input parameters + CallSignature bigIntSameValueZero("NumberHelperStringToDouble", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = bigIntSameValueZero; + std::array params = { // 1 : 1 input parameters + VariableType::JS_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + DEF_CALL_SIGNATURE(BigIntEquals) { // 2 : 2 input parameters @@ -1150,6 +1209,69 @@ DEF_CALL_SIGNATURE(BigIntEquals) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(LocaleCompareNoGc) +{ + // 4 : 4 input parameters + CallSignature localeCompareNoGc("LocaleCompareNoGc", 0, 4, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = localeCompareNoGc; + std::array params = { // 4 : 4 input parameters + VariableType::NATIVE_POINTER(), + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + +DEF_CALL_SIGNATURE(BigIntSameValueZero) +{ + // 1 : 1 input parameters + CallSignature bigIntSameValueZero("BigIntSameValueZero", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::BOOL()); + *callSign = bigIntSameValueZero; + std::array params = { // 2 : 2 input parameters + VariableType::JS_POINTER(), + VariableType::JS_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + +DEF_CALL_SIGNATURE(StringGetStart) +{ + CallSignature stringGetStart("StringGetStart", 0, 4, ArgumentsOrder::DEFAULT_ORDER, VariableType::INT32()); + *callSign = stringGetStart; + std::array params = { // 4 : four input parameters + VariableType::BOOL(), + VariableType::JS_POINTER(), + VariableType::INT32(), + VariableType::INT32(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + +DEF_CALL_SIGNATURE(StringGetEnd) +{ + CallSignature stringGetEnd("StringGetEnd", 0, 5, ArgumentsOrder::DEFAULT_ORDER, VariableType::INT32()); + *callSign = 
stringGetEnd; + std::array params = { // 5 : five input parameters + VariableType::BOOL(), + VariableType::JS_POINTER(), + VariableType::INT32(), + VariableType::INT32(), + VariableType::INT32(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + #define PUSH_CALL_ARGS_AND_DISPATCH_SIGNATURE_COMMON(name) \ /* 1 : 1 input parameters */ \ CallSignature signature(#name, 0, 1, \ @@ -1361,82 +1483,73 @@ DEF_CALL_SIGNATURE(CallOptimized) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(DebugPrint) +DEF_CALL_SIGNATURE(Dump) { - // 1 : 1 input parameters - CallSignature debugPrint("DebugPrint", 0, 1, + constexpr size_t N_INPUT_PARAMETERS = 1; + CallSignature dump("Dump", 0, N_INPUT_PARAMETERS, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = debugPrint; - // 1 : 1 input parameters - std::array params = { - VariableType::INT32(), + *callSign = dump; + std::array params = { + VariableType::JS_POINTER() // Tagged value of the object to be dumped }; - callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(DebugPrintInstruction) +DEF_CALL_SIGNATURE(DebugDump) { - // 2 : 2 input parameters - CallSignature debugPrintInstruction("DebugPrintInstruction", 0, 2, + constexpr size_t N_INPUT_PARAMETERS = 1; + CallSignature debugDump("DebugDump", 0, N_INPUT_PARAMETERS, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = debugPrintInstruction; - // 2 : 2 input parameters - std::array params = { - VariableType::NATIVE_POINTER(), - VariableType::NATIVE_POINTER(), + *callSign = debugDump; + std::array params = { + VariableType::JS_POINTER() // Tagged value of the object to be dumped }; - callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(Comment) +DEF_CALL_SIGNATURE(DumpWithHint) { - // 1 : 1 input parameters - CallSignature comment("Comment", 0, 1, + constexpr size_t N_INPUT_PARAMETERS = 2; + CallSignature dumpWithHint("DumpWithHint", 0, N_INPUT_PARAMETERS, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = comment; - // 1 : 1 input parameters - std::array params = { - VariableType::NATIVE_POINTER(), + *callSign = dumpWithHint; + std::array params = { + VariableType::NATIVE_POINTER(), // String created via CircuitBuilder::StringPtr() + VariableType::JS_POINTER() // Tagged value of the object to be dumped }; callSign->SetParameters(params.data()); callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(ProfileCall) +DEF_CALL_SIGNATURE(DebugDumpWithHint) { - // 2 : 2 input parameters - CallSignature callProfilerInstruction("ProfileCall", 0, 2, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = callProfilerInstruction; - // 2 : 2 input parameters - std::array params = { - VariableType::NATIVE_POINTER(), - VariableType::JS_ANY(), + constexpr size_t N_INPUT_PARAMETERS = 2; + CallSignature debugDumpWithHint("DebugDumpWithHint", 0, N_INPUT_PARAMETERS, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = debugDumpWithHint; + std::array params = { + VariableType::NATIVE_POINTER(), // String created via 
CircuitBuilder::StringPtr() + VariableType::JS_POINTER() // Tagged value of the object to be dumped }; - callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(ProfileDefineClass) +DEF_CALL_SIGNATURE(DebugPrint) { - // 4: 4 input parameters - CallSignature defineProfInstruction( - "ProfileDefineClass", 0, 4, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = defineProfInstruction; - // 4: 4 input parameters - std::array params = { // 4 : 4 input parameters - VariableType::NATIVE_POINTER(), - VariableType::JS_ANY(), + // 1 : 1 input parameters + CallSignature debugPrint("DebugPrint", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = debugPrint; + // 1 : 1 input parameters + std::array params = { VariableType::INT32(), - VariableType::JS_ANY(), }; callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); @@ -1444,19 +1557,15 @@ DEF_CALL_SIGNATURE(ProfileDefineClass) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(ProfileCreateObject) +DEF_CALL_SIGNATURE(DebugPrintCustom) { - // 5: 5 input parameters - CallSignature defineProfInstruction( - "ProfileCreateObject", 0, 5, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = defineProfInstruction; - // 4: 4 input parameters - std::array params = { // 5 : 5 input parameters - VariableType::NATIVE_POINTER(), - VariableType::JS_ANY(), - VariableType::INT32(), - VariableType::JS_ANY(), - VariableType::JS_ANY(), + // 1 : 1 input parameters + CallSignature debugPrintCustom("DebugPrintCustom", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = debugPrintCustom; + // 1 : 1 input parameters + std::array params = { + VariableType::NATIVE_POINTER() // Format string created via CircuitBuilder::StringPtr() }; callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); @@ -1464,17 +1573,16 @@ DEF_CALL_SIGNATURE(ProfileCreateObject) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(ProfileOpType) +DEF_CALL_SIGNATURE(DebugPrintInstruction) { - // 4: 4 input parameters - CallSignature typeProfInstruction("ProfileOpType", 0, 4, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = typeProfInstruction; - // 4: 4 input parameters - std::array params = { // 4 : 4 input parameters + // 2 : 2 input parameters + CallSignature debugPrintInstruction("DebugPrintInstruction", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = debugPrintInstruction; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), VariableType::NATIVE_POINTER(), - VariableType::JS_ANY(), - VariableType::INT32(), - VariableType::INT32(), }; callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); @@ -1482,20 +1590,16 @@ DEF_CALL_SIGNATURE(ProfileOpType) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } -DEF_CALL_SIGNATURE(ProfileObjLayout) +DEF_CALL_SIGNATURE(Comment) { - // 4: 4 input parameters - CallSignature layoutProfInstruction("ProfileObjLayout", 0, 5, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); - *callSign = layoutProfInstruction; - // 4: 4 input parameters - std::array params = { // 5 : 5 input parameters + // 1 : 1 input parameters + CallSignature comment("Comment", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + 
*callSign = comment; + // 1 : 1 input parameters + std::array params = { VariableType::NATIVE_POINTER(), - VariableType::JS_ANY(), - VariableType::INT32(), - VariableType::JS_ANY(), - VariableType::INT32(), }; - callSign->SetVariadicArgs(true); callSign->SetParameters(params.data()); callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); @@ -1517,6 +1621,22 @@ DEF_CALL_SIGNATURE(FatalPrint) callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } +DEF_CALL_SIGNATURE(FatalPrintCustom) +{ + // 1 : 1 input parameters + CallSignature fatalPrintCustom("FatalPrintCustom", 0, 1, + ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); + *callSign = fatalPrintCustom; + // 1 : 1 input parameters + std::array params = { + VariableType::NATIVE_POINTER() // Format string created via CircuitBuilder::StringPtr() + }; + callSign->SetVariadicArgs(true); + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + DEF_CALL_SIGNATURE(GetActualArgvNoGC) { CallSignature index("GetActualArgvNoGC", 0, 1, ArgumentsOrder::DEFAULT_ORDER, VariableType::NATIVE_POINTER()); @@ -1662,9 +1782,24 @@ DEF_CALL_SIGNATURE(FindElementWithCache) } DEF_CALL_SIGNATURE(DoubleToInt) +{ + // 2 : 2 input parameters + CallSignature index("DoubleToInt", 0, 2, ArgumentsOrder::DEFAULT_ORDER, VariableType::INT32()); + *callSign = index; + // 2 : 2 input parameters + std::array params = { + VariableType::FLOAT64(), + VariableType::NATIVE_POINTER(), + }; + callSign->SetParameters(params.data()); + callSign->SetGCLeafFunction(true); + callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); +} + +DEF_CALL_SIGNATURE(DoubleToLength) { // 1 : 1 input parameters - CallSignature index("DoubleToInt", 0, 1, ArgumentsOrder::DEFAULT_ORDER, VariableType::INT32()); + CallSignature index("DoubleToLength", 0, 1, ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); *callSign = index; // 1 : 1 input parameters std::array params = { @@ -1935,8 +2070,8 @@ DEF_CALL_SIGNATURE(EndCallTimer) { CallSignature index("EndCallTimer", 0, 2, ArgumentsOrder::DEFAULT_ORDER, VariableType::VOID()); *callSign = index; - // 3 : 3 input parameters - std::array params = { + // 2 : 2 input parameters + std::array params = { VariableType::NATIVE_POINTER(), VariableType::JS_ANY() }; @@ -1944,4 +2079,127 @@ DEF_CALL_SIGNATURE(EndCallTimer) callSign->SetGCLeafFunction(true); callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC); } + +DEF_CALL_SIGNATURE(GetSingleCharCodeByIndex) +{ + // 3 : 3 input parameters + CallSignature signature("GetSingleCharCodeByIndex", 0, 3, + ArgumentsOrder::DEFAULT_ORDER, VariableType::INT32()); + *callSign = signature; + // 3 : 3 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // ecmaString + VariableType::INT32(), // index + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(CreateStringBySingleCharCode) +{ + // 2 : 2 input parameters + CallSignature signature("CreateStringByCharCode", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::INT32(), // charcode + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + 
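Aside: each DEF_CALL_SIGNATURE block above is purely declarative — the VariableType list and CallConv::CCallConv pin down the machine-level prototype that compiled code will emit calls against. Below is a minimal sketch of the C++ prototypes the two signatures above describe; the extern "C" spelling and the NATIVE_POINTER/JS_ANY type mappings are illustrative assumptions for this sketch, not the runtime's actual declarations.

#include <cstdint>

// Assumed mappings, for illustration only:
//   NATIVE_POINTER -> uintptr_t (the 'glue' pointer to per-thread runtime state)
//   JS_ANY         -> a 64-bit tagged value
//   INT32          -> int32_t
using JSTaggedType = uint64_t;

// "GetSingleCharCodeByIndex": 3 inputs, returns INT32, C calling convention.
extern "C" int32_t GetSingleCharCodeByIndex(uintptr_t glue, JSTaggedType ecmaString, int32_t index);

// "CreateStringBySingleCharCode": 2 inputs, returns JS_ANY, C calling convention.
extern "C" JSTaggedType CreateStringBySingleCharCode(uintptr_t glue, int32_t charCode);
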
+DEF_CALL_SIGNATURE(Getpropiterator) +{ + // 2 : 2 input parameters + CallSignature signature("Getpropiterator", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // object + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(Getnextpropname) +{ + // 2 : 2 input parameters + CallSignature signature("Getnextpropname", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // iter + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(CreateJSSetIterator) +{ + // 2 : 2 input parameters + CallSignature signature("CreateJSSetIterator", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // obj + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(CreateJSMapIterator) +{ + // 2 : 2 input parameters + CallSignature signature("CreateJSMapIterator", 0, 2, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 2 : 2 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // obj + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(FastStringEqual) +{ + // 3 : 3 input parameters + CallSignature signature("FastStringEqual", 0, 3, + ArgumentsOrder::DEFAULT_ORDER, VariableType::BOOL()); + *callSign = signature; + // 3 : 3 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // ecmaString1 + VariableType::JS_ANY(), // ecmaString2 + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} + +DEF_CALL_SIGNATURE(FastStringAdd) +{ + // 3 : 3 input parameters + CallSignature signature("FastStringAdd", 0, 3, + ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY()); + *callSign = signature; + // 3 : 3 input parameters + std::array params = { + VariableType::NATIVE_POINTER(), // glue + VariableType::JS_ANY(), // ecmaString1 + VariableType::JS_ANY(), // ecmaString2 + }; + callSign->SetParameters(params.data()); + callSign->SetCallConv(CallSignature::CallConv::CCallConv); +} } // namespace panda::ecmascript::kungfu diff --git a/ecmascript/compiler/call_signature.h b/ecmascript/compiler/call_signature.h index a0c47c0e7da37a79d22306dda5b5ebcc61de1638..3add28c7a83e26a6bf6dc887321f573a9ef164ba 100644 --- a/ecmascript/compiler/call_signature.h +++ b/ecmascript/compiler/call_signature.h @@ -21,7 +21,6 @@ #include #include "ecmascript/compiler/variable_type.h" -#include "ecmascript/compiler/test_stubs_signature.h" #include "libpandabase/macros.h" #include "libpandabase/utils/bit_field.h" @@ -410,20 +409,24 @@ private: V(ResumeRspAndReturn) \ V(ResumeCaughtFrameAndDispatch) \ V(ResumeUncaughtFrameAndReturn) \ + V(ResumeRspAndRollback) \ V(StringsAreEquals) \ V(BigIntEquals) \ + V(BigIntSameValueZero) \ + V(Dump) \ + V(DebugDump) \ + V(DumpWithHint) \ + 
V(DebugDumpWithHint) \ V(DebugPrint) \ + V(DebugPrintCustom) \ V(DebugPrintInstruction) \ V(Comment) \ - V(ProfileCall) \ - V(ProfileDefineClass) \ - V(ProfileCreateObject) \ - V(ProfileOpType) \ - V(ProfileObjLayout) \ V(FatalPrint) \ + V(FatalPrintCustom) \ V(GetActualArgvNoGC) \ V(InsertOldToNewRSet) \ V(DoubleToInt) \ + V(DoubleToLength) \ V(FloatMod) \ V(FloatSqrt) \ V(FloatCos) \ @@ -458,7 +461,19 @@ private: V(CallReturnWithArgv) \ V(StartCallTimer) \ V(EndCallTimer) \ - TEST_STUB_SIGNATRUE_LIST(V) + V(GetSingleCharCodeByIndex) \ + V(CreateStringBySingleCharCode) \ + V(FastStringEqual) \ + V(FastStringAdd) \ + V(Getpropiterator) \ + V(Getnextpropname) \ + V(CreateJSSetIterator) \ + V(CreateJSMapIterator) \ + V(JSHClassFindProtoTransitions) \ + V(NumberHelperStringToDouble) \ + V(LocaleCompareNoGc) \ + V(StringGetStart) \ + V(StringGetEnd) #define DECL_CALL_SIGNATURE(name) \ class name##CallSignature final { \ diff --git a/ecmascript/compiler/circuit.cpp b/ecmascript/compiler/circuit.cpp index 9baf7aa906e4d813f52da1dcf4c7c633c620eb6d..21335a121547b44878d0bafa43b3d4af54eeb359 100644 --- a/ecmascript/compiler/circuit.cpp +++ b/ecmascript/compiler/circuit.cpp @@ -21,10 +21,12 @@ #include "ecmascript/platform/map.h" namespace panda::ecmascript::kungfu { -Circuit::Circuit(NativeAreaAllocator* allocator, DebugInfo* debugInfo, const char* funcName, bool isArch64) +Circuit::Circuit(NativeAreaAllocator* allocator, DebugInfo* debugInfo, const char* funcName, + bool isArch64, panda::ecmascript::FrameType type) : circuitSize_(0), gateCount_(0), time_(1), + frameType_(type), isArch64_(isArch64), chunk_(allocator), root_(Circuit::NullGate()), @@ -141,6 +143,12 @@ GateRef Circuit::NewGate(const GateMetaData *meta, MachineType machineType, return NewGate(meta, machineType, args.size(), args.begin(), type, comment); } +GateRef Circuit::NewGate(const GateMetaData *meta, MachineType machineType, + const std::vector<GateRef>& inList, GateType type, const char* comment) +{ + return NewGate(meta, machineType, inList.size(), inList.data(), type, comment); +} + GateRef Circuit::NewGate(const GateMetaData *meta, MachineType machineType, GateType type, const char* comment) { return NewGate(meta, machineType, {}, type, comment); @@ -160,14 +168,7 @@ void Circuit::PrintAllGatesWithBytecode() const std::vector<GateRef> gateList; GetAllGates(gateList); for (const auto &gate : gateList) { - if (GetOpCode(gate) == OpCode::JS_BYTECODE) { - const Gate *gatePtr = LoadGatePtrConst(gate); - auto opcode = gatePtr->GetJSBytecodeMetaData()->GetByteCodeOpcode(); - std::string bytecodeStr = GetEcmaOpcodeStr(opcode); - LoadGatePtrConst(gate)->PrintByteCode(bytecodeStr); - } else { - LoadGatePtrConst(gate)->Print(); - } + LoadGatePtrConst(gate)->PrintWithBytecode(); } } @@ -347,8 +348,11 @@ void Circuit::DeleteIn(GateRef gate, size_t idx) void Circuit::DeleteGate(GateRef gate) { - LoadGatePtr(gate)->DeleteGate(); - LoadGatePtr(gate)->SetMetaData(Nop()); + // constant in constant cache, don't delete it.
+ if (GetOpCode(gate) != OpCode::CONSTANT) { + LoadGatePtr(gate)->DeleteGate(); + LoadGatePtr(gate)->SetMetaData(Nop()); + } } void Circuit::DecreaseIn(GateRef gate, size_t idx) @@ -448,7 +452,7 @@ GateRef Circuit::GetConstantGate(MachineType machineType, uint64_t value, { auto search = constantCache_.find({machineType, value, type}); if (search != constantCache_.end()) { - return constantCache_.at({machineType, value, type}); + return search->second; } auto gate = NewGate(metaBuilder_.Constant(value), machineType, type); constantCache_[{machineType, value, type}] = gate; @@ -463,7 +467,7 @@ void Circuit::ClearConstantCache(MachineType machineType, uint64_t value, GateTy } } -GateRef Circuit::GetConstantStringGate(MachineType machineType, const std::string &str, +GateRef Circuit::GetConstantStringGate(MachineType machineType, std::string_view str, GateType type) { auto gate = NewGate(metaBuilder_.ConstString(str), machineType, type); diff --git a/ecmascript/compiler/circuit.h b/ecmascript/compiler/circuit.h index 739fc6e3ba50b4d4cb533cd9f49051d4c99bd601..45457afc3789f8217ad99dc5a9c206d6cc0659aa 100644 --- a/ecmascript/compiler/circuit.h +++ b/ecmascript/compiler/circuit.h @@ -23,7 +23,10 @@ #include #include "ecmascript/compiler/gate.h" -#include "ecmascript/compiler/gate_meta_data.h" +#include "ecmascript/compiler/share_gate_meta_data.h" +#include "ecmascript/compiler/lcr_gate_meta_data.h" +#include "ecmascript/compiler/mcr_gate_meta_data.h" +#include "ecmascript/compiler/hcr_gate_meta_data.h" #include "ecmascript/compiler/gate_meta_data_builder.h" #include "ecmascript/frames.h" @@ -35,13 +38,13 @@ class DebugInfo; enum class VisitState : uint8_t { UNVISITED, PENDING, - VISITED + VISITED, }; class Circuit { // note: calling NewGate could make all saved Gate* invalid public: explicit Circuit(NativeAreaAllocator* allocator, DebugInfo* dInfo = nullptr, const char* funcName = nullptr, - bool isArch64 = true); + bool isArch64 = true, FrameType type = FrameType::OPTIMIZED_FRAME); ~Circuit(); NO_COPY_SEMANTIC(Circuit); NO_MOVE_SEMANTIC(Circuit); @@ -52,6 +55,8 @@ public: const std::initializer_list& args, GateType type, const char* comment = nullptr); GateRef NewGate(const GateMetaData *meta, MachineType machineType, size_t numIns, const GateRef inList[], GateType type, const char* comment = nullptr); + GateRef NewGate(const GateMetaData *meta, MachineType machineType, + const std::vector& inList, GateType type, const char* comment = nullptr); void PrintAllGates() const; void PrintAllGatesWithBytecode() const; void GetAllGates(std::vector& gates) const; @@ -61,7 +66,7 @@ public: void SetFrameType(panda::ecmascript::FrameType type); GateRef GetConstantGate(MachineType machineType, uint64_t value, GateType type); void ClearConstantCache(MachineType machineType, uint64_t value, GateType type); - GateRef GetConstantStringGate(MachineType machineType, const std::string &str, GateType type); + GateRef GetConstantStringGate(MachineType machineType, std::string_view str, GateType type); GateRef NewArg(MachineType machineType, size_t index, GateType type, GateRef argRoot); GateRef GetInitialEnvGate(GateRef jsFunc); size_t GetGateCount() const; @@ -132,6 +137,14 @@ public: GATE_META_DATA_LIST_WITH_BOOL(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_WITH_BOOL_VALUE_IN(NAME, OP, R, S, D, V) \ + const GateMetaData* NAME(size_t value, bool flag) \ + { \ + return metaBuilder_.NAME(value, flag); \ + } + GATE_META_DATA_LIST_WITH_BOOL_VALUE_IN(DECLARE_GATE_META_WITH_BOOL_VALUE_IN) +#undef 
DECLARE_GATE_META_WITH_BOOL_VALUE_IN + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* NAME(uint64_t value, uint64_t pcOffset) \ { \ @@ -140,6 +153,14 @@ public: GATE_META_DATA_LIST_WITH_PC_OFFSET(DECLARE_GATE_META) #undef DECLARE_GATE_META +#define DECLARE_GATE_META_FOR_CALL(NAME, OP, R, S, D, V) \ + const GateMetaData* NAME(uint64_t value, uint64_t pcOffset, bool noGC) \ + { \ + return metaBuilder_.NAME(value, pcOffset, noGC); \ + } + GATE_META_DATA_LIST_FOR_CALL(DECLARE_GATE_META_FOR_CALL) +#undef DECLARE_GATE_META_FOR_CALL + #define DECLARE_GATE_META(NAME, OP, R, S, D, V) \ const GateMetaData* NAME(uint64_t pcOffset) const \ { \ @@ -162,11 +183,16 @@ public: return metaBuilder_.JSBytecode(valuesIn, opcode, pcOffset, flags); } - const GateMetaData* TypedBinaryOp(uint64_t value, TypedBinOp binOp, PGOSampleType type) + const GateMetaData* TypedBinaryOp(uint64_t value, TypedBinOp binOp, PGOTypeRef type) { return metaBuilder_.TypedBinaryOp(value, binOp, type); } + const GateMetaData* TypedCallTargetCheckOp(uint32_t numIns, uint64_t value, TypedCallTargetCheckOp checkOp) + { + return metaBuilder_.TypedCallTargetCheckOp(numIns, value, checkOp); + } + GateRef DeadGate() { if (dead_ == NullGate()) { @@ -183,7 +209,7 @@ public: bool IsOptimizedJSFunctionFrame() const { - return frameType_ == panda::ecmascript::FrameType::OPTIMIZED_JS_FUNCTION_FRAME + return frameType_ == FrameType::OPTIMIZED_JS_FUNCTION_FRAME || frameType_ == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME; } @@ -231,6 +257,15 @@ private: { return LoadGatePtrConst(gate)->GetMetaData(); } +#ifndef NDEBUG + GateRef GetGateRefById(size_t id) const + { + if (id > allGates_.size()) { + return NullGate(); + } + return allGates_[id]; + } +#endif private: void* space_ {nullptr}; @@ -240,7 +275,7 @@ private: std::map, GateRef> constantCache_ {}; std::map, GateRef> constantDataCache_ {}; std::map initialEnvCache_ {}; - panda::ecmascript::FrameType frameType_ {panda::ecmascript::FrameType::OPTIMIZED_FRAME}; + panda::ecmascript::FrameType frameType_ {FrameType::OPTIMIZED_FRAME}; bool isArch64_ { false }; Chunk chunk_; diff --git a/ecmascript/compiler/circuit_builder-inl.h b/ecmascript/compiler/circuit_builder-inl.h index 2e03957d2e4c271b73c3e58c22477e484b14beb2..2f822e039a9d72802185560394d773b706e1230a 100644 --- a/ecmascript/compiler/circuit_builder-inl.h +++ b/ecmascript/compiler/circuit_builder-inl.h @@ -1,1231 +1,215 @@ -/* - * Copyright (c) 2021 Huawei Device Co., Ltd. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef ECMASCRIPT_COMPILER_CIRCUIT_BUILDER_INL_H -#define ECMASCRIPT_COMPILER_CIRCUIT_BUILDER_INL_H - -#include "ecmascript/compiler/circuit_builder.h" -#include "ecmascript/mem/region.h" -#include "ecmascript/method.h" - -namespace panda::ecmascript::kungfu { -// constant -GateRef CircuitBuilder::True() -{ - return TruncInt32ToInt1(Int32(1)); -} - -GateRef CircuitBuilder::False() -{ - return TruncInt32ToInt1(Int32(0)); -} - -GateRef CircuitBuilder::Undefined() -{ - return UndefineConstant(); -} - -GateRef CircuitBuilder::Hole() -{ - return HoleConstant(); -} - -GateRef CircuitBuilder::Equal(GateRef x, GateRef y) -{ - auto xType = acc_.GetMachineType(x); - switch (xType) { - case ARCH: - case FLEX: - case I1: - case I8: - case I16: - case I32: - case I64: - return BinaryCmp(circuit_->Icmp(static_cast(ICmpCondition::EQ)), x, y); - case F32: - case F64: - return BinaryCmp(circuit_->Fcmp(static_cast(FCmpCondition::OEQ)), x, y); - default: - LOG_ECMA(FATAL) << "this branch is unreachable"; - UNREACHABLE(); - } -} - -GateRef CircuitBuilder::NotEqual(GateRef x, GateRef y) -{ - auto xType = acc_.GetMachineType(x); - switch (xType) { - case ARCH: - case FLEX: - case I1: - case I8: - case I16: - case I32: - case I64: - return BinaryCmp(circuit_->Icmp(static_cast(ICmpCondition::NE)), x, y); - case F32: - case F64: - return BinaryCmp(circuit_->Fcmp(static_cast(FCmpCondition::ONE)), x, y); - default: - LOG_ECMA(FATAL) << "this branch is unreachable"; - UNREACHABLE(); - } -} - -// memory -GateRef CircuitBuilder::Load(VariableType type, GateRef base, GateRef offset) -{ - auto label = GetCurrentLabel(); - auto depend = label->GetDepend(); - GateRef val = PtrAdd(base, offset); - GateRef result = GetCircuit()->NewGate(GetCircuit()->Load(), type.GetMachineType(), - { depend, val }, type.GetGateType()); - label->SetDepend(result); - return result; -} - -GateRef CircuitBuilder::Load(VariableType type, GateRef base, GateRef offset, GateRef depend) -{ - GateRef val = PtrAdd(base, offset); - GateRef result = GetCircuit()->NewGate(GetCircuit()->Load(), type.GetMachineType(), - { depend, val }, type.GetGateType()); - return result; -} - -// Js World -// cast operation -GateRef CircuitBuilder::GetInt64OfTInt(GateRef x) -{ - GateRef tagged = ChangeTaggedPointerToInt64(x); - return Int64And(tagged, Int64(~JSTaggedValue::TAG_MARK)); -} - -GateRef CircuitBuilder::GetInt32OfTInt(GateRef x) -{ - GateRef tagged = ChangeTaggedPointerToInt64(x); - return TruncInt64ToInt32(tagged); -} - -GateRef CircuitBuilder::TaggedCastToIntPtr(GateRef x) -{ - ASSERT(cmpCfg_ != nullptr); - return cmpCfg_->Is32Bit() ? 
GetInt32OfTInt(x) : GetInt64OfTInt(x); -} - -GateRef CircuitBuilder::GetDoubleOfTDouble(GateRef x) -{ - GateRef tagged = ChangeTaggedPointerToInt64(x); - GateRef val = Int64Sub(tagged, Int64(JSTaggedValue::DOUBLE_ENCODE_OFFSET)); - return CastInt64ToFloat64(val); -} - -GateRef CircuitBuilder::GetBooleanOfTBoolean(GateRef x) -{ - GateRef tagged = ChangeTaggedPointerToInt64(x); - return TruncInt64ToInt1(tagged); -} - -GateRef CircuitBuilder::GetDoubleOfTNumber(GateRef x) -{ - Label subentry(env_); - SubCfgEntry(&subentry); - Label isInt(env_); - Label isDouble(env_); - Label exit(env_); - DEFVAlUE(result, env_, VariableType::FLOAT64(), Double(0)); - Branch(TaggedIsInt(x), &isInt, &isDouble); - Bind(&isInt); - { - result = ChangeInt32ToFloat64(GetInt32OfTInt(x)); - Jump(&exit); - } - Bind(&isDouble); - { - result = GetDoubleOfTDouble(x); - Jump(&exit); - } - Bind(&exit); - GateRef ret = *result; - SubCfgExit(); - return ret; -} - -GateRef CircuitBuilder::DoubleToInt(GateRef x, Label *exit) -{ - Label overflow(env_); - - GateRef xInt = ChangeFloat64ToInt32(x); - DEFVAlUE(result, env_, VariableType::INT32(), xInt); - - GateRef xInt64 = CastDoubleToInt64(x); - // exp = (u64 & DOUBLE_EXPONENT_MASK) >> DOUBLE_SIGNIFICAND_SIZE - DOUBLE_EXPONENT_BIAS - GateRef exp = Int64And(xInt64, Int64(base::DOUBLE_EXPONENT_MASK)); - exp = TruncInt64ToInt32(Int64LSR(exp, Int64(base::DOUBLE_SIGNIFICAND_SIZE))); - exp = Int32Sub(exp, Int32(base::DOUBLE_EXPONENT_BIAS)); - GateRef bits = Int32(base::INT32_BITS - 1); - // exp < 32 - 1 - Branch(Int32LessThan(exp, bits), exit, &overflow); - - Bind(&overflow); - { - result = CallNGCRuntime(acc_.GetGlueFromArgList(), RTSTUB_ID(DoubleToInt), - Circuit::NullGate(), { x }, Circuit::NullGate()); - Jump(exit); - } - Bind(exit); - auto ret = *result; - return ret; -} - -GateRef CircuitBuilder::Int8Equal(GateRef x, GateRef y) -{ - return Equal(x, y); -} - -GateRef CircuitBuilder::Int32NotEqual(GateRef x, GateRef y) -{ - return NotEqual(x, y); -} - -GateRef CircuitBuilder::Int64NotEqual(GateRef x, GateRef y) -{ - return NotEqual(x, y); -} - -GateRef CircuitBuilder::Int64Equal(GateRef x, GateRef y) -{ - return Equal(x, y); -} - -GateRef CircuitBuilder::Int32Equal(GateRef x, GateRef y) -{ - return Equal(x, y); -} - -GateRef CircuitBuilder::IntPtrGreaterThan(GateRef x, GateRef y) -{ - return env_->Is32Bit() ? Int32GreaterThan(x, y) : Int64GreaterThan(x, y); -} - -template -GateRef CircuitBuilder::BinaryOp(GateRef x, GateRef y) -{ - if (Op == OpCode::ADD) { - return BinaryArithmetic(circuit_->Add(), Type, x, y); - } else if (Op == OpCode::SUB) { - return BinaryArithmetic(circuit_->Sub(), Type, x, y); - } else if (Op == OpCode::MUL) { - return BinaryArithmetic(circuit_->Mul(), Type, x, y); - } - UNREACHABLE(); - return Circuit::NullGate(); -} - -GateRef CircuitBuilder::IntPtrLSR(GateRef x, GateRef y) -{ - auto ptrSize = env_->Is32Bit() ? MachineType::I32 : MachineType::I64; - return BinaryArithmetic(circuit_->Lsr(), ptrSize, x, y); -} - -GateRef CircuitBuilder::IntPtrLSL(GateRef x, GateRef y) -{ - auto ptrSize = env_->Is32Bit() ? MachineType::I32 : MachineType::I64; - return BinaryArithmetic(circuit_->Lsl(), ptrSize, x, y); -} - -GateRef CircuitBuilder::IntPtrOr(GateRef x, GateRef y) -{ - auto ptrsize = env_->Is32Bit() ? MachineType::I32 : MachineType::I64; - return BinaryArithmetic(circuit_->Or(), ptrsize, x, y); -} - -GateRef CircuitBuilder::IntPtrDiv(GateRef x, GateRef y) -{ - return env_->Is32Bit() ? 
Int32Div(x, y) : Int64Div(x, y); -} - -GateRef CircuitBuilder::Int64ToTaggedPtr(GateRef x) -{ - return GetCircuit()->NewGate(circuit_->Int64ToTagged(), - MachineType::I64, { x }, GateType::TaggedValue()); -} - -GateRef CircuitBuilder::Int32ToTaggedPtr(GateRef x) -{ - GateRef val = SExtInt32ToInt64(x); - return Int64ToTaggedPtr(Int64Or(val, Int64(JSTaggedValue::TAG_INT))); -} - -GateRef CircuitBuilder::Int32ToTaggedInt(GateRef x) -{ - GateRef val = SExtInt32ToInt64(x); - return Int64Or(val, Int64(JSTaggedValue::TAG_INT)); -} - -// bit operation -GateRef CircuitBuilder::IsSpecial(GateRef x, JSTaggedType type) -{ - auto specialValue = circuit_->GetConstantGate( - MachineType::I64, type, GateType::TaggedValue()); - - return Equal(x, specialValue); -} - -GateRef CircuitBuilder::TaggedIsInt(GateRef x) -{ - x = ChangeTaggedPointerToInt64(x); - return Equal(Int64And(x, Int64(JSTaggedValue::TAG_MARK)), - Int64(JSTaggedValue::TAG_INT)); -} - -GateRef CircuitBuilder::TaggedIsDouble(GateRef x) -{ - x = ChangeTaggedPointerToInt64(x); - x = Int64And(x, Int64(JSTaggedValue::TAG_MARK)); - auto left = NotEqual(x, Int64(JSTaggedValue::TAG_INT)); - auto right = NotEqual(x, Int64(JSTaggedValue::TAG_OBJECT)); - return BoolAnd(left, right); -} - -GateRef CircuitBuilder::TaggedIsObject(GateRef x) -{ - x = ChangeTaggedPointerToInt64(x); - return Equal(Int64And(x, Int64(JSTaggedValue::TAG_MARK)), - Int64(JSTaggedValue::TAG_OBJECT)); -} - -GateRef CircuitBuilder::TaggedIsNumber(GateRef x) -{ - return BoolNot(TaggedIsObject(x)); -} - -GateRef CircuitBuilder::TaggedIsNumeric(GateRef x) -{ - return BoolOr(TaggedIsNumber(x), TaggedIsBigInt(x)); -} - -GateRef CircuitBuilder::DoubleIsINF(GateRef x) -{ - GateRef infinity = Double(base::POSITIVE_INFINITY); - GateRef negativeInfinity = Double(-base::POSITIVE_INFINITY); - GateRef diff1 = DoubleEqual(x, infinity); - GateRef diff2 = DoubleEqual(x, negativeInfinity); - return BoolOr(diff1, diff2); -} - -GateRef CircuitBuilder::TaggedIsHole(GateRef x) -{ - return Equal(x, HoleConstant()); -} - -GateRef CircuitBuilder::TaggedIsNullPtr(GateRef x) -{ - return Equal(x, NullPtrConstant()); -} - -GateRef CircuitBuilder::TaggedIsNotHole(GateRef x) -{ - return NotEqual(x, HoleConstant()); -} - -GateRef CircuitBuilder::TaggedIsUndefined(GateRef x) -{ - return Equal(x, UndefineConstant()); -} - -GateRef CircuitBuilder::TaggedIsException(GateRef x) -{ - return Equal(x, ExceptionConstant()); -} - -GateRef CircuitBuilder::TaggedIsSpecial(GateRef x) -{ - return BoolOr( - Equal(Int64And(ChangeTaggedPointerToInt64(x), Int64(JSTaggedValue::TAG_SPECIAL_MASK)), - Int64(JSTaggedValue::TAG_SPECIAL)), - TaggedIsHole(x)); -} - -inline GateRef CircuitBuilder::IsJSHClass(GateRef obj) -{ - return Int32Equal(GetObjectType(LoadHClass(obj)), Int32(static_cast(JSType::HCLASS))); -} - -GateRef CircuitBuilder::TaggedIsHeapObject(GateRef x) -{ - x = ChangeTaggedPointerToInt64(x); - return Equal(Int64And(x, Int64(JSTaggedValue::TAG_HEAPOBJECT_MASK)), Int64(0)); -} - -GateRef CircuitBuilder::TaggedIsAsyncGeneratorObject(GateRef x) -{ - GateRef isHeapObj = TaggedIsHeapObject(x); - GateRef objType = GetObjectType(LoadHClass(x)); - GateRef isAsyncGeneratorObj = Equal(objType, - Int32(static_cast(JSType::JS_ASYNC_GENERATOR_OBJECT))); - return LogicAnd(isHeapObj, isAsyncGeneratorObj); -} - -GateRef CircuitBuilder::TaggedIsJSGlobalObject(GateRef x) -{ - GateRef isHeapObj = TaggedIsHeapObject(x); - GateRef objType = GetObjectType(LoadHClass(x)); - GateRef isGlobal = Equal(objType, - 
Int32(static_cast(JSType::JS_GLOBAL_OBJECT))); - return LogicAnd(isHeapObj, isGlobal); -} - -GateRef CircuitBuilder::TaggedIsGeneratorObject(GateRef x) -{ - GateRef isHeapObj = TaggedIsHeapObject(x); - GateRef objType = GetObjectType(LoadHClass(x)); - GateRef isAsyncGeneratorObj = Equal(objType, - Int32(static_cast(JSType::JS_GENERATOR_OBJECT))); - return LogicAnd(isHeapObj, isAsyncGeneratorObj); -} - -GateRef CircuitBuilder::TaggedIsPropertyBox(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - IsJsType(x, JSType::PROPERTY_BOX)); -} - -GateRef CircuitBuilder::TaggedIsWeak(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - Equal(Int64And(ChangeTaggedPointerToInt64(x), Int64(JSTaggedValue::TAG_WEAK)), Int64(1))); -} - -GateRef CircuitBuilder::TaggedIsPrototypeHandler(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - IsJsType(x, JSType::PROTOTYPE_HANDLER)); -} - -GateRef CircuitBuilder::TaggedIsTransitionHandler(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - IsJsType(x, JSType::TRANSITION_HANDLER)); -} - -GateRef CircuitBuilder::TaggedIsStoreTSHandler(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - IsJsType(x, JSType::STORE_TS_HANDLER)); -} - -GateRef CircuitBuilder::TaggedIsTransWithProtoHandler(GateRef x) -{ - return LogicAnd(TaggedIsHeapObject(x), - IsJsType(x, JSType::TRANS_WITH_PROTO_HANDLER)); -} - -GateRef CircuitBuilder::TaggedIsUndefinedOrNull(GateRef x) -{ - return BoolOr(TaggedIsUndefined(x), TaggedIsNull(x)); -} - -GateRef CircuitBuilder::TaggedIsTrue(GateRef x) -{ - return Equal(x, TaggedTrue()); -} - -GateRef CircuitBuilder::TaggedIsFalse(GateRef x) -{ - return Equal(x, TaggedFalse()); -} - -GateRef CircuitBuilder::TaggedIsNull(GateRef x) -{ - return Equal(x, NullConstant()); -} - -GateRef CircuitBuilder::TaggedIsBoolean(GateRef x) -{ - return BoolOr(TaggedIsFalse(x), TaggedIsTrue(x)); -} - -GateRef CircuitBuilder::IsAOTLiteralInfo(GateRef x) -{ - GateRef isHeapObj = TaggedIsHeapObject(x); - GateRef objType = GetObjectType(LoadHClass(x)); - GateRef isAOTLiteralInfoObj = Equal(objType, - Int32(static_cast(JSType::AOT_LITERAL_INFO))); - return LogicAnd(isHeapObj, isAOTLiteralInfoObj); -} - -GateRef CircuitBuilder::TaggedGetInt(GateRef x) -{ - x = ChangeTaggedPointerToInt64(x); - return TruncInt64ToInt32(Int64And(x, Int64(~JSTaggedValue::TAG_MARK))); -} - -GateRef CircuitBuilder::ToTaggedInt(GateRef x) -{ - return Int64Or(x, Int64(JSTaggedValue::TAG_INT)); -} - -GateRef CircuitBuilder::ToTaggedIntPtr(GateRef x) -{ - return Int64ToTaggedPtr(Int64Or(x, Int64(JSTaggedValue::TAG_INT))); -} - -GateRef CircuitBuilder::DoubleToTaggedDoublePtr(GateRef x) -{ - GateRef val = CastDoubleToInt64(x); - return Int64ToTaggedPtr(Int64Add(val, Int64(JSTaggedValue::DOUBLE_ENCODE_OFFSET))); -} - -GateRef CircuitBuilder::BooleanToTaggedBooleanPtr(GateRef x) -{ - auto val = ZExtInt1ToInt64(x); - return Int64ToTaggedPtr(Int64Or(val, Int64(JSTaggedValue::TAG_BOOLEAN_MASK))); -} - -GateRef CircuitBuilder::Float32ToTaggedDoublePtr(GateRef x) -{ - GateRef val = ExtFloat32ToDouble(x); - return DoubleToTaggedDoublePtr(val); -} - -GateRef CircuitBuilder::TaggedDoublePtrToFloat32(GateRef x) -{ - GateRef val = GetDoubleOfTDouble(x); - return TruncDoubleToFloat32(val); -} - -GateRef CircuitBuilder::TaggedIntPtrToFloat32(GateRef x) -{ - GateRef val = GetInt32OfTInt(x); - return ChangeInt32ToFloat32(val); -} - -GateRef CircuitBuilder::DoubleToTaggedDouble(GateRef x) -{ - GateRef val = CastDoubleToInt64(x); - return Int64Add(val, Int64(JSTaggedValue::DOUBLE_ENCODE_OFFSET)); -} - 
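For readers skimming this large deletion: the helpers being removed here all manipulate the engine's 64-bit tagged-value encoding — integers carry TAG_INT in the high mark bits, and a double is stored as its raw bits plus DOUBLE_ENCODE_OFFSET so that heap pointers keep a zero tag. A self-contained sketch of that round-trip follows, using illustrative constant values (assumptions for this example; the real definitions live in JSTaggedValue and may differ):

#include <cassert>
#include <cstdint>
#include <cstring>

// Illustrative stand-ins for the JSTaggedValue constants referenced above.
constexpr uint64_t TAG_MARK = 0xFFFF000000000000ULL;
constexpr uint64_t TAG_INT = TAG_MARK;                // top 16 bits set   => tagged int
constexpr uint64_t TAG_OBJECT = 0x0ULL;               // top 16 bits clear => heap object
constexpr uint64_t DOUBLE_ENCODE_OFFSET = 1ULL << 48; // pushes doubles out of both tag ranges

uint64_t ToTaggedInt(int32_t v)
{
    // Sign-extend to 64 bits, then stamp the int tag (mirrors Int32ToTaggedInt).
    return static_cast<uint64_t>(static_cast<int64_t>(v)) | TAG_INT;
}

int32_t GetInt32OfTInt(uint64_t tagged)
{
    assert((tagged & TAG_MARK) == TAG_INT); // same check as TaggedIsInt
    return static_cast<int32_t>(tagged);    // payload lives in the low 32 bits
}

uint64_t DoubleToTaggedDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    return bits + DOUBLE_ENCODE_OFFSET; // mirrors CastDoubleToInt64 + DOUBLE_ENCODE_OFFSET
}

double GetDoubleOfTDouble(uint64_t tagged)
{
    uint64_t bits = tagged - DOUBLE_ENCODE_OFFSET;
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
}

int main()
{
    assert(GetInt32OfTInt(ToTaggedInt(-7)) == -7);
    assert(GetDoubleOfTDouble(DoubleToTaggedDouble(3.14)) == 3.14);
    return 0;
}

(TAG_OBJECT is shown only to make the three-way partition visible; the deleted TaggedIsObject checks it the same way TaggedIsInt checks TAG_INT.)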
-GateRef CircuitBuilder::DoubleIsNAN(GateRef x) -{ - GateRef diff = DoubleEqual(x, x); - return Equal(SExtInt1ToInt32(diff), Int32(0)); -} - -GateRef CircuitBuilder::DoubleToTagged(GateRef x) -{ - GateRef val = CastDoubleToInt64(x); - acc_.SetGateType(val, GateType::TaggedValue()); - return Int64Add(val, Int64(JSTaggedValue::DOUBLE_ENCODE_OFFSET)); -} - -GateRef CircuitBuilder::TaggedTrue() -{ - return GetCircuit()->GetConstantGate(MachineType::I64, JSTaggedValue::VALUE_TRUE, GateType::TaggedValue()); -} - -GateRef CircuitBuilder::TaggedFalse() -{ - return GetCircuit()->GetConstantGate(MachineType::I64, JSTaggedValue::VALUE_FALSE, GateType::TaggedValue()); -} - -GateRef CircuitBuilder::GetLengthFromTaggedArray(GateRef array) -{ - GateRef offset = IntPtr(TaggedArray::LENGTH_OFFSET); - return Load(VariableType::INT32(), array, offset); -} - -GateRef CircuitBuilder::GetValueFromTaggedArray(GateRef array, GateRef index) -{ - GateRef offset = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())); - GateRef dataOffset = PtrAdd(offset, IntPtr(TaggedArray::DATA_OFFSET)); - return Load(VariableType::JS_ANY(), array, dataOffset); -} - -void CircuitBuilder::SetValueToTaggedArray(VariableType valType, GateRef glue, - GateRef array, GateRef index, GateRef val) -{ - GateRef offset = PtrMul(ZExtInt32ToPtr(index), IntPtr(JSTaggedValue::TaggedTypeSize())); - GateRef dataOffset = PtrAdd(offset, IntPtr(TaggedArray::DATA_OFFSET)); - Store(valType, glue, array, dataOffset, val); -} - -GateRef CircuitBuilder::GetGlobalConstantString(ConstantIndex index) -{ - return PtrMul(IntPtr(sizeof(JSTaggedValue)), IntPtr(static_cast(index))); -} - -GateRef CircuitBuilder::LoadObjectFromWeakRef(GateRef x) -{ - return PtrAdd(x, IntPtr(-JSTaggedValue::TAG_WEAK)); -} - -// object operation -GateRef CircuitBuilder::LoadHClass(GateRef object) -{ - GateRef offset = IntPtr(TaggedObject::HCLASS_OFFSET); - return Load(VariableType::JS_POINTER(), object, offset); -} - -void CircuitBuilder::StoreHClass(GateRef glue, GateRef object, GateRef hClass) -{ - Store(VariableType::JS_POINTER(), glue, object, IntPtr(TaggedObject::HCLASS_OFFSET), hClass); -} - -inline GateRef CircuitBuilder::IsJSFunction(GateRef obj) -{ - GateRef objectType = GetObjectType(LoadHClass(obj)); - GateRef greater = Int32GreaterThanOrEqual(objectType, - Int32(static_cast(JSType::JS_FUNCTION_FIRST))); - GateRef less = Int32LessThanOrEqual(objectType, - Int32(static_cast(JSType::JS_FUNCTION_LAST))); - return BoolAnd(greater, less); -} - -inline GateRef CircuitBuilder::IsJSFunctionWithBit(GateRef obj) -{ - GateRef hClass = LoadHClass(obj); - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::IsJSFunctionBit::START_BIT)), Int32(0)); -} - -inline GateRef CircuitBuilder::IsOptimized(GateRef obj) -{ - GateRef hClass = LoadHClass(obj); - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::IsOptimizedBit::START_BIT)), Int32(0)); -} - -inline GateRef CircuitBuilder::IsOptimizedWithBitField(GateRef bitfield) -{ - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::IsOptimizedBit::START_BIT)), Int32(0)); -} - -inline GateRef CircuitBuilder::CanFastCall(GateRef obj) -{ - GateRef hClass = LoadHClass(obj); - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef 
bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::CanFastCallBit::START_BIT)), Int32(0)); -} - -inline GateRef CircuitBuilder::CanFastCallWithBitField(GateRef bitfield) -{ - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::CanFastCallBit::START_BIT)), Int32(0)); -} - -GateRef CircuitBuilder::IsJsType(GateRef obj, JSType type) -{ - GateRef objectType = GetObjectType(LoadHClass(obj)); - return Equal(objectType, Int32(static_cast(type))); -} - -inline GateRef CircuitBuilder::IsDictionaryMode(GateRef object) -{ - GateRef type = GetObjectType(LoadHClass(object)); - return Int32Equal(type, Int32(static_cast(JSType::TAGGED_DICTIONARY))); -} - -GateRef CircuitBuilder::GetObjectType(GateRef hClass) -{ - GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return Int32And(bitfield, Int32((1LU << JSHClass::ObjectTypeBits::SIZE) - 1)); -} - -GateRef CircuitBuilder::IsDictionaryModeByHClass(GateRef hClass) -{ - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::IsDictionaryBit::START_BIT)), - Int32((1LU << JSHClass::IsDictionaryBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsIsStableElementsByHClass(GateRef hClass) -{ - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::IsStableElementsBit::START_BIT)), - Int32((1LU << JSHClass::IsStableElementsBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsDictionaryElement(GateRef hClass) -{ - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::DictionaryElementBits::START_BIT)), - Int32((1LU << JSHClass::DictionaryElementBits::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsStableElements(GateRef hClass) -{ - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::IsStableElementsBit::START_BIT)), - Int32((1LU << JSHClass::IsStableElementsBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsStableArguments(GateRef hClass) -{ - GateRef objectType = GetObjectType(hClass); - GateRef isJsArguments = Int32Equal(objectType, Int32(static_cast(JSType::JS_ARGUMENTS))); - GateRef isStableElements = IsStableElements(hClass); - return BoolAnd(isStableElements, isJsArguments); -} - -GateRef CircuitBuilder::IsStableArray(GateRef hClass) -{ - GateRef objectType = GetObjectType(hClass); - GateRef isJsArray = Int32Equal(objectType, Int32(static_cast(JSType::JS_ARRAY))); - GateRef isStableElements = IsStableElements(hClass); - return BoolAnd(isStableElements, isJsArray); -} - -GateRef CircuitBuilder::IsClassConstructor(GateRef object) -{ - GateRef hClass = LoadHClass(object); - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::ClassConstructorBit::START_BIT)), - Int32((1LU << JSHClass::ClassConstructorBit::SIZE) - 1)), - Int32(0)); -} - -GateRef 
CircuitBuilder::IsClassConstructorWithBitField(GateRef bitfield) -{ - return NotEqual(Int32And(bitfield, Int32(1LU << JSHClass::ClassConstructorBit::START_BIT)), Int32(0)); -} - -GateRef CircuitBuilder::IsConstructor(GateRef object) -{ - GateRef hClass = LoadHClass(object); - GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - // decode - return Int32NotEqual( - Int32And(Int32LSR(bitfield, Int32(JSHClass::ConstructorBit::START_BIT)), - Int32((1LU << JSHClass::ConstructorBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsClassPrototype(GateRef object) -{ - GateRef hClass = LoadHClass(object); - GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - // decode - return NotEqual( - Int32And(Int32LSR(bitfield, Int32(JSHClass::ClassPrototypeBit::START_BIT)), - Int32((1LU << JSHClass::ClassPrototypeBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsExtensible(GateRef object) -{ - GateRef hClass = LoadHClass(object); - GateRef bitfieldOffset = Int32(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return NotEqual(Int32And(Int32LSR(bitfield, - Int32(JSHClass::ExtensibleBit::START_BIT)), - Int32((1LU << JSHClass::ExtensibleBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::GetExpectedNumOfArgs(GateRef method) -{ - GateRef callFieldOffset = IntPtr(Method::CALL_FIELD_OFFSET); - GateRef callfield = Load(VariableType::INT64(), method, callFieldOffset); - return Int64And( - Int64LSR(callfield, Int64(MethodLiteral::NumArgsBits::START_BIT)), - Int64((1LU << MethodLiteral::NumArgsBits::SIZE) - 1)); -} - -GateRef CircuitBuilder::TaggedObjectIsEcmaObject(GateRef obj) -{ - GateRef objectType = GetObjectType(LoadHClass(obj)); - return BoolAnd( - Int32LessThanOrEqual(objectType, Int32(static_cast(JSType::ECMA_OBJECT_LAST))), - Int32GreaterThanOrEqual(objectType, Int32(static_cast(JSType::ECMA_OBJECT_FIRST)))); -} - -GateRef CircuitBuilder::IsJSObject(GateRef obj) -{ - GateRef objectType = GetObjectType(LoadHClass(obj)); - auto ret = BoolAnd( - Int32LessThanOrEqual(objectType, Int32(static_cast(JSType::JS_OBJECT_LAST))), - Int32GreaterThanOrEqual(objectType, Int32(static_cast(JSType::JS_OBJECT_FIRST)))); - return LogicAnd(TaggedIsHeapObject(obj), ret); -} - -GateRef CircuitBuilder::TaggedObjectIsString(GateRef obj) -{ - GateRef objectType = GetObjectType(LoadHClass(obj)); - return BoolAnd( - Int32LessThanOrEqual(objectType, Int32(static_cast(JSType::STRING_LAST))), - Int32GreaterThanOrEqual(objectType, Int32(static_cast(JSType::STRING_FIRST)))); -} - -GateRef CircuitBuilder::TaggedObjectBothAreString(GateRef x, GateRef y) -{ - return BoolAnd(TaggedObjectIsString(x), TaggedObjectIsString(y)); -} - -GateRef CircuitBuilder::IsCallableFromBitField(GateRef bitfield) -{ - return NotEqual( - Int32And(Int32LSR(bitfield, Int32(JSHClass::CallableBit::START_BIT)), - Int32((1LU << JSHClass::CallableBit::SIZE) - 1)), - Int32(0)); -} - -GateRef CircuitBuilder::IsCallable(GateRef obj) -{ - GateRef hClass = LoadHClass(obj); - GateRef bitfieldOffset = IntPtr(JSHClass::BIT_FIELD_OFFSET); - GateRef bitfield = Load(VariableType::INT32(), hClass, bitfieldOffset); - return IsCallableFromBitField(bitfield); -} - -GateRef CircuitBuilder::BothAreString(GateRef x, GateRef y) -{ - Label subentry(env_); - SubCfgEntry(&subentry); - Label bothAreHeapObjet(env_); - Label 
bothAreStringType(env_); - Label exit(env_); - DEFVAlUE(result, env_, VariableType::BOOL(), False()); - Branch(BoolAnd(TaggedIsHeapObject(x), TaggedIsHeapObject(y)), &bothAreHeapObjet, &exit); - Bind(&bothAreHeapObjet); - { - Branch(TaggedObjectBothAreString(x, y), &bothAreStringType, &exit); - Bind(&bothAreStringType); - { - result = True(); - Jump(&exit); - } - } - Bind(&exit); - auto ret = *result; - SubCfgExit(); - return ret; -} - -GateRef CircuitBuilder::GetObjectSizeFromHClass(GateRef hClass) -{ - // NOTE: check for special case of string and TAGGED_ARRAY - GateRef bitfield = Load(VariableType::INT32(), hClass, IntPtr(JSHClass::BIT_FIELD1_OFFSET)); - GateRef objectSizeInWords = Int32And(Int32LSR(bitfield, - Int32(JSHClass::ObjectSizeInWordsBits::START_BIT)), - Int32((1LU << JSHClass::ObjectSizeInWordsBits::SIZE) - 1)); - return PtrMul(ZExtInt32ToPtr(objectSizeInWords), IntPtr(JSTaggedValue::TaggedTypeSize())); -} - -template -GateRef CircuitBuilder::TypedBinaryOp(GateRef x, GateRef y, GateType xType, GateType yType, GateType gateType, - PGOSampleType sampleType) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - uint64_t operandTypes = GatePairTypeAccessor::ToValue(xType, yType); - auto numberBinaryOp = GetCircuit()->NewGate(circuit_->TypedBinaryOp(operandTypes, Op, sampleType), - MachineType::I64, {currentControl, currentDepend, x, y}, gateType); - currentLabel->SetControl(numberBinaryOp); - currentLabel->SetDepend(numberBinaryOp); - return numberBinaryOp; -} - -template -GateRef CircuitBuilder::TypedUnaryOp(GateRef x, GateType xType, GateType gateType) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - uint64_t value = TypedUnaryAccessor::ToValue(xType, Op); - auto numberUnaryOp = GetCircuit()->NewGate(circuit_->TypedUnaryOp(value), - MachineType::I64, {currentControl, currentDepend, x}, gateType); - currentLabel->SetControl(numberUnaryOp); - currentLabel->SetDepend(numberUnaryOp); - return numberUnaryOp; -} - -template -GateRef CircuitBuilder::TypedConditionJump(GateRef x, GateType xType) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - auto machineType = MachineType::NOVALUE; - auto jumpOp = TypedConditionJump(machineType, Op, xType, {currentControl, currentDepend, x}); - currentLabel->SetControl(jumpOp); - currentLabel->SetDepend(jumpOp); - return jumpOp; -} - -template -GateRef CircuitBuilder::LoadElement(GateRef receiver, GateRef index) -{ - auto opIdx = static_cast(Op); - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - auto ret = GetCircuit()->NewGate(GetCircuit()->LoadElement(opIdx), MachineType::I64, - {currentControl, currentDepend, receiver, index}, GateType::AnyType()); - currentLabel->SetControl(ret); - currentLabel->SetDepend(ret); - return ret; -} - -template -GateRef CircuitBuilder::StoreElement(GateRef receiver, GateRef index, GateRef value) -{ - auto opIdx = static_cast(Op); - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - auto ret = - GetCircuit()->NewGate(GetCircuit()->StoreElement(opIdx), MachineType::NOVALUE, - {currentControl, currentDepend, receiver, 
index, value}, GateType::AnyType()); - currentLabel->SetControl(ret); - currentLabel->SetDepend(ret); - return ret; -} - -GateRef CircuitBuilder::PrimitiveToNumber(GateRef x, VariableType type) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - auto numberconvert = TypeConvert(MachineType::I64, type.GetGateType(), GateType::NumberType(), - {currentControl, currentDepend, x}); - currentLabel->SetControl(numberconvert); - currentLabel->SetDepend(numberconvert); - return numberconvert; -} - -GateRef CircuitBuilder::LogicAnd(GateRef x, GateRef y) -{ - Label subentry(env_); - SubCfgEntry(&subentry); - Label exit(env_); - Label isX(env_); - Label notX(env_); - DEFVAlUE(result, env_, VariableType::BOOL(), x); - Branch(x, &isX, ¬X); - Bind(&isX); - { - result = y; - Jump(&exit); - } - Bind(¬X); - { - Jump(&exit); - } - Bind(&exit); - auto ret = *result; - SubCfgExit(); - return ret; -} - -GateRef CircuitBuilder::LogicOr(GateRef x, GateRef y) -{ - Label subentry(env_); - SubCfgEntry(&subentry); - Label exit(env_); - Label isX(env_); - Label notX(env_); - DEFVAlUE(result, env_, VariableType::BOOL(), x); - Branch(x, &isX, ¬X); - Bind(&isX); - { - Jump(&exit); - } - Bind(¬X); - { - result = y; - Jump(&exit); - } - Bind(&exit); - auto ret = *result; - SubCfgExit(); - return ret; -} - -int CircuitBuilder::NextVariableId() -{ - return env_->NextVariableId(); -} - -void CircuitBuilder::HandleException(GateRef result, Label *success, Label *fail, Label *exit) -{ - Branch(Equal(result, ExceptionConstant()), fail, success); - Bind(fail); - { - Jump(exit); - } -} - -void CircuitBuilder::HandleException(GateRef result, Label *success, Label *fail, Label *exit, GateRef exceptionVal) -{ - Branch(Equal(result, exceptionVal), fail, success); - Bind(fail); - { - Jump(exit); - } -} - -void CircuitBuilder::SubCfgEntry(Label *entry) -{ - ASSERT(env_ != nullptr); - env_->SubCfgEntry(entry); -} - -void CircuitBuilder::SubCfgExit() -{ - ASSERT(env_ != nullptr); - env_->SubCfgExit(); -} - -GateRef CircuitBuilder::Return(GateRef value) -{ - auto control = GetCurrentLabel()->GetControl(); - auto depend = GetCurrentLabel()->GetDepend(); - return Return(control, depend, value); -} - -GateRef CircuitBuilder::Return() -{ - auto control = GetCurrentLabel()->GetControl(); - auto depend = GetCurrentLabel()->GetDepend(); - return ReturnVoid(control, depend); -} - -void CircuitBuilder::Bind(Label *label) -{ - label->Bind(); - env_->SetCurrentLabel(label); -} - -void CircuitBuilder::Bind(Label *label, bool justSlowPath) -{ - if (!justSlowPath) { - label->Bind(); - env_->SetCurrentLabel(label); - } -} - -Label *CircuitBuilder::GetCurrentLabel() const -{ - return GetCurrentEnvironment()->GetCurrentLabel(); -} - -GateRef CircuitBuilder::GetState() const -{ - return GetCurrentLabel()->GetControl(); -} - -GateRef CircuitBuilder::GetDepend() const -{ - return GetCurrentLabel()->GetDepend(); -} - -StateDepend CircuitBuilder::GetStateDepend() const -{ - return StateDepend(GetState(), GetDepend()); -} - -void CircuitBuilder::SetDepend(GateRef depend) -{ - GetCurrentLabel()->SetDepend(depend); -} - -void CircuitBuilder::SetState(GateRef state) -{ - GetCurrentLabel()->SetControl(state); -} - -// ctor is base but not builtin -inline GateRef CircuitBuilder::IsBase(GateRef ctor) -{ - GateRef method = GetMethodFromFunction(ctor); - GateRef extraLiteralInfoOffset = IntPtr(Method::EXTRA_LITERAL_INFO_OFFSET); - GateRef bitfield = 
Load(VariableType::INT32(), method, extraLiteralInfoOffset); - - GateRef kind = Int32And(Int32LSR(bitfield, Int32(MethodLiteral::FunctionKindBits::START_BIT)), - Int32((1LU << MethodLiteral::FunctionKindBits::SIZE) - 1)); - return Int32LessThanOrEqual(kind, Int32(static_cast(FunctionKind::CLASS_CONSTRUCTOR))); -} - -inline GateRef CircuitBuilder::TypedCallBuiltin(GateRef hirGate, GateRef x, BuiltinsStubCSigns::ID id) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - GateRef idGate = Int8(static_cast(id)); - auto numberMathOp = TypedCallOperator(hirGate, MachineType::I64, {currentControl, currentDepend, x, idGate}); - currentLabel->SetControl(numberMathOp); - currentLabel->SetDepend(numberMathOp); - return numberMathOp; -} - -inline GateRef CircuitBuilder::TypedCallThis3Builtin(GateRef hirGate, GateRef thisObj, GateRef a0, GateRef a1, - GateRef a2, BuiltinsStubCSigns::ID id) -{ - auto currentLabel = env_->GetCurrentLabel(); - auto currentControl = currentLabel->GetControl(); - auto currentDepend = currentLabel->GetDepend(); - GateRef idGate = Int8(static_cast(id)); - auto numberMathOp = TypedCallOperator(hirGate, MachineType::I64, - {currentControl, currentDepend, thisObj, a0, a1, a2, idGate}); - currentLabel->SetControl(numberMathOp); - currentLabel->SetDepend(numberMathOp); - return numberMathOp; -} - -inline GateRef CircuitBuilder::GetMethodId(GateRef func) -{ - GateRef method = GetMethodFromFunction(func); - GateRef literalInfoOffset = IntPtr(Method::LITERAL_INFO_OFFSET); - GateRef LiteralInfo = Load(VariableType::INT64(), method, literalInfoOffset); - GateRef methodId = Int64And(Int64LSR(LiteralInfo, Int64(MethodLiteral::MethodIdBits::START_BIT)), - Int64((1LLU << MethodLiteral::MethodIdBits::SIZE) - 1)); - return methodId; -} - -void Label::Seal() -{ - return impl_->Seal(); -} - -void Label::Bind() -{ - impl_->Bind(); -} - -void Label::MergeAllControl() -{ - impl_->MergeAllControl(); -} - -void Label::MergeAllDepend() -{ - impl_->MergeAllDepend(); -} - -void Label::AppendPredecessor(const Label *predecessor) -{ - impl_->AppendPredecessor(predecessor->GetRawLabel()); -} - -std::vector

-| Option | Abbreviation | Description | Value Range | Default Value |
-| ------ | ------------ | ----------- | ----------- | ------------- |
-| --modules | -m | Compiles JS files based on the module. | - | - |
-| --debug-log | -l | Enables the log function. | - | - |
-| --dump-assembly | -a | Outputs a text ARK bytecode file. | - | - |
-| --debug | -d | Provides debug information. | - | - |
-| --show-statistics | -s | Displays statistics about bytecodes. | - | - |
-| --output | -o | Specifies the path of the output file. | - | - |
-| --timeout | -t | Specifies the timeout threshold. | - | - |
-| --help | -h | Displays help information. | - | - |
-| --bc-version | -v | Outputs the current bytecode version. | - | - |
-| --bc-min-version | - | Outputs the lowest bytecode version supported. | - | - |
+| Option | Description | Value Range | Default Value |
+| ------ | ----------- | ----------- | ------------- |
+| --debug-info | Provides debug information. | - | - |
+| --debugger-evaluate-expression | Evaluates a base64-style expression in the debugger. | - | - |
+| --dump-assembly | Outputs an assembly file. | - | - |
+| --dump-ast | Prints the parsed AST (Abstract Syntax Tree). | - | - |
+| --dump-debug-info | Prints debug info. | - | - |
+| --dump-literal-buffer | Prints the content of the literal buffer. | - | - |
+| --dump-size-stat | Displays statistics about bytecodes. | - | - |
+| --extension | Specifies the input file type. | ['js', 'ts', 'as'] | - |
+| --help | Displays help information. | - | - |
+| --module | Compiles the code based on the ECMAScript standard module. | - | - |
+| --opt-level | Specifies the level for compilation optimization. | ['0', '1', '2'] | 0 |
+| --output | Specifies the path of the output file. | - | - |
+| --parse-only | Parses the input file only. | - | - |
+| --thread | Specifies the number of threads used to generate bytecode. | 0 to the number of threads supported by the machine | 0 |

Note + * SP Stack Pointer + * R30/LR Link register Stores the return address. + * We push it into stack along with FP on function + * entry using STP and restore it on function exit + * using LDP even if the function is a leaf (i.e., + * it does not call any other function) because it + * is free (we have to store FP anyway). So, if a + * function is a leaf, we may use it as a temporary + * register. + * R29/FP Frame Pointer + * R19-R28 Callee-saved + * registers + * R18 Platform reg Can we use it as a temporary register? + * R16,R17 IP0,IP1 Maybe used as temporary registers. Should be + * given lower priorities. (i.e., we push them + * into the free register stack before the others) + * R9-R15 Temporary registers, caller-saved + * Note: + * R16 and R17 may be used by a linker as a scratch register between + * a routine and any subroutine it calls. They can also be used within a + * routine to hold intermediate values between subroutine calls. + * + * The role of R18 is platform specific. If a platform ABI has need of + * a dedicated general purpose register to carry inter-procedural state + * (for example, the thread context) then it should use this register for + * that purpose. If the platform ABI has no such requirements, then it should + * use R18 as an additional temporary register. The platform ABI specification + * must document the usage for this register. + * + * A subroutine invocation must preserve the contents of the registers R19-R29 + * and SP. All 64 bits of each value stored in R19-R29 must be preserved, even + * when using the ILP32 data model. + * + * $ 5.1.2 SIMD and Floating-Point Registers + * + * The first eight registers, V0-V7, are used to pass argument values into + * a subroutine and to return result values from a function. They may also + * be used to hold intermediate values within a routine. + * + * V8-V15 must be preserved by a callee across subroutine calls; the + * remaining registers do not need to be preserved( or caller-saved). + * Additionally, only the bottom 64 bits of each value stored in V8- + * V15 need to be preserved. + */ +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cfgo.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6e92948246fe1af3ed5f47370494a0ace010a7db --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cfgo.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_cfgo.h" +#include "aarch64_isa.h" + +namespace maplebe { +/* Initialize cfg optimization patterns */ +void AArch64CFGOptimizer::InitOptimizePatterns() +{ + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + // diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +uint32 AArch64FlipBRPattern::GetJumpTargetIdx(const Insn &insn) +{ + return AArch64isa::GetJumpTargetIdx(insn); +} +MOperator AArch64FlipBRPattern::FlipConditionOp(MOperator flippedOp) +{ + return AArch64isa::FlipConditionOp(flippedOp); +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4e87cc7b0ee249ccf848e9475ce0c83ed588852e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cg.cpp @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_cg.h" +#include "mir_builder.h" +#include "becommon.h" +#include "label_creation.h" +#include "alignment.h" + +namespace maplebe { +#include "immvalid.def" +#define DEFINE_MOP(...) 
{__VA_ARGS__}, +const InsnDesc AArch64CG::kMd[kMopLast] = { +#include "abstract_mmir.def" +#include "aarch64_md.def" +}; +#undef DEFINE_MOP + +std::array, kIntRegTypeNum> AArch64CG::intRegNames = { + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", + "err8", "err9", "err10", "err11", "err12", "err13", "err14", "err15", "err16", + "err17", "err18", "err19", "err20", "err21", "err22", "err23", "err24", "err25", + "err26", "err27", "err28", "err", "err", "err", "errsp", "errzr", /* x29 is fp */ + "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", + "b9", "b10", "b11", "b12", "b13", "b14", "b15", "b16", "b17", + "b18", "b19", "b20", "b21", "b22", "b23", "b24", "b25", "b26", + "b27", "b28", "b29", "b30", "b31", "errMaxRegNum", "rflag"}, + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", + "err8", "err9", "err10", "err11", "err12", "err13", "err14", "err15", "err16", + "err17", "err18", "err19", "err20", "err21", "err22", "err23", "err24", "err25", + "err26", "err27", "err28", "err29", "err30", "err31", "errsp", "errzr", /* x29 is fp */ + "h0", "h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", + "h9", "h10", "h11", "h12", "h13", "h14", "h15", "h16", "h17", + "h18", "h19", "h20", "h21", "h22", "h23", "h24", "h25", "h26", + "h27", "h28", "h29", "h30", "h31", "errMaxRegNum", "rflag"}, + std::array { + "err", "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", + "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22", + "w23", "w24", "w25", "w26", "w27", "w28", "w29", "err", "err", "wsp", "wzr", /* x29 is fp */ + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", + "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", + "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "errMaxRegNum", "rflag"}, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", + "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x29" /* use X40 when debug */, + "sp", "xzr", /* x29 is fp */ + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", + "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", + "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", "errMaxRegNum", + "rflag"}, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", + "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", + "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x29" /* use X40 when debug */, + "sp", "xzr", /* x29 is fp */ + "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", + "q11", "q12", "q13", "q14", "q15", "q16", "q17", "q18", "q19", "q20", "q21", + "q22", "q23", "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31", "errMaxRegNum", + "rflag"}}; + +std::array AArch64CG::vectorRegNames = { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", "err11", "err12", + "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + /* x29 is fp, err40 is fp before RA */ + "err23", "err24", "err25", "err26", "err27", "err28", "err29", "err30", "errsp", "errzr", "err40", "v0", "v1", "v2", + "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", + "v20", "v21", "v22", "v23", "v24", "v25", 
"v26", "v27", "v28", "v29", "v30", "v31", "errMaxRegNum", "rflag"}; + +bool AArch64CG::IsExclusiveFunc(MIRFunction &mirFunc) +{ + const std::string &funcName = mirFunc.GetName(); + for (const auto &it : ehExclusiveNameVec) { + if (it.compare(funcName) == 0) { + return true; + } + } + return false; +} +namespace wordsMap { +/* + * Generate object maps. + * + * 1. each class record its GCTIB in method meta (not read only meta) + * 2. GCTIB include: header protoType; n bitmap word; bitmap word + * 3. each reference word(4 or 8 bytes) is represented by 2 bits + * 00: not ref + * 01: normal ref + * 10: weak ref + * 11: unowned ref + * + * For example, if a scalar object has five ptr fields at offsets 24, 40(weak), + * 64(unowned), the generated code will be like: + * + * MCC_GCTIB__xxx: + * .long 0x40 // object has child reference + * .long 1 // one word in the bitmap + * .quad 0b110000100001000000 + * ... + */ +const uint32 kRefWordsPerMapWord = 32; /* contains bitmap for 32 ref words in 64 bits */ +const uint32 kLogRefWordsPerMapWord = 5; +#ifdef USE_32BIT_REF +const uint32 kReferenceWordSize = 4; +const uint32 kLog2ReferenceWordSize = 2; +#else +const uint32 kReferenceWordSize = 8; +const uint32 kLog2ReferenceWordSize = 3; +#endif +const uint32 kInMapWordOffsetMask = ((kReferenceWordSize * kRefWordsPerMapWord) - 1); +const uint32 kInMapWordIndexShift = (kLog2ReferenceWordSize - 1); +const uint32 kMapWordIndexShift = (kLog2ReferenceWordSize + kLogRefWordsPerMapWord); + +const uint64 kRefBits = 1; +const uint64 kWeakRefBits = 2; +const uint64 kUnownedRefBits = 3; + +/* + * Give a structrue type, calculate its bitmap_vector + */ +static void GetGCTIBBitMapWords(const BECommon &beCommon, MIRStructType &stType, std::vector &bitmapWords) +{ + bitmapWords.clear(); + if (stType.GetKind() == kTypeClass) { + uint64 curBitmap = 0; + uint32 curBitmapIndex = 0; + uint32 prevOffset = 0; + for (const auto &fieldInfo : beCommon.GetJClassLayout(static_cast(stType))) { + if (fieldInfo.IsRef()) { + uint32 curOffset = fieldInfo.GetOffset(); + /* skip meta field */ + if (curOffset == 0) { + continue; + } + CHECK_FATAL((curOffset > prevOffset) || (prevOffset == 0), "not ascending offset"); + uint32 wordIndex = curOffset >> kMapWordIndexShift; + if (wordIndex > curBitmapIndex) { + bitmapWords.emplace_back(curBitmap); + for (uint32 i = curBitmapIndex + 1; i < wordIndex; i++) { + bitmapWords.emplace_back(0); + } + curBitmap = 0; + curBitmapIndex = wordIndex; + } + uint32 bitOffset = (curOffset & kInMapWordOffsetMask) >> kInMapWordIndexShift; + if (CGOptions::IsGCOnly()) { + /* ignore unowned/weak when GCONLY is enabled. 
+                     */
+                    curBitmap |= (kRefBits << bitOffset);
+                } else if (fieldInfo.IsUnowned()) {
+                    curBitmap |= (kUnownedRefBits << bitOffset);
+                } else if (fieldInfo.IsWeak()) {
+                    curBitmap |= (kWeakRefBits << bitOffset);
+                } else {
+                    /* ref */
+                    curBitmap |= (kRefBits << bitOffset);
+                }
+                prevOffset = curOffset;
+            }
+        }
+        if (curBitmap != 0) {
+            bitmapWords.emplace_back(curBitmap);
+        }
+    } else if (stType.GetKind() != kTypeInterface) {
+        /* interface doesn't have reference fields */
+        CHECK_FATAL(false, "GetGCTIBBitMapWords unexpected type");
+    }
+}
+} // namespace wordsMap
+
+bool AArch64CG::IsTargetInsn(MOperator mOp) const
+{
+    return (mOp > MOP_undef && mOp <= MOP_nop);
+}
+bool AArch64CG::IsClinitInsn(MOperator mOp) const
+{
+    return (mOp == MOP_clinit || mOp == MOP_clinit_tail || mOp == MOP_adrp_ldr);
+}
+bool AArch64CG::IsPseudoInsn(MOperator mOp) const
+{
+    return (mOp >= MOP_pseudo_param_def_x && mOp < MOP_nop);
+}
+
+bool AArch64CG::IsEffectiveCopy(Insn &insn) const
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    if (mOp >= MOP_xmovrr && mOp <= MOP_xvmovrv) {
+        return true;
+    }
+    if (mOp == MOP_vmovuu || mOp == MOP_vmovvv) {
+        return true;
+    }
+    if ((mOp >= MOP_xaddrrr && mOp <= MOP_ssub) || (mOp >= MOP_xlslrri6 && mOp <= MOP_wlsrrrr)) {
+        Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+        if (opnd2.IsIntImmediate()) {
+            auto &immOpnd = static_cast<ImmOperand&>(opnd2);
+            if (immOpnd.IsZero()) {
+                return true;
+            }
+        }
+    }
+    if (mOp > MOP_xmulrrr && mOp <= MOP_xvmuld) {
+        Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+        if (opnd2.IsIntImmediate()) {
+            auto &immOpnd = static_cast<ImmOperand&>(opnd2);
+            if (immOpnd.GetValue() == 1) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+void AArch64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const
+{
+    A64OpndDumpVisitor visitor(opndDesc);
+    opnd.Accept(visitor);
+}
+
+/*
+ * Find if there exists a same GCTIB (both rcHeader and bitmap are the same) for a
+ * different class. If true, reuse it; if not, emit and record a new GCTIB.
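+ * For example, two unrelated classes whose reference fields lie at identical
+ * offsets with identical weak/unowned attributes yield the same rcHeader and
+ * bitmap words; the second class then simply maps its name onto the pattern
+ * already emitted for the first instead of emitting a duplicate.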
+ */
+void AArch64CG::FindOrCreateRepresentiveSym(std::vector<uint64> &bitmapWords, uint32 rcHeader, const std::string &name)
+{
+    GCTIBKey *key = memPool->New<GCTIBKey>(allocator, rcHeader, bitmapWords);
+    const std::string &gcTIBName = GCTIB_PREFIX_STR + name;
+    auto iter = keyPatternMap.find(key);
+    if (iter == keyPatternMap.end() || gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) {
+        /* Emit the GCTIB label for the class */
+        GCTIBPattern *ptn = memPool->New<GCTIBPattern>(*key, *memPool);
+
+        if (gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) {
+            ptn->SetName("MCC_GCTIB__Ljava_2Flang_2FObject_3B");
+        }
+        (void)keyPatternMap.insert(std::make_pair(key, ptn));
+        (void)symbolPatternMap.insert(std::make_pair(gcTIBName, ptn));
+
+        /* Emit GCTIB pattern */
+        std::string ptnString = "\t.type " + ptn->GetName() + ", %object\n" + "\t.data\n" + "\t.align 3\n";
+
+        MIRSymbol *gcTIBSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(
+            GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::GetInternalNameLiteral(gcTIBName)));
+        if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() == kScFstatic) {
+            ptnString += "\t.local ";
+        } else {
+            ptnString += "\t.global ";
+        }
+
+        Emitter *emitter = GetEmitter();
+        emitter->Emit(ptnString);
+        emitter->Emit(ptn->GetName());
+        emitter->Emit("\n");
+
+        /* Emit the GCTIB pattern label for the class */
+        emitter->Emit(ptn->GetName());
+        emitter->Emit(":\n");
+
+        emitter->Emit("\t.long ");
+        emitter->EmitHexUnsigned(rcHeader);
+        emitter->Emit("\n");
+
+        /* generate n_bitmap word */
+        emitter->Emit("\t.long "); /* AArch64-specific. Generate a 32-bit value. */
+        emitter->EmitDecUnsigned(bitmapWords.size());
+        emitter->Emit("\n");
+
+        /* Emit each bitmap word */
+        for (const auto &bitmapWord : bitmapWords) {
+            if (!CGOptions::IsQuiet()) {
+                LogInfo::MapleLogger() << " bitmap_word: 0x" << std::hex << bitmapWord << std::dec << "\n";
+            }
+            emitter->Emit("\t.quad "); /* AArch64-specific. Generate a 64-bit value. */
+            emitter->EmitHexUnsigned(bitmapWord);
+            emitter->Emit("\n");
+        }
+        if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() != kScFstatic) {
+            /* add local symbol REF_XXX to every global GCTIB symbol */
+            CreateRefSymForGlobalPtn(*ptn);
+            keyPatternMap[key] = ptn;
+        }
+    } else {
+        (void)symbolPatternMap.insert(make_pair(gcTIBName, iter->second));
+    }
+}
+
+/*
+ * Add a local symbol REF_XXX to a global GCTIB symbol,
+ * and replace the global GCTIBPattern in keyPatternMap.
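+ * For illustration, assuming REF_PREFIX_STR expands to "REF_", a global pattern
+ * named MCC_GCTIB__X gains a companion local symbol emitted roughly as:
+ *   .type REF_MCC_GCTIB__X, %object
+ *   .data
+ *   .align 3
+ *   .local REF_MCC_GCTIB__X
+ * REF_MCC_GCTIB__X:
+ *   .quad MCC_GCTIB__X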
+ */
+void AArch64CG::CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const
+{
+    const std::string &refPtnString = REF_PREFIX_STR + ptn.GetName();
+    const std::string &ptnString = "\t.type " + refPtnString + ", %object\n" + "\t.data\n" + "\t.align 3\n" +
+                                   "\t.local " + refPtnString + "\n" + refPtnString + ":\n" + "\t.quad " +
+                                   ptn.GetName() + "\n";
+    Emitter *emitter = GetEmitter();
+    emitter->Emit(ptnString);
+    ptn.SetName(refPtnString);
+}
+
+std::string AArch64CG::FindGCTIBPatternName(const std::string &name) const
+{
+    auto iter = symbolPatternMap.find(name);
+    if (iter == symbolPatternMap.end()) {
+        CHECK_FATAL(false, "No GCTIB pattern found for symbol: %s", name.c_str());
+    }
+    return iter->second->GetName();
+}
+
+void AArch64CG::GenerateObjectMaps(BECommon &beCommon)
+{
+    if (!CGOptions::IsQuiet()) {
+        LogInfo::MapleLogger() << "DEBUG: Generating object maps...\n";
+    }
+
+    for (auto &tyId : GetMIRModule()->GetClassList()) {
+        if (!CGOptions::IsQuiet()) {
+            LogInfo::MapleLogger() << "Class tyIdx: " << tyId << "\n";
+        }
+        TyIdx tyIdx(tyId);
+        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+        DEBUG_ASSERT(ty != nullptr, "ty nullptr check");
+        /* Only emit GCTIB for classes owned by this module */
+        DEBUG_ASSERT(ty->IsStructType(), "ty isn't MIRStructType* in AArch64CG::GenerateObjectMaps");
+        MIRStructType *strTy = static_cast<MIRStructType*>(ty);
+        if (!strTy->IsLocal()) {
+            continue;
+        }
+
+        GStrIdx nameIdx = ty->GetNameStrIdx();
+
+        const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx);
+
+        /* Emit for a class */
+        if (!CGOptions::IsQuiet()) {
+            LogInfo::MapleLogger() << " name: " << name << "\n";
+        }
+
+        std::vector<uint64> bitmapWords;
+        wordsMap::GetGCTIBBitMapWords(beCommon, *strTy, bitmapWords);
+        /* fill specific header according to the size of bitmapWords */
+        uint32 rcHeader = (!bitmapWords.empty()) ? 0x40 : 0;
+        FindOrCreateRepresentiveSym(bitmapWords, rcHeader, name);
+    }
+}
+
+void AArch64CG::EnrollTargetPhases(MaplePhaseManager *pm) const
+{
+    if (!GetMIRModule()->IsCModule()) {
+        CGOptions::DisableCGSSA();
+    }
+#include "aarch64_phases.def"
+}
+
+Insn &AArch64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam)
+{
+    DEBUG_ASSERT(defOpnd.IsRegister(), "build SSA on register operand");
+    CHECK_FATAL(defOpnd.IsOfIntClass() || defOpnd.IsOfFloatOrSIMDClass(), " unknown operand type ");
+    bool is64bit = defOpnd.GetSize() == k64BitSize;
+    MOperator mop = MOP_nop;
+    if (defOpnd.GetSize() == k128BitSize) {
+        DEBUG_ASSERT(defOpnd.IsOfFloatOrSIMDClass(), "unexpected 128bit int operand in aarch64");
+        mop = MOP_xvphivd;
+    } else {
+        mop = defOpnd.IsOfIntClass() ? is64bit ? MOP_xphirr : MOP_wphirr : is64bit ? MOP_xvphid : MOP_xvphis;
+    }
+    DEBUG_ASSERT(mop != MOP_nop, "unexpected 128bit int operand in aarch64");
+    return GetCurCGFuncNoConst()->GetInsnBuilder()->BuildInsn(mop, defOpnd, listParam);
+}
+
+PhiOperand &AArch64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator)
+{
+    return *mp.New<PhiOperand>(mAllocator);
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..087aa083a375ee98946143c809e80d91cee6389d
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp
@@ -0,0 +1,12696 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_cg.h" +#include "aarch64_cgfunc.h" +#include +#include +#include +#include +#include "cfi.h" +#include "mpl_logging.h" +#include "rt.h" +#include "opcode_info.h" +#include "mir_builder.h" +#include "mir_symbol_builder.h" +#include "mpl_atomic.h" +#include "metadata_layout.h" +#include "emit.h" +#include "simplify.h" +#include + +namespace maplebe { +using namespace maple; +CondOperand AArch64CGFunc::ccOperands[kCcLast] = { + CondOperand(CC_EQ), CondOperand(CC_NE), CondOperand(CC_CS), CondOperand(CC_HS), CondOperand(CC_CC), + CondOperand(CC_LO), CondOperand(CC_MI), CondOperand(CC_PL), CondOperand(CC_VS), CondOperand(CC_VC), + CondOperand(CC_HI), CondOperand(CC_LS), CondOperand(CC_GE), CondOperand(CC_LT), CondOperand(CC_GT), + CondOperand(CC_LE), CondOperand(CC_AL), +}; + +namespace { +constexpr int32 kSignedDimension = 2; /* signed and unsigned */ +constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ +constexpr int32 kFloatByteSizeDimension = 3; /* 4 bytes, 8 bytes, 16 bytes(vector) */ +constexpr int32 kShiftAmount12 = 12; /* for instruction that can use shift, shift amount must be 0 or 12 */ + +MOperator ldIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + {MOP_wldrb, MOP_wldrh, MOP_wldr, MOP_xldr}, + /* signed == 1 */ + {MOP_wldrsb, MOP_wldrsh, MOP_wldr, MOP_xldr}}; + +MOperator stIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr}, + /* signed == 1 */ + {MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr}}; + +MOperator ldIsAcq[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + {MOP_wldarb, MOP_wldarh, MOP_wldar, MOP_xldar}, + /* signed == 1 */ + {MOP_undef, MOP_undef, MOP_wldar, MOP_xldar}}; + +MOperator stIsRel[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + {MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr}, + /* signed == 1 */ + {MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr}}; + +MOperator ldFs[kFloatByteSizeDimension] = {MOP_sldr, MOP_dldr, MOP_qldr}; +MOperator stFs[kFloatByteSizeDimension] = {MOP_sstr, MOP_dstr, MOP_qstr}; + +MOperator ldFsAcq[kFloatByteSizeDimension] = {MOP_undef, MOP_undef, MOP_undef}; +MOperator stFsRel[kFloatByteSizeDimension] = {MOP_undef, MOP_undef, MOP_undef}; + +/* extended to unsigned ints */ +MOperator uextIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* u8 u16 u32 u64 */ + {MOP_undef, MOP_xuxtb32, MOP_xuxtb32, MOP_xuxtb32}, /* u8/i8 */ + {MOP_undef, MOP_undef, MOP_xuxth32, MOP_xuxth32}, /* u16/i16 */ + {MOP_undef, MOP_undef, MOP_xuxtw64, MOP_xuxtw64}, /* u32/i32 */ + {MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */ +}; + +/* extended to signed ints */ +MOperator extIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* i8 i16 i32 i64 */ + {MOP_undef, MOP_xsxtb32, MOP_xsxtb32, MOP_xsxtb64}, /* u8/i8 */ + {MOP_undef, MOP_undef, MOP_xsxth32, MOP_xsxth64}, /* u16/i16 */ + {MOP_undef, MOP_undef, MOP_undef, MOP_xsxtw64}, /* 
u32/i32 */ + {MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */ +}; + +MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType, AArch64isa::MemoryOrdering memOrd) +{ + DEBUG_ASSERT(__builtin_popcount(static_cast(memOrd)) <= 1, "must be kMoNone or kMoAcquire"); + DEBUG_ASSERT(bitSize >= k8BitSize, "PTY_u1 should have been lowered?"); + DEBUG_ASSERT(__builtin_popcount(bitSize) == 1, "PTY_u1 should have been lowered?"); + if (isLoad) { + DEBUG_ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoAcquire) || + (memOrd == AArch64isa::kMoAcquireRcpc) || (memOrd == AArch64isa::kMoLoacquire), + "unknown Memory Order"); + } else { + DEBUG_ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoRelease) || + (memOrd == AArch64isa::kMoLorelease), + "unknown Memory Order"); + } + + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if ((IsPrimitiveInteger(primType) || primType == PTY_agg) && !IsPrimitiveVector(primType)) { + MOperator(*table)[kIntByteSizeDimension]; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldIsAcq : ldIs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? stIsRel : stIs; + } + + int32 signedUnsigned = IsUnsignedInteger(primType) ? 0 : 1; + if (primType == PTY_agg) { + CHECK_FATAL(bitSize >= k8BitSize, " unexpect agg size"); + bitSize = static_cast(RoundUp(bitSize, k8BitSize)); + DEBUG_ASSERT((bitSize & (bitSize - 1)) == 0, "bitlen error"); + } + + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + if (primType == PTY_i128 || primType == PTY_u128) { + bitSize = k64BitSize; + } + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 4; + DEBUG_ASSERT(size <= 3, "wrong bitSize"); + return table[signedUnsigned][size]; + } else { + MOperator *table = nullptr; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldFsAcq : ldFs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? 
stFsRel : stFs; + } + + /* __builtin_ffs(x) returns: 32 -> 6, 64 -> 7, 128 -> 8 */ + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 6; + DEBUG_ASSERT(size <= 2, "size must be 0 to 2"); + return table[size]; + } +} +} // namespace + +bool IsBlkassignForPush(const BlkassignoffNode &bNode) +{ + BaseNode *dest = bNode.Opnd(0); + bool spBased = false; + if (dest->GetOpCode() == OP_regread) { + RegreadNode &node = static_cast(*dest); + if (-node.GetRegIdx() == kSregSp) { + spBased = true; + } + } + return spBased; +} + +RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType) +{ + RegOperand *resOpnd = nullptr; + if (parent.GetOpCode() == OP_regassign) { + auto ®AssignNode = static_cast(parent); + PregIdx pregIdx = regAssignNode.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + /* if it is one of special registers */ + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + } else { + resOpnd = &CreateRegisterOperandOfType(primType); + } + return *resOpnd; +} + +MOperator AArch64CGFunc::PickLdInsn(uint32 bitSize, PrimType primType, AArch64isa::MemoryOrdering memOrd) const +{ + return PickLdStInsn(true, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickStInsn(uint32 bitSize, PrimType primType, AArch64isa::MemoryOrdering memOrd) const +{ + return PickLdStInsn(false, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickExtInsn(PrimType dtype, PrimType stype) const +{ + int32 sBitSize = static_cast(GetPrimTypeBitSize(stype)); + int32 dBitSize = static_cast(GetPrimTypeBitSize(dtype)); + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if (IsPrimitiveInteger(stype) && IsPrimitiveInteger(dtype)) { + MOperator(*table)[kIntByteSizeDimension]; + table = IsUnsignedInteger(stype) ? uextIs : extIs; + if (stype == PTY_i128 || stype == PTY_u128) { + sBitSize = static_cast(k64BitSize); + } + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + uint32 row = static_cast(__builtin_ffs(sBitSize)) - k4BitSize; + DEBUG_ASSERT(row <= 3, "wrong bitSize"); + if (dtype == PTY_i128 || dtype == PTY_u128) { + dBitSize = static_cast(k64BitSize); + } + uint32 col = static_cast(__builtin_ffs(dBitSize)) - k4BitSize; + DEBUG_ASSERT(col <= 3, "wrong bitSize"); + return table[row][col]; + } + CHECK_FATAL(0, "extend not primitive integer"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovBetweenRegs(PrimType destType, PrimType srcType) const +{ + if (IsPrimitiveVector(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize ? MOP_vmovuu : MOP_vmovvv; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_wmovrr : MOP_xmovrr; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovs : MOP_xvmovd; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovrs : MOP_xvmovrd; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovsr : MOP_xvmovdr; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize + ? MOP_vwmovru + : GetPrimTypeSize(destType) <= k4ByteSize ? 
MOP_vwmovrv : MOP_vxmovrv; + } + CHECK_FATAL(false, "unexpected operand primtype for mov"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const +{ + CHECK_FATAL(lhs.GetRegisterType() == rhs.GetRegisterType(), "PickMovInsn: unequal kind NYI"); + CHECK_FATAL(lhs.GetSize() == rhs.GetSize(), "PickMovInsn: unequal size NYI"); + DEBUG_ASSERT(((lhs.GetSize() < k64BitSize) || (lhs.GetRegisterType() == kRegTyFloat)), + "should split the 64 bits or more mov"); + if (lhs.GetRegisterType() == kRegTyInt) { + return MOP_wmovrr; + } + if (lhs.GetRegisterType() == kRegTyFloat) { + return (lhs.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd; + } + DEBUG_ASSERT(false, "PickMovInsn: kind NYI"); + return MOP_undef; +} + +void AArch64CGFunc::SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) +{ + DEBUG_ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + DEBUG_ASSERT(memOrd != AArch64isa::kMoNone, "Just checking"); + + uint32 ssize = isDirect ? src.GetSize() : GetPrimTypeBitSize(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + MOperator mOp = PickLdInsn(ssize, stype, memOrd); + + Operand *newSrc = &src; + auto &memOpnd = static_cast(src); + OfstOperand *immOpnd = memOpnd.GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newSrc = &CreateReplacementMemOperand(ssize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(src); + } + + /* Check if the right load-acquire instruction is available. */ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + if (IsPrimitiveFloat(stype)) { + /* Uses signed integer version ldar followed by a floating-point move(fmov). */ + DEBUG_ASSERT(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, itype, memOrd), regOpnd, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = (stype == PTY_f32) ? MOP_xvmovsr : MOP_xvmovdr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, regOpnd)); + } else { + /* Use unsigned version ldarb/ldarh followed by a sign-extension instruction(sxtb/sxth). */ + DEBUG_ASSERT((ssize == k8BitSize) || (ssize == k16BitSize), "Just checking"); + PrimType utype = (ssize == k8BitSize) ? PTY_u8 : PTY_u16; + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, utype, memOrd), dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = ((dsize == k32BitSize) ? ((ssize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32) + : ((ssize == k8BitSize) ? 
MOP_xsxtb64 : MOP_xsxth64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, dest)); + } + } +} + +void AArch64CGFunc::SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) +{ + DEBUG_ASSERT(dest.GetKind() == Operand::kOpdMem, "Just checking"); + + uint32 dsize = isDirect ? dest.GetSize() : GetPrimTypeBitSize(stype); + MOperator mOp = PickStInsn(dsize, stype, memOrd); + + Operand *newDest = &dest; + MemOperand *memOpnd = static_cast(&dest); + OfstOperand *immOpnd = memOpnd->GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd->GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newDest = &CreateReplacementMemOperand(dsize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(dest); + } + + /* Check if the right store-release instruction is available. */ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, src, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + /* Use a floating-point move(fmov) followed by a stlr. */ + DEBUG_ASSERT(IsPrimitiveFloat(stype), "must be float type"); + CHECK_FATAL(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + mOp = (stype == PTY_f32) ? MOP_xvmovrs : MOP_xvmovrd; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, regOpnd, src)); + Insn &insn = GetInsnBuilder()->BuildInsn(PickStInsn(dsize, itype, memOrd), regOpnd, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType) +{ + if (IsPrimitiveInteger(dType) != IsPrimitiveInteger(sType)) { + RegOperand &tempReg = CreateRegisterOperandOfType(sType); + SelectCopyImm(tempReg, src, sType); + SelectCopy(dest, dType, tempReg, sType); + } else { + SelectCopyImm(dest, src, sType); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype) +{ + uint32 dsize = GetPrimTypeBitSize(dtype); + DEBUG_ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer"); + DEBUG_ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)), + "The destination operand must be >= 8-bit"); + if (src.IsSingleInstructionMovable()) { + MOperator mOp = (dsize == k32BitSize) ? 
MOP_wmovri32 : MOP_xmovri64;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src));
+        return;
+    }
+    uint64 srcVal = static_cast<uint64>(src.GetValue());
+    /* using mov/movk to load the immediate value */
+    if (dsize == k8BitSize) {
+        /* compute lower 8 bits value */
+        if (dtype == PTY_u8) {
+            /* zero extend */
+            srcVal = (srcVal << 56) >> 56;
+            dtype = PTY_u16;
+        } else {
+            /* sign extend */
+            srcVal = ((static_cast<int64>(srcVal)) << 56) >> 56;
+            dtype = PTY_i16;
+        }
+        dsize = k16BitSize;
+    }
+    if (dsize == k16BitSize) {
+        if (dtype == PTY_u16) {
+            /* check lower 16 bits and higher 16 bits respectively */
+            DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected value");
+            DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) == 0, "unexpected value");
+            DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected value");
+            /* create an imm operand which represents lower 16 bits of the immediate */
+            ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
+            return;
+        } else {
+            /* sign extend and let `dsize == 32` case take care of it */
+            srcVal = ((static_cast<int64>(srcVal)) << 48) >> 48;
+            dsize = k32BitSize;
+        }
+    }
+    if (dsize == k32BitSize) {
+        /* check lower 16 bits and higher 16 bits respectively */
+        DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected val");
+        DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0, "unexpected val");
+        DEBUG_ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
+        DEBUG_ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val");
+        /* create an imm operand which represents lower 16 bits of the immediate */
+        ImmOperand &srcLower = CreateImmOperand(static_cast<int64>(srcVal & 0x0000FFFFULL), k16BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower));
+        /* create an imm operand which represents upper 16 bits of the immediate */
+        ImmOperand &srcUpper =
+            CreateImmOperand(static_cast<int64>((srcVal >> k16BitSize) & 0x0000FFFFULL), k16BitSize, false);
+        BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(k16BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovkri16, dest, srcUpper, *lslOpnd));
+    } else {
+        /*
+         * partition it into 4 16-bit chunks;
+         * if more 0's than 0xFFFF's, use movz as the initial instruction,
+         * otherwise movn.
+         */
+        bool useMovz = BetterUseMOVZ(srcVal);
+        bool useMovk = false;
+        /* get lower 32 bits of the immediate */
+        uint64 chunkLval = srcVal & 0xFFFFFFFFULL;
+        /* get upper 32 bits of the immediate */
+        uint64 chunkHval = (srcVal >> k32BitSize) & 0xFFFFFFFFULL;
+        int32 maxLoopTime = 4;
+
+        if (chunkLval == chunkHval) {
+            /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */
+            maxLoopTime = 2;
+        }
+
+        uint64 sa = 0;
+
+        for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) {
+            /* create an imm operand which represents the i-th 16-bit chunk of the immediate */
+            uint64 chunkVal = (srcVal >> (static_cast<uint32>(sa))) & 0x0000FFFFULL;
+            if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) {
+                continue;
+            }
+            ImmOperand &src16 = CreateImmOperand(static_cast<int64>(chunkVal), k16BitSize, false);
+            BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true);
+            if (!useMovk) {
+                /* use movz or movn */
+                if (!useMovz) {
+                    src16.BitwiseNegate();
+                }
+                GetCurBB()->AppendInsn(
+                    GetInsnBuilder()->BuildInsn(useMovz ?
MOP_xmovzri16 : MOP_xmovnri16, dest, src16, *lslOpnd)); + useMovk = true; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovkri16, dest, src16, *lslOpnd)); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd)); + } + } +} + +std::string AArch64CGFunc::GenerateMemOpndVerbose(const Operand &src) const +{ + DEBUG_ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + const MIRSymbol *symSecond = static_cast(&src)->GetSymbol(); + if (symSecond != nullptr) { + std::string key; + MIRStorageClass sc = symSecond->GetStorageClass(); + if (sc == kScFormal) { + key = "param: "; + } else if (sc == kScAuto) { + key = "local var: "; + } else { + key = "global: "; + } + return key.append(symSecond->GetName()); + } + return ""; +} + +void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype) +{ + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&src)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_acquire)) { + memOrd = AArch64isa::kMoAcquire; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectLoadAcquire(dest, dtype, src, stype, memOrd, true); + return; + } + Insn *insn = nullptr; + uint32 ssize = src.GetSize(); + PrimType regTy = PTY_void; + RegOperand *loadReg = nullptr; + MOperator mop = MOP_undef; + if (IsPrimitiveFloat(stype) || IsPrimitiveVector(stype)) { + CHECK_FATAL(dsize == ssize, "dsize %u expect equals ssize %u", dtype, ssize); + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + if (stype == PTY_agg && dtype == PTY_agg) { + mop = MOP_undef; + } else { + mop = PickExtInsn(dtype, stype); + } + if (ssize == (GetPrimTypeSize(dtype) * kBitsPerByte) || mop == MOP_undef) { + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + regTy = dsize == k64BitSize ? 
dtype : PTY_i32;
+            loadReg = &CreateRegisterOperandOfType(regTy);
+            insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), *loadReg, src);
+        }
+    }
+
+    if (GetCG()->GenerateVerboseCG()) {
+        insn->SetComment(GenerateMemOpndVerbose(src));
+    }
+
+    GetCurBB()->AppendInsn(*insn);
+    if (regTy != PTY_void && mop != MOP_undef) {
+        DEBUG_ASSERT(loadReg != nullptr, "loadReg should not be nullptr");
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dest, *loadReg));
+    }
+}
+
+bool AArch64CGFunc::IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, bool isIntactIndexed,
+                                            bool isPostIndexed, bool isPreIndexed) const
+{
+    bool isInRange = false;
+    switch (mOp) {
+        case MOP_xstr:
+        case MOP_wstr:
+            isInRange =
+                (isIntactIndexed &&
+                 ((!is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm32UpperBound)) ||
+                  (is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm64UpperBound)))) ||
+                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
+                 (immVal <= kStrLdrPerPostUpperBound));
+            break;
+        case MOP_wstrb:
+            isInRange =
+                (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrbLdrbImmUpperBound)) ||
+                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
+                 (immVal <= kStrLdrPerPostUpperBound));
+            break;
+        case MOP_wstrh:
+            isInRange =
+                (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrhLdrhImmUpperBound)) ||
+                ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) &&
+                 (immVal <= kStrLdrPerPostUpperBound));
+            break;
+        default:
+            break;
+    }
+    return isInRange;
+}
+
+bool AArch64CGFunc::IsStoreMop(MOperator mOp) const
+{
+    switch (mOp) {
+        case MOP_sstr:
+        case MOP_dstr:
+        case MOP_qstr:
+        case MOP_xstr:
+        case MOP_wstr:
+        case MOP_wstrb:
+        case MOP_wstrh:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void AArch64CGFunc::SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg, Insn *curInsn)
+{
+    bool useMovz = BetterUseMOVZ(immVal);
+    bool useMovk = false;
+    /* get lower 32 bits of the immediate */
+    uint64 chunkLval = static_cast<uint64>(immVal) & 0xFFFFFFFFULL;
+    /* get upper 32 bits of the immediate */
+    uint64 chunkHval = (static_cast<uint64>(immVal) >> k32BitSize) & 0xFFFFFFFFULL;
+    int32 maxLoopTime = 4;
+
+    if (chunkLval == chunkHval) {
+        /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */
+        maxLoopTime = 2;
+    }
+
+    uint64 sa = 0;
+    auto *bb = (curInsn != nullptr) ? curInsn->GetBB() : GetCurBB();
+    for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) {
+        /* create an imm operand which represents the i-th 16-bit chunk of the immediate */
+        uint64 chunkVal = (static_cast<uint64>(immVal) >> sa) & 0x0000FFFFULL;
+        if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) {
+            continue;
+        }
+        ImmOperand &src16 = CreateImmOperand(static_cast<int64>(chunkVal), k16BitSize, false);
+        BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true);
+        Insn *newInsn = nullptr;
+        if (!useMovk) {
+            /* use movz or movn */
+            if (!useMovz) {
+                src16.BitwiseNegate();
+            }
+            MOperator mOpCode = useMovz ?
MOP_xmovzri16 : MOP_xmovnri16; + newInsn = &GetInsnBuilder()->BuildInsn(mOpCode, destReg, src16, *lslOpnd); + useMovk = true; + } else { + newInsn = &GetInsnBuilder()->BuildInsn(MOP_xmovkri16, destReg, src16, *lslOpnd); + } + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, *newInsn); + } else { + bb->AppendInsn(*newInsn); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, insn); + } else { + bb->AppendInsn(insn); + } + } +} + +void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize, + Operand &src, PrimType stype) +{ + if (opndType != Operand::kOpdMem) { + if (!CGOptions::IsArm64ilp32()) { + DEBUG_ASSERT(stype != PTY_a32, ""); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(dtype, stype), dest, src)); + return; + } + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&dest)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_release)) { + memOrd = AArch64isa::kMoRelease; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectStoreRelease(dest, dtype, src, stype, memOrd, true); + return; + } + + bool is64Bits = (dest.GetSize() == k64BitSize) ? true : false; + MOperator strMop = PickStInsn(dsize, stype); + if (!dest.IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + + MemOperand *memOpnd = static_cast(&dest); + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + if (memOpnd->GetOffsetOperand() == nullptr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + ImmOperand *immOpnd = static_cast(memOpnd->GetOffsetOperand()); + DEBUG_ASSERT(immOpnd != nullptr, "immOpnd should not be nullptr"); + int64 immVal = immOpnd->GetValue(); + bool isIntactIndexed = memOpnd->IsIntactIndexed(); + bool isPostIndexed = memOpnd->IsPostIndexed(); + bool isPreIndexed = memOpnd->IsPreIndexed(); + DEBUG_ASSERT(!isPostIndexed, "memOpnd should not be post-index type"); + DEBUG_ASSERT(!isPreIndexed, "memOpnd should not be pre-index type"); + bool isInRange = false; + if (!GetMirModule().IsCModule()) { + isInRange = IsImmediateValueInRange(strMop, immVal, is64Bits, isIntactIndexed, isPostIndexed, isPreIndexed); + } else { + isInRange = IsOperandImmValid(strMop, memOpnd, kInsnSecondOpnd); + } + bool isMopStr = IsStoreMop(strMop); + if (isInRange || !isMopStr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + DEBUG_ASSERT(memOpnd->GetBaseRegister() != nullptr, "nullptr check"); + if (isIntactIndexed) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dsize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, *memOpnd)); + } else if (isPostIndexed || isPreIndexed) { + RegOperand ® = CreateRegisterOperandOfType(PTY_i64); + MOperator mopMov = MOP_xmovri64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMov, reg, *immOpnd)); + MOperator mopAdd = MOP_xaddrrr; + MemOperand &newDest = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, 
GetPrimTypeBitSize(dtype), memOpnd->GetBaseRegister(), nullptr,
+                               &GetOrCreateOfstOpnd(0, k32BitSize), nullptr);
+        Insn &insn1 = GetInsnBuilder()->BuildInsn(strMop, src, newDest);
+        Insn &insn2 = GetInsnBuilder()->BuildInsn(mopAdd, *newDest.GetBaseRegister(), *newDest.GetBaseRegister(), reg);
+        if (isPostIndexed) {
+            GetCurBB()->AppendInsn(insn1);
+            GetCurBB()->AppendInsn(insn2);
+        } else {
+            /* isPreIndexed */
+            GetCurBB()->AppendInsn(insn2);
+            GetCurBB()->AppendInsn(insn1);
+        }
+    }
+}
+
+void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype)
+{
+    DEBUG_ASSERT(dest.IsRegister() || dest.IsMemoryAccessOperand(), "");
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    if (dest.IsRegister()) {
+        dsize = dest.GetSize();
+    }
+    Operand::OperandType opnd0Type = dest.GetKind();
+    Operand::OperandType opnd1Type = src.GetKind();
+    DEBUG_ASSERT(((dsize >= src.GetSize()) || (opnd0Type == Operand::kOpdRegister) || (opnd0Type == Operand::kOpdMem)),
+                 "NYI");
+    DEBUG_ASSERT(((opnd0Type == Operand::kOpdRegister) || (src.GetKind() == Operand::kOpdRegister)),
+                 "either src or dest should be register");
+
+    switch (opnd1Type) {
+        case Operand::kOpdMem:
+            SelectCopyMemOpnd(dest, dtype, dsize, src, stype);
+            break;
+        case Operand::kOpdOffset:
+        case Operand::kOpdImmediate:
+            SelectCopyImm(dest, dtype, static_cast<ImmOperand&>(src), stype);
+            break;
+        case Operand::kOpdFPImmediate:
+            CHECK_FATAL(static_cast<ImmOperand&>(src).GetValue() == 0, "NIY");
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, dest,
+                                                               GetZeroOpnd(dsize)));
+            break;
+        case Operand::kOpdRegister: {
+            if (opnd0Type == Operand::kOpdRegister && IsPrimitiveVector(stype)) {
+                /* check vector reg to vector reg move */
+                CHECK_FATAL(IsPrimitiveVector(dtype), "invalid vectreg to vectreg move");
+                MOperator mop = (dsize <= k64BitSize) ? MOP_vmovuu : MOP_vmovvv;
+                VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]);
+                vInsn.AddOpndChain(dest).AddOpndChain(src);
+                auto *vecSpecSrc = GetMemoryPool()->New<VectorRegSpec>(dsize >> k3ByteSize, k8BitSize);
+                auto *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(dsize >> k3ByteSize, k8BitSize);
+                vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc);
+                GetCurBB()->AppendInsn(vInsn);
+                break;
+            }
+            RegOperand &desReg = static_cast<RegOperand&>(dest);
+            RegOperand &srcReg = static_cast<RegOperand&>(src);
+            if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) {
+                break;
+            }
+            SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype);
+            break;
+        }
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+}
+
+/* This function copies src to a register, the src can be an imm, mem or a label */
+RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dtype)
+{
+    RegOperand &dest = CreateRegisterOperandOfType(dtype);
+    SelectCopy(dest, dtype, src, stype);
+    return dest;
+}
+
+/*
+ * We need to adjust the offset of a stack-allocated local variable
+ * if we store FP/SP before any other local variables to save an instruction.
+ * See AArch64CGFunc::OffsetAdjustmentForFPLR() in aarch64_cgfunc.cpp
+ *
+ * That is when we !UsedStpSubPairForCallFrameAllocation().
+ *
+ * Because we need to use the STP/SUB instruction pair to store FP/SP 'after'
+ * local variables when the call frame size is greater than the max offset
+ * value allowed for the STP instruction (we cannot use STP w/ prefix, LDP w/
+ * postfix), if UsedStpSubPairForCallFrameAllocation(), we don't need to
+ * adjust the offsets.
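+ *
+ * A concrete reading of the check below (assuming 8-byte integer registers, i.e.
+ * kIntregBytelen == 8): the FP/LR pair stored ahead of the locals shifts every
+ * stack-local offset by 2 * kIntregBytelen = 16 bytes, so each offset is probed
+ * with that extra 16 added before we decide whether it still fits the scaled
+ * immediate range of a single ldr/str.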
+ */ +bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen) +{ + DEBUG_ASSERT(bitLen >= k8BitSize, "bitlen error"); + DEBUG_ASSERT(bitLen <= k128BitSize, "bitlen error"); + + if (bitLen >= k8BitSize) { + bitLen = static_cast(RoundUp(bitLen, k8BitSize)); + } + DEBUG_ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error"); + + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) { + int32 offsetValue = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + offsetValue += + static_cast(static_cast(GetMemlayout())->RealStackFrameSize() + 0xff); + } + offsetValue += 2 * kIntregBytelen; /* Refer to the above comment */ + return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen); + } else { + return false; + } +} + +bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) +{ + const InsnDesc *md = &AArch64CG::kMd[mOp]; + auto *opndProp = md->opndMD[opndIdx]; + + Operand::OperandType opndTy = opndProp->GetOperandType(); + if (opndTy == Operand::kOpdMem) { + auto *memOpnd = static_cast(o); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX) { + return true; + } + if (md->IsLoadStorePair() || + (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd->IsIntactIndexed())) { + int64 offsetValue = memOpnd->GetOffsetImmediate()->GetOffsetValue(); + if (memOpnd->GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + offsetValue += static_cast(GetMemlayout())->RealStackFrameSize() + 0xffL; + } + return md->IsValidImmOpnd(offsetValue); + } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + int32 offsetValue = static_cast(memOpnd->GetOffsetImmediate()->GetOffsetValue()); + return offsetValue == 0; + } else { + CHECK_FATAL(!memOpnd->IsIntactIndexed(), "CHECK WHAT?"); + int32 offsetValue = static_cast(memOpnd->GetOffsetImmediate()->GetOffsetValue()); + return (offsetValue <= static_cast(k256BitSize) && offsetValue >= kNegative256BitSize); + } + } else if (opndTy == Operand::kOpdImmediate) { + return md->IsValidImmOpnd(static_cast(o)->GetValue()); + } + return true; +} + +MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset) +{ + return CreateMemOpnd(baseReg, offset, bitLen); +} + +bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const +{ + if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) { + return false; + } + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int32 opndVal = static_cast(ofstOpnd->GetOffsetValue()); + int32 maxPimm = memOpnd.GetMaxPIMM(bitLen); + int32 q0 = opndVal / maxPimm; + int32 addend = q0 * maxPimm; + int32 r0 = opndVal - addend; + int32 alignment = memOpnd.GetImmediateOffsetAlignment(bitLen); + int32 r1 = static_cast(r0) & ((1u << static_cast(alignment)) - 1); + addend = addend + r1; + return (addend > 0); +} + +RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum) +{ + RegOperand *resOpnd = nullptr; + if (baseRegNum == AArch64reg::kRinvalid) { + resOpnd = &CreateRegisterOperandOfType(PTY_i64); + } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) { + resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(baseRegNum), + GetPointerSize() * kBitsPerByte, kRegTyInt); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum); + } + return resOpnd; +} + +/* + * When immediate of str/ldr is 
over 256 bits, it should be aligned according to the reg byte size.
+ * Here we split the offset into (512 * n) and +/-(new offset) when misaligned, to make sure that
+ * the new offset is always under 256 bits.
+ */
+MemOperand &AArch64CGFunc::ConstraintOffsetToSafeRegion(uint32 bitLen, const MemOperand &memOpnd)
+{
+    auto it = hashMemOpndTable.find(memOpnd);
+    if (it != hashMemOpndTable.end()) {
+        hashMemOpndTable.erase(memOpnd);
+    }
+    int32 offsetValue = static_cast<int32>(memOpnd.GetOffsetImmediate()->GetOffsetValue());
+    int32 multiplier = (offsetValue / k512BitSize) + static_cast<int32>(offsetValue % k512BitSize > k256BitSize);
+    int32 addMount = multiplier * k512BitSizeInt;
+    int32 newOffset = offsetValue - addMount;
+    RegOperand *baseReg = memOpnd.GetBaseRegister();
+    ImmOperand &immAddMount = CreateImmOperand(addMount, k64BitSize, true);
+    if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) {
+        immAddMount.SetVary(kUnAdjustVary);
+    }
+
+    RegOperand *resOpnd = GetBaseRegForSplit(kRinvalid);
+    SelectAdd(*resOpnd, *baseReg, immAddMount, PTY_i64);
+    MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, newOffset);
+    newMemOpnd.SetStackMem(memOpnd.IsStackMem());
+    return newMemOpnd;
+}
+
+ImmOperand &AArch64CGFunc::SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, RegOperand *resOpnd,
+                                               int64 ofstVal, bool isDest, Insn *insn, bool forPair)
+{
+    auto it = hashMemOpndTable.find(memOpnd);
+    if (it != hashMemOpndTable.end()) {
+        hashMemOpndTable.erase(memOpnd);
+    }
+    /*
+     * opndVal == Q0 * 32760(16380) + R0
+     * R0 == Q1 * 8(4) + R1
+     * ADDEND == Q0 * 32760(16380) + R1
+     * NEW_OFFSET = Q1 * 8(4)
+     * we want to generate two instructions:
+     * ADD TEMP_REG, X29, ADDEND
+     * LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ]
+     */
+    int32 maxPimm = 0;
+    if (!forPair) {
+        maxPimm = MemOperand::GetMaxPIMM(bitLen);
+    } else {
+        maxPimm = MemOperand::GetMaxPairPIMM(bitLen);
+    }
+    DEBUG_ASSERT(maxPimm != 0, "get max pimm failed");
+
+    int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ?
-1 : 0); + int64 addend = q0 * maxPimm; + int64 r0 = ofstVal - addend; + int64 alignment = MemOperand::GetImmediateOffsetAlignment(bitLen); + auto q1 = static_cast(static_cast(r0) >> static_cast(alignment)); + auto r1 = static_cast(static_cast(r0) & ((1u << static_cast(alignment)) - 1)); + auto remained = static_cast(static_cast(q1) << static_cast(alignment)); + addend = addend + r1; + if (addend > 0) { + int64 suffixClear = 0xfff; + if (forPair) { + suffixClear = 0xff; + } + int64 remainedTmp = remained + (addend & suffixClear); + if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) && + ((static_cast(remainedTmp) & ((1u << static_cast(alignment)) - 1)) == 0)) { + remained = remainedTmp; + addend = (addend & ~suffixClear); + } + } + ImmOperand &immAddend = CreateImmOperand(addend, k64BitSize, true); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + immAddend.SetVary(kUnAdjustVary); + } + return immAddend; +} + +MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, uint32 baseRegNum, + bool isDest, Insn *insn, bool forPair) +{ + DEBUG_ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd"); + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + RegOperand *resOpnd = GetBaseRegForSplit(baseRegNum); + ImmOperand &immAddend = SplitAndGetRemained(memOpnd, bitLen, resOpnd, ofstVal, isDest, insn, forPair); + int64 remained = (ofstVal - immAddend.GetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + DEBUG_ASSERT(origBaseReg != nullptr, "nullptr check"); + if (insn == nullptr) { + SelectAdd(*resOpnd, *origBaseReg, immAddend, PTY_i64); + } else { + SelectAddAfterInsn(*resOpnd, *origBaseReg, immAddend, PTY_i64, isDest, *insn); + } + MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained); + newMemOpnd.SetStackMem(memOpnd.IsStackMem()); + return newMemOpnd; +} + +void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) +{ + SelectDassign(stmt.GetStIdx(), stmt.GetFieldID(), stmt.GetRHS()->GetPrimType(), opnd0); +} + +/* + * Used for SelectDassign when do optimization for volatile store, because the stlr instruction only allow + * store to the memory addrress with the register base offset 0. + * STLR , [{,#0}], 32-bit variant (size = 10) + * STLR , [{,#0}], 64-bit variant (size = 11) + * So the function do the prehandle of the memory operand to satisify the Store-Release.. 
+ */
+RegOperand *AArch64CGFunc::ExtractNewMemBase(const MemOperand &memOpnd)
+{
+    const MIRSymbol *sym = memOpnd.GetSymbol();
+    MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode();
+    if (mode == MemOperand::kAddrModeLiteral) {
+        return nullptr;
+    }
+    RegOperand *baseOpnd = memOpnd.GetBaseRegister();
+    DEBUG_ASSERT(baseOpnd != nullptr, "nullptr check");
+    RegOperand &resultOpnd =
+        CreateRegisterOperandOfType(baseOpnd->GetRegisterType(), baseOpnd->GetSize() / kBitsPerByte);
+    bool is64Bits = (baseOpnd->GetSize() == k64BitSize);
+    if (mode == MemOperand::kAddrModeLo12Li) {
+        StImmOperand &stImm = CreateStImmOperand(*sym, 0, 0);
+        Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resultOpnd, *baseOpnd, stImm);
+        addInsn.SetComment("new add insn");
+        GetCurBB()->AppendInsn(addInsn);
+    } else if (mode == MemOperand::kAddrModeBOi) {
+        OfstOperand *offsetOpnd = memOpnd.GetOffsetImmediate();
+        if (offsetOpnd->GetOffsetValue() != 0) {
+            MOperator mOp = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *offsetOpnd));
+        } else {
+            return baseOpnd;
+        }
+    } else {
+        CHECK_FATAL(mode == MemOperand::kAddrModeBOrX, "unexpected addressing mode.");
+        RegOperand *regOpnd = memOpnd.GetIndexRegister();
+        MOperator mOp = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *regOpnd));
+    }
+    return &resultOpnd;
+}
+
+/*
+ * NOTE: I divided SelectDassign so that we can create "virtual" assignments
+ * when selecting other complex Maple IR instructions. For example, the atomic
+ * exchange and other intrinsics will need to assign their results to local
+ * variables. Such Maple IR instructions are platform-specific (e.g.
+ * atomic_exchange can be implemented as one single machine instruction on x86_64
+ * and ARMv8.1, but ARMv8.0 needs an LL/SC loop), therefore they cannot (in
+ * principle) be lowered at BELowerer or CGLowerer.
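+ *
+ * For the ARMv8.0 case above, an atomic exchange takes roughly this LL/SC shape
+ * (an illustrative sketch, not code emitted by this function):
+ *   loop:
+ *     ldaxr x1, [x0]      // load-acquire exclusive of the old value
+ *     stlxr w2, x3, [x0]  // store-release exclusive of the new value
+ *     cbnz  w2, loop      // retry if exclusivity was lost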
+ */ +void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0) +{ + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stIdx); + int32 offset = 0; + bool parmCopy = false; + if (fieldId != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectDassign: non-zero fieldID for non-structure"); + offset = GetBecommon().GetFieldOffset(*structType, fieldId).first; + parmCopy = IsParamStructCopy(*symbol); + } + uint32 regSize = GetPrimTypeBitSize(rhsPType); + MIRType *type = symbol->GetType(); + Operand &stOpnd = LoadIntoRegister(opnd0, IsPrimitiveInteger(rhsPType) || IsPrimitiveVectorInteger(rhsPType), + regSize, IsSignedInteger(type->GetPrimType())); + MOperator mOp = MOP_undef; + if ((type->GetKind() == kTypeStruct) || (type->GetKind() == kTypeUnion)) { + MIRStructType *structType = static_cast(type); + type = structType->GetFieldType(fieldId); + } else if (type->GetKind() == kTypeClass) { + MIRClassType *classType = static_cast(type); + type = classType->GetFieldType(fieldId); + } + + uint32 dataSize = GetPrimTypeBitSize(type->GetPrimType()); + if (type->GetPrimType() == PTY_agg) { + dataSize = GetPrimTypeBitSize(PTY_a64); + } + MemOperand *memOpnd = nullptr; + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to + * MIRFuncType), so we allow `kTypeFunction` to appear here */ + DEBUG_ASSERT(((type->GetKind() == kTypeScalar) || (type->GetKind() == kTypePointer) || + (type->GetKind() == kTypeFunction) || (type->GetKind() == kTypeStruct) || + (type->GetKind() == kTypeUnion) || (type->GetKind() == kTypeArray)), + "NYI dassign type"); + PrimType ptyp = type->GetPrimType(); + if (ptyp == PTY_agg) { + ptyp = PTY_a64; + } + + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + if (isVolStore) { + RegOperand *baseOpnd = ExtractNewMemBase(*memOpnd); + if (baseOpnd != nullptr) { + memOpnd = &CreateMemOpnd(*baseOpnd, 0, dataSize); + memOrd = AArch64isa::kMoRelease; + isVolStore = false; + } + } + + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
&ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd; + if (symbol->GetAsmAttr() != UStrIdx(0) && symbol->GetStorageClass() != kScPstatic && + symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + SelectCopy(specifiedOpnd, type->GetPrimType(), opnd0, rhsPType); + } else if (memOrd == AArch64isa::kMoNone) { + mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp); + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + if (GetCG()->GenerateVerboseCG()) { + insn.SetComment(GenerateMemOpndVerbose(*memOpnd)); + } + GetCurBB()->AppendInsn(insn); + } else { + AArch64CGFunc::SelectStoreRelease(*memOpnd, ptyp, stOpnd, ptyp, memOrd, true); + } +} + +void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) +{ + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + int64 offset = stmt.offset; + uint32 size = GetPrimTypeSize(stmt.GetPrimType()) * k8ByteSize; + MOperator mOp = (size == k16BitSize) + ? MOP_wstrh + : ((size == k32BitSize) ? MOP_wstr : ((size == k64BitSize) ? MOP_xstr : MOP_undef)); + CHECK_FATAL(mOp != MOP_undef, "illegal size for dassignoff"); + MemOperand *memOpnd = &GetOrCreateMemOpnd(*symbol, offset, size); + if ((memOpnd->GetMemVaryType() == kNotVary) && (IsImmediateOffsetOutOfRange(*memOpnd, size) || (offset % 8 != 0))) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, size); + } + Operand &stOpnd = LoadIntoRegister(opnd0, true, size, false); + memOpnd = memOpnd->IsOffsetMisaligned(size) ? &ConstraintOffsetToSafeRegion(size, *memOpnd) : memOpnd; + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + GetCurBB()->AppendInsn(insn); +} + +void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) +{ + Operand *opnd0 = HandleExpr(stmt, *stmt.Opnd(0)); + RegOperand &baseReg = LoadIntoRegister(*opnd0, PTY_a64); + auto &zwr = GetZeroOpnd(k32BitSize); + auto &mem = CreateMemOpnd(baseReg, 0, k32BitSize); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, zwr, mem); + loadRef.SetDoNotRemove(true); + if (GetCG()->GenerateVerboseCG()) { + loadRef.SetComment("null pointer check"); + } + GetCurBB()->AppendInsn(loadRef); +} + +void AArch64CGFunc::SelectAbort() +{ + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + auto &mem = CreateMemOpnd(inOpnd, 0, k64BitSize); + Insn &movXzr = GetInsnBuilder()->BuildInsn(MOP_xmovri64, inOpnd, CreateImmOperand(0, k64BitSize, false)); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, GetZeroOpnd(k64BitSize), mem); + loadRef.SetDoNotRemove(true); + movXzr.SetDoNotRemove(true); + GetCurBB()->AppendInsn(movXzr); + GetCurBB()->AppendInsn(loadRef); +} + +static std::string GetRegPrefixFromPrimType(PrimType pType, uint32 size, const std::string &constraint) +{ + std::string regPrefix = ""; + /* memory access check */ + if (constraint.find("m") != std::string::npos || constraint.find("Q") != std::string::npos) { + regPrefix += "["; + } + if (IsPrimitiveVector(pType)) { + regPrefix += "v"; + } else if (IsPrimitiveInteger(pType)) { + if (size == k32BitSize) { + regPrefix += "w"; + } else { + regPrefix += "x"; + } + } else { + if (size == k32BitSize) { + regPrefix += "s"; + } else { + regPrefix += "d"; + } + } + return regPrefix; +} + +void AArch64CGFunc::SelectAsm(AsmNode &node) +{ + SetHasAsm(); + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + if 
+    if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
+        if (GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) {
+            LogInfo::MapleLogger() << "Using coloring RA\n";
+            const_cast<CGOptions &>(GetCG()->GetCGOptions()).SetOption(CGOptions::kDoColorRegAlloc);
+            const_cast<CGOptions &>(GetCG()->GetCGOptions()).ClearOption(CGOptions::kDoLinearScanRegAlloc);
+        }
+    }
+    Operand *asmString = &CreateStringOperand(node.asmString);
+    ListOperand *listInputOpnd = CreateListOpnd(*GetFuncScopeAllocator());
+    ListOperand *listOutputOpnd = CreateListOpnd(*GetFuncScopeAllocator());
+    ListOperand *listClobber = CreateListOpnd(*GetFuncScopeAllocator());
+    ListConstraintOperand *listInConstraint = memPool->New<ListConstraintOperand>(*GetFuncScopeAllocator());
+    ListConstraintOperand *listOutConstraint = memPool->New<ListConstraintOperand>(*GetFuncScopeAllocator());
+    ListConstraintOperand *listInRegPrefix = memPool->New<ListConstraintOperand>(*GetFuncScopeAllocator());
+    ListConstraintOperand *listOutRegPrefix = memPool->New<ListConstraintOperand>(*GetFuncScopeAllocator());
+    std::list<std::pair<Operand *, PrimType>> rPlusOpnd;
+    bool noReplacement = false;
+    if (node.asmString.find('$') == std::string::npos) {
+        /* no replacements */
+        noReplacement = true;
+    }
+    /* input constraints should be processed before OP_asm instruction */
+    for (size_t i = 0; i < node.numOpnds; ++i) {
+        /* process input constraint */
+        std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.inputConstraints[i]);
+        bool isOutputTempNode = false;
+        if (str[0] == '+') {
+            isOutputTempNode = true;
+        }
+        listInConstraint->stringList.push_back(static_cast<StringOperand *>(&CreateStringOperand(str)));
+        /* process input operands */
+        switch (node.Opnd(i)->op) {
+            case OP_dread: {
+                DreadNode &dread = static_cast<DreadNode &>(*node.Opnd(i));
+                Operand *inOpnd = SelectDread(node, dread);
+                PrimType pType = dread.GetPrimType();
+                listInputOpnd->PushOpnd(static_cast<RegOperand &>(*inOpnd));
+                listInRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                    &CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str))));
+                if (isOutputTempNode) {
+                    rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType));
+                }
+                break;
+            }
+            case OP_addrof: {
+                auto &addrofNode = static_cast<AddrofNode &>(*node.Opnd(i));
+                Operand *inOpnd = SelectAddrof(addrofNode, node);
+                listInputOpnd->PushOpnd(static_cast<RegOperand &>(*inOpnd));
+                PrimType pType = addrofNode.GetPrimType();
+                listInRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                    &CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str))));
+                if (isOutputTempNode) {
+                    rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType));
+                }
+                break;
+            }
+            case OP_constval: {
+                CHECK_FATAL(!isOutputTempNode, "Unexpect");
+                auto &constNode = static_cast<ConstvalNode &>(*node.Opnd(i));
+                CHECK_FATAL(constNode.GetConstVal()->GetKind() == kConstInt,
+                            "expect MIRIntConst does not support float yet");
+                MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constNode.GetConstVal());
+                CHECK_FATAL(mirIntConst != nullptr, "just checking");
+                int64 scale = mirIntConst->GetExtValue();
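+                /* an immediate bound to an "r" constraint cannot stay an immediate:
+                 * it is materialized into a scratch register first */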
+                if (str.find("r") != std::string::npos) {
+                    bool isSigned = scale < 0;
+                    ImmOperand &immOpnd = CreateImmOperand(scale, k64BitSize, isSigned);
+                    /* set default type as a 64 bit reg */
+                    PrimType pty = isSigned ? PTY_i64 : PTY_u64;
+                    auto &tempReg = static_cast<Operand &>(CreateRegisterOperandOfType(pty));
+                    SelectCopy(tempReg, pty, immOpnd, isSigned ? PTY_i64 : PTY_u64);
+                    listInputOpnd->PushOpnd(static_cast<RegOperand &>(tempReg));
+                    listInRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                        &CreateStringOperand(GetRegPrefixFromPrimType(pty, tempReg.GetSize(), str))));
+                } else {
+                    RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt);
+                    listInputOpnd->PushOpnd(static_cast<RegOperand &>(inOpnd));
+
+                    listInRegPrefix->stringList.push_back(
+                        static_cast<StringOperand *>(&CreateStringOperand("i" + std::to_string(scale))));
+                }
+                break;
+            }
+            case OP_regread: {
+                auto &regreadNode = static_cast<RegreadNode &>(*node.Opnd(i));
+                PregIdx pregIdx = regreadNode.GetRegIdx();
+                RegOperand &inOpnd = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+                listInputOpnd->PushOpnd(static_cast<RegOperand &>(inOpnd));
+                MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
+                PrimType pType = preg->GetPrimType();
+                listInRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                    &CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd.GetSize(), str))));
+                if (isOutputTempNode) {
+                    rPlusOpnd.emplace_back(std::make_pair(&static_cast<Operand &>(inOpnd), pType));
+                }
+                break;
+            }
+            default:
+                CHECK_FATAL(0, "Inline asm input expression not handled");
+        }
+    }
+    std::vector<Operand *> intrnOpnds;
+    intrnOpnds.emplace_back(asmString);
+    intrnOpnds.emplace_back(listOutputOpnd);
+    intrnOpnds.emplace_back(listClobber);
+    intrnOpnds.emplace_back(listInputOpnd);
+    intrnOpnds.emplace_back(listOutConstraint);
+    intrnOpnds.emplace_back(listInConstraint);
+    intrnOpnds.emplace_back(listOutRegPrefix);
+    intrnOpnds.emplace_back(listInRegPrefix);
+    Insn *asmInsn = &GetInsnBuilder()->BuildInsn(MOP_asm, intrnOpnds);
+    GetCurBB()->AppendInsn(*asmInsn);
+
+    /* process listOutputOpnd */
+    for (size_t i = 0; i < node.asmOutputs.size(); ++i) {
+        bool isOutputTempNode = false;
+        RegOperand *rPOpnd = nullptr;
+        /* process output constraint */
+        std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.outputConstraints[i]);
+
+        listOutConstraint->stringList.push_back(static_cast<StringOperand *>(&CreateStringOperand(str)));
+        if (str[0] == '+') {
+            CHECK_FATAL(!rPlusOpnd.empty(), "Need r+ operand");
+            rPOpnd = static_cast<RegOperand *>((rPlusOpnd.begin()->first));
+            listOutputOpnd->PushOpnd(*rPOpnd);
+            listOutRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                &CreateStringOperand(GetRegPrefixFromPrimType(rPlusOpnd.begin()->second, rPOpnd->GetSize(), str))));
+            if (!rPlusOpnd.empty()) {
+                rPlusOpnd.pop_front();
+            }
+            isOutputTempNode = true;
+        }
+        if (str.find("Q") != std::string::npos || str.find("m") != std::string::npos) {
+            continue;
+        }
+        /* process output operands */
+        StIdx stIdx = node.asmOutputs[i].first;
+        RegFieldPair regFieldPair = node.asmOutputs[i].second;
+        if (regFieldPair.IsReg()) {
+            PregIdx pregIdx = static_cast<PregIdx>(regFieldPair.GetPregIdx());
+            MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx);
+            RegOperand *outOpnd = isOutputTempNode
+                                      ? rPOpnd
+                                      : &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+            PrimType srcType = mirPreg->GetPrimType();
+            PrimType destType = srcType;
+            if (GetPrimTypeBitSize(destType) < k32BitSize) {
+                destType = IsSignedInteger(destType) ? PTY_i32 : PTY_u32;
+            }
+            RegType rtype = GetRegTyFromPrimTy(srcType);
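+            /* for "+" (read-write) constrained outputs, the temporary that the asm
+             * wrote to is copied back to the pseudo register here */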
+            RegOperand &opnd0 = isOutputTempNode
+                                    ? GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx))
+                                    : CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(srcType)));
+            SelectCopy(opnd0, destType, *outOpnd, srcType);
+            if (!isOutputTempNode) {
+                listOutputOpnd->PushOpnd(static_cast<RegOperand &>(*outOpnd));
+                listOutRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                    &CreateStringOperand(GetRegPrefixFromPrimType(srcType, outOpnd->GetSize(), str))));
+            }
+        } else {
+            MIRSymbol *var;
+            if (stIdx.IsGlobal()) {
+                var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+            } else {
+                var = mirModule.CurFunction()->GetSymbolTabItem(stIdx.Idx());
+            }
+            CHECK_FATAL(var != nullptr, "var should not be nullptr");
+            if (!noReplacement || var->GetAsmAttr() != UStrIdx(0)) {
+                RegOperand *outOpnd = nullptr;
+                PrimType pty = GlobalTables::GetTypeTable().GetTypeTable().at(var->GetTyIdx())->GetPrimType();
+                if (var->GetAsmAttr() != UStrIdx(0)) {
+                    std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(var->GetAsmAttr());
+                    outOpnd = &GetOrCreatePhysicalRegisterOperand(regDesp);
+                } else {
+                    RegType rtype = GetRegTyFromPrimTy(pty);
+                    outOpnd = isOutputTempNode ? rPOpnd
+                                               : &CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(pty)));
+                }
+                SaveReturnValueInLocal(node.asmOutputs, i, PTY_a64, *outOpnd, node);
+                if (!isOutputTempNode) {
+                    listOutputOpnd->PushOpnd(static_cast<RegOperand &>(*outOpnd));
+                    listOutRegPrefix->stringList.push_back(static_cast<StringOperand *>(
+                        &CreateStringOperand(GetRegPrefixFromPrimType(pty, outOpnd->GetSize(), str))));
+                }
+            }
+        }
+    }
+    if (noReplacement) {
+        return;
+    }
+
+    /* process listClobber */
+    for (size_t i = 0; i < node.clobberList.size(); ++i) {
+        std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.clobberList[i]);
+        auto regno = static_cast<uint32>(str[1] - '0');
+        if (str[2] >= '0' && str[2] <= '9') {
+            regno = regno * kDecimalMax + static_cast<uint32>((str[2] - '0'));
+        }
+        RegOperand *reg;
+        switch (str[0]) {
+            case 'w': {
+                reg = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(regno + R0), k32BitSize, kRegTyInt);
+                listClobber->PushOpnd(*reg);
+                break;
+            }
+            case 'x': {
+                reg = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(regno + R0), k64BitSize, kRegTyInt);
+                listClobber->PushOpnd(*reg);
+                break;
+            }
+            case 's': {
+                reg = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(regno + V0), k32BitSize, kRegTyFloat);
+                listClobber->PushOpnd(*reg);
+                break;
+            }
+            case 'd': {
+                reg = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(regno + V0), k64BitSize, kRegTyFloat);
+                listClobber->PushOpnd(*reg);
+                break;
+            }
+            case 'v': {
+                reg = &GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(regno + V0), k64BitSize, kRegTyFloat);
+                listClobber->PushOpnd(*reg);
+                break;
+            }
+            case 'c': {
+                asmInsn->SetAsmDefCondCode();
+                break;
+            }
+            case 'm': {
+                asmInsn->SetAsmModMem();
+                break;
+            }
+            default:
+                CHECK_FATAL(0, "Inline asm clobber list not handled");
+        }
+    }
+}
+
+void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0)
+{
+    if (GetCG()->IsLmbc()) {
+        PrimType lhsSize = stmt.GetPrimType();
+        PrimType rhsSize = stmt.Opnd(0)->GetPrimType();
+        if (lhsSize != rhsSize && stmt.Opnd(0)->GetOpCode() == OP_ireadoff) {
+            Insn *prev = GetCurBB()->GetLastInsn();
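+            /* rather than emitting a separate extension, retarget the preceding
+             * 32-bit signed load to its 64-bit form */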
+            if (prev->GetMachineOpcode() == MOP_wldrsb || prev->GetMachineOpcode() == MOP_wldrsh) {
+                opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType()));
+                prev->SetMOP(AArch64CG::kMd[prev->GetMachineOpcode() == MOP_wldrsb ? MOP_xldrsb : MOP_xldrsh]);
+            } else if (prev->GetMachineOpcode() == MOP_wldr && stmt.GetPrimType() == PTY_i64) {
+                opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType()));
+                prev->SetMOP(AArch64CG::kMd[MOP_xldrsw]);
+            }
+        }
+    }
+    RegOperand *regOpnd = nullptr;
+    PregIdx pregIdx = stmt.GetRegIdx();
+    if (IsSpecialPseudoRegister(pregIdx)) {
+        if (GetCG()->IsLmbc() && stmt.GetPrimType() == PTY_agg) {
+            if (static_cast<RegOperand &>(opnd0).IsOfIntClass()) {
+                regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_i64);
+            } else if (opnd0.GetSize() <= k4ByteSize) {
+                regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f32);
+            } else {
+                regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f64);
+            }
+        } else {
+            regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType());
+        }
+    } else {
+        regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+    }
+    /* look at rhs */
+    PrimType rhsType = stmt.Opnd(0)->GetPrimType();
+    if (GetCG()->IsLmbc() && rhsType == PTY_agg) {
+        /* This occurs when a call returns a small struct */
+        /* The subtree should already have taken care of the agg type that is in excess of 8 bytes */
+        rhsType = PTY_i64;
+    }
+    PrimType dtype = rhsType;
+    if (GetPrimTypeBitSize(dtype) < k32BitSize) {
+        DEBUG_ASSERT(IsPrimitiveInteger(dtype), "");
+        dtype = IsSignedInteger(dtype) ? PTY_i32 : PTY_u32;
+    }
+    DEBUG_ASSERT(regOpnd != nullptr, "null ptr check!");
+    SelectCopy(*regOpnd, dtype, opnd0, rhsType);
+    if (GetCG()->GenerateVerboseCG()) {
+        if (GetCurBB()->GetLastInsn()) {
+            GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
+        } else if (GetCurBB()->GetPrev()->GetLastInsn()) {
+            GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; ");
+        }
+    }
+
+    if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) {
+        MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx);
+        PrimType stype = GetTypeFromPseudoRegIdx(pregIdx);
+        MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx);
+        uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(srcBitLength, stype), *regOpnd, *dest));
+    } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) {
+        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *regOpnd);
+        GetCurBB()->AppendInsn(pseudo);
+    } else if (regOpnd->GetRegisterNumber() >= V0 && regOpnd->GetRegisterNumber() <= V3) {
+        Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *regOpnd);
+        GetCurBB()->AppendInsn(pseudo);
+    }
+    if (stmt.GetPrimType() == PTY_ref) {
+        regOpnd->SetIsReference(true);
+        AddReferenceReg(regOpnd->GetRegisterNumber());
+    }
+    if (pregIdx > 0) {
+        // special MIRPreg is not supported
+        SetPregIdx2Opnd(pregIdx, *regOpnd);
+    }
+    const auto &derived2BaseRef = GetFunction().GetDerived2BaseRef();
+    auto itr = derived2BaseRef.find(pregIdx);
+    if (itr != derived2BaseRef.end()) {
+        auto *opnd = GetOpndFromPregIdx(itr->first);
+        CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
+        auto &derivedRegOpnd = static_cast<RegOperand &>(*opnd);
+        opnd = GetOpndFromPregIdx(itr->second);
+        CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand");
+        auto &baseRegOpnd = static_cast<RegOperand &>(*opnd);
+        derivedRegOpnd.SetBaseRefOpnd(baseRegOpnd);
+    }
+}
+
+MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 align)
+{
+    MemOperand *lhsMemOpnd = &memOpnd;
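+    /* an offset the instruction cannot encode is split into an explicit add on a
+     * scratch register, leaving an in-range offset behind */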
+    if ((lhsMemOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*lhsMemOpnd, align * kBitsPerByte)) {
+        RegOperand *addReg = &CreateRegisterOperandOfType(PTY_i64);
+        lhsMemOpnd = &SplitOffsetWithAddInstruction(*lhsMemOpnd, align * k8BitSize, addReg->GetRegisterNumber());
+    }
+    return lhsMemOpnd;
+}
+
+MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx)
+{
+    auto *a64MemOpnd = &memOpnd;
+    if ((a64MemOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, &memOpnd, opndIdx)) {
+        if (opndIdx == kInsnSecondOpnd) {
+            a64MemOpnd = &SplitOffsetWithAddInstruction(*a64MemOpnd, dSize);
+        } else if (opndIdx == kInsnThirdOpnd) {
+            a64MemOpnd =
+                &SplitOffsetWithAddInstruction(*a64MemOpnd, dSize, AArch64reg::kRinvalid, false, nullptr, true);
+        } else {
+            CHECK_FATAL(false, "NYI");
+        }
+    }
+    return a64MemOpnd;
+}
+
+MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, bool needLow12)
+{
+    MemOperand *memOpnd;
+    if (sym.GetStorageClass() == kScFormal && GetBecommon().GetTypeSize(sym.GetTyIdx()) > k16ByteSize) {
+        /* a formal larger than 16 bytes is copied by the caller and a pointer to it is passed; */
+        /* otherwise it is passed in registers and is accessed directly. */
+        memOpnd = &GetOrCreateMemOpnd(sym, 0, align * kBitsPerByte);
+        RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+        Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *memOpnd);
+        GetCurBB()->AppendInsn(ldInsn);
+        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, vreg, nullptr,
+                                      &GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize), nullptr);
+    } else {
+        memOpnd = &GetOrCreateMemOpnd(sym, offset, align * kBitsPerByte, false, needLow12);
+    }
+    return FixLargeMemOpnd(*memOpnd, align);
+}
+
+RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol &symbol, int64 offsetVal,
+                                                  RegOperand &BaseReg)
+{
+    RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+    if (isLo12) {
+        StImmOperand &stImm = CreateStImmOperand(symbol, 0, 0);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, *tgtAddr, BaseReg, stImm));
+    } else {
+        ImmOperand &imm = CreateImmOperand(offsetVal, k64BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, BaseReg, imm));
+    }
+    return tgtAddr;
+}
+
+RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd)
+{
+    RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+    OfstOperand *ofstOpnd = &GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, exprOpnd, *ofstOpnd));
+    return tgtAddr;
+}
+
+RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(uint64 copySize)
+{
+    RegOperand *vregMemcpySize = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+    ImmOperand *sizeOpnd = &CreateImmOperand(static_cast<int64>(copySize), k64BitSize, false);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *vregMemcpySize, *sizeOpnd));
+    return vregMemcpySize;
+}
+
+Insn *AArch64CGFunc::AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &newStrLdr)
+{
+    if (bothUnion) {
+        if (lastStrLdr == nullptr) {
+            GetCurBB()->AppendInsn(newStrLdr);
+        } else {
+            GetCurBB()->InsertInsnAfter(*lastStrLdr, newStrLdr);
+        }
+    } else {
+        GetCurBB()->AppendInsn(newStrLdr);
+    }
+    return &newStrLdr;
+}
+
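+/* Calling-convention locators keep per-call state, so a cached instance is
+ * re-initialized before it is handed out again. */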
+CCImpl *AArch64CGFunc::GetOrCreateLocator(CallConvKind cc)
+{
+    auto it = hashCCTable.find(cc);
+    if (it != hashCCTable.end()) {
+        it->second->Init();
+        return it->second;
+    }
+    CCImpl *res = nullptr;
+    if (cc == kCCall) {
+        res = memPool->New<AArch64CallConvImpl>(GetBecommon());
+    } else if (cc == kWebKitJS) {
+        res = memPool->New<AArch64WebKitJSCC>(GetBecommon());
+    } else if (cc == kGHC) {
+        res = memPool->New<GHCCC>(GetBecommon());
+    } else {
+        CHECK_FATAL(false, "unsupported yet");
+    }
+    hashCCTable[cc] = res;
+    return res;
+}
+void AArch64CGFunc::SelectAggDassign(DassignNode &stmt)
+{
+    MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx());
+    uint32 lhsOffset = 0;
+    MIRType *lhsType = lhsSymbol->GetType();
+    bool bothUnion = false;
+    if (stmt.GetFieldID() != 0) {
+        MIRStructType *structType = static_cast<MIRStructType *>(lhsSymbol->GetType());
+        DEBUG_ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure");
+        lhsType = structType->GetFieldType(stmt.GetFieldID());
+        lhsOffset = static_cast<uint32>(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first);
+        bothUnion |= (structType->GetKind() == kTypeUnion);
+    }
+    uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex());
+    uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex());
+
+    uint32 rhsAlign;
+    uint32 alignUsed;
+    uint32 rhsOffset = 0;
+    if (stmt.GetRHS()->GetOpCode() == OP_dread) {
+        AddrofNode *rhsDread = static_cast<AddrofNode *>(stmt.GetRHS());
+        MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx());
+        MIRType *rhsType = rhsSymbol->GetType();
+        if (rhsDread->GetFieldID() != 0) {
+            MIRStructType *structType = static_cast<MIRStructType *>(rhsSymbol->GetType());
+            DEBUG_ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure");
+            rhsType = structType->GetFieldType(rhsDread->GetFieldID());
+            rhsOffset = static_cast<uint32>(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first);
+            bothUnion &= (structType->GetKind() == kTypeUnion);
+        }
+        bothUnion &= (rhsSymbol == lhsSymbol);
+        rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
+        alignUsed = std::min(lhsAlign, rhsAlign);
+        DEBUG_ASSERT(alignUsed != 0, "expect non-zero");
+        uint32 copySize = GetAggCopySize(lhsOffset, rhsOffset, alignUsed);
+        MemOperand *rhsBaseMemOpnd;
+        if (IsParamStructCopy(*rhsSymbol)) {
+            rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset, static_cast<int>(copySize * k8BitSize));
+        } else {
+            rhsBaseMemOpnd = &GetOrCreateMemOpnd(*rhsSymbol, rhsOffset, copySize * k8BitSize, false, true);
+            rhsBaseMemOpnd = FixLargeMemOpnd(*rhsBaseMemOpnd, copySize);
+        }
+        RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister();
+        int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue();
+        MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true);
+        RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister();
+        int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue();
+        bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li);
+        bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li);
+        if (lhsSize > kParmMemcpySize) {
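+            /* beyond kParmMemcpySize bytes an inline load/store sequence is not
+             * worthwhile; emit a call to memcpy instead */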
+            std::vector<Operand *> opndVec;
+            RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+            opndVec.push_back(regResult); /* result */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(rhsIsLo12, *rhsSymbol, rhsOffsetVal, *rhsBaseReg)); /* param 1 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */
+
+            SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64);
+
+            return;
+        }
+        Insn *lastLdr = nullptr;
+        Insn *lastStr = nullptr;
+        for (uint32 i = 0; i < (lhsSize / copySize); i++) {
+            uint64 rhsBaseOffset = i * copySize + static_cast<uint64>(rhsOffsetVal);
+            uint64 lhsBaseOffset = i * copySize + static_cast<uint64>(lhsOffsetVal);
+            MemOperand::AArch64AddressingMode addrMode =
+                rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr;
+            OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize);
+            /* generate the load */
+            MemOperand *rhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym);
+            RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize)));
+            bool doPair = (!rhsIsLo12 && !lhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize)));
+            RegOperand *result1 = nullptr;
+            Insn *newLoadInsn = nullptr;
+            if (doPair) {
+                MOperator mOpLDP = (copySize == k4BitSize) ? MOP_wldp : MOP_xldp;
+                result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize)));
+                rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd);
+                newLoadInsn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd);
+            } else {
+                MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32);
+                rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd);
+                newLoadInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd);
+            }
+            DEBUG_ASSERT(newLoadInsn != nullptr, "build load instruction failed in SelectAggDassign");
+            lastLdr = AggtStrLdrInsert(bothUnion, lastLdr, *newLoadInsn);
+            /* generate the store */
+            OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize);
+            addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            sym = lhsIsLo12 ? lhsSymbol : nullptr;
+            Insn *newStoreInsn = nullptr;
+            MemOperand *lhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym);
+            if (doPair) {
+                MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp;
+                lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd);
+                DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr");
+                newStoreInsn = &GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd);
+                i++;
+            } else {
+                MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32);
+                lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd);
+                newStoreInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd);
+            }
+            DEBUG_ASSERT(newStoreInsn != nullptr, "build store instruction failed in SelectAggDassign");
+            lastStr = AggtStrLdrInsert(bothUnion, lastStr, *newStoreInsn);
+        }
+        /* take care of extra content at the end less than the unit */
+        uint64 lhsSizeCovered = (lhsSize / copySize) * copySize;
+        uint32 newAlignUsed = copySize;
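+        /* copy whatever tail remains with progressively halved power-of-two
+         * accesses until the whole aggregate is covered */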
+        while (lhsSizeCovered < lhsSize) {
+            newAlignUsed = newAlignUsed >> 1;
+            CHECK_FATAL(newAlignUsed != 0, "expect non-zero");
+            if ((lhsSizeCovered + newAlignUsed) > lhsSize) {
+                continue;
+            }
+            /* generate the load */
+            MemOperand *rhsMemOpnd;
+            MemOperand::AArch64AddressingMode addrMode =
+                rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr;
+            OfstOperand &rhsOfstOpnd =
+                GetOrCreateOfstOpnd(lhsSizeCovered + static_cast<uint64>(rhsOffsetVal), k32BitSize);
+            rhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym);
+            rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed);
+            regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed));
+            RegOperand &result = CreateVirtualRegisterOperand(vRegNO);
+            MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd));
+            /* generate the store */
+            addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            sym = lhsIsLo12 ? lhsSymbol : nullptr;
+            OfstOperand &lhsOfstOpnd =
+                GetOrCreateOfstOpnd(lhsSizeCovered + static_cast<uint64>(lhsOffsetVal), k32BitSize);
+            MemOperand *lhsMemOpnd;
+            lhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym);
+            lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed);
+            mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd));
+            lhsSizeCovered += newAlignUsed;
+        }
+    } else if (stmt.GetRHS()->GetOpCode() == OP_iread) {
+        IreadNode *rhsIread = static_cast<IreadNode *>(stmt.GetRHS());
+        RegOperand *addrOpnd = static_cast<RegOperand *>(HandleExpr(*rhsIread, *rhsIread->Opnd(0)));
+        addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType());
+        MIRPtrType *rhsPointerType =
+            static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx()));
+        MIRType *rhsType = static_cast<MIRStructType *>(
+            GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx()));
+        bool isRefField = false;
+        if (rhsIread->GetFieldID() != 0) {
+            MIRStructType *rhsStructType = static_cast<MIRStructType *>(rhsType);
+            DEBUG_ASSERT(rhsStructType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure");
+            rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID());
+            rhsOffset =
+                static_cast<uint32>(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first);
+            isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID());
+        }
+        rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
+        alignUsed = std::min(lhsAlign, rhsAlign);
+        DEBUG_ASSERT(alignUsed != 0, "expect non-zero");
+        uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed);
+        MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true);
+        RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister();
+        int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue();
+        bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li);
+        if (lhsSize > kParmMemcpySize) {
+            std::vector<Operand *> opndVec;
+            RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+            opndVec.push_back(regResult); /* result */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffset, *addrOpnd)); /* param 1 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */
+
+            SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64);
+
+            return;
+        }
+        for (uint32 i = 0; i < (lhsSize / copySize); i++) {
+            uint64 rhsBaseOffset = rhsOffset + i * copySize;
+            uint64 lhsBaseOffset = static_cast<uint64>(lhsOffsetVal) + i * copySize;
+            /* generate the load */
+            OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize);
+            MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, addrOpnd,
+                                                         nullptr, &ofstOpnd, nullptr);
+            regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, copySize));
+            RegOperand &result = CreateVirtualRegisterOperand(vRegNO);
+            bool doPair = (!lhsIsLo12 && copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize));
+            Insn *insn = nullptr;
+            RegOperand *result1 = nullptr;
+            if (doPair) {
+                MOperator mOpLDP = (copySize == k4BitSize) ? MOP_wldp : MOP_xldp;
+                regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize));
+                result1 = &CreateVirtualRegisterOperand(vRegNO1);
+                rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd);
+                insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd);
+            } else {
+                MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32);
+                rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd);
+                insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd);
+            }
+            insn->MarkAsAccessRefField(isRefField);
+            GetCurBB()->AppendInsn(*insn);
+            /* generate the store */
+            MemOperand::AArch64AddressingMode addrMode =
+                lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            MIRSymbol *sym = lhsIsLo12 ? lhsSymbol : nullptr;
+            OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize);
+            MemOperand *lhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym);
+            if (doPair) {
+                MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp;
+                lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd);
+                DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr");
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd));
+                i++;
+            } else {
+                MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32);
+                lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd);
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd));
+            }
+        }
+        /* take care of extra content at the end less than the unit of alignUsed */
+        uint64 lhsSizeCovered = (lhsSize / copySize) * copySize;
+        uint32 newAlignUsed = copySize;
+        while (lhsSizeCovered < lhsSize) {
+            newAlignUsed = newAlignUsed >> 1;
+            CHECK_FATAL(newAlignUsed != 0, "expect non-zero");
+            if ((lhsSizeCovered + newAlignUsed) > lhsSize) {
+                continue;
+            }
+            /* generate the load */
+            OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize);
+            MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, addrOpnd,
+                                                         nullptr, &ofstOpnd, nullptr);
+            rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed);
+            RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed)));
+            MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32);
+            Insn &insn = GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd);
+            insn.MarkAsAccessRefField(isRefField);
+            GetCurBB()->AppendInsn(insn);
+            /* generate the store */
+            MemOperand::AArch64AddressingMode addrMode =
+                lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            MIRSymbol *sym = lhsIsLo12 ? lhsSymbol : nullptr;
+            OfstOperand &lhsOfstOpnd =
+                GetOrCreateOfstOpnd(lhsSizeCovered + static_cast<uint64>(lhsOffsetVal), k32BitSize);
+            MemOperand *lhsMemOpnd;
+            lhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym);
+            lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed);
+            mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd));
+            lhsSizeCovered += newAlignUsed;
+        }
+    } else {
+        DEBUG_ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI");
+        bool isRet = false;
+        if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) {
+            RegreadNode *rhsregread = static_cast<RegreadNode *>(stmt.GetRHS());
+            PregIdx pregIdx = rhsregread->GetRegIdx();
+            if (IsSpecialPseudoRegister(pregIdx)) {
+                if ((-pregIdx) == kSregRetval0) {
+                    CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here");
+                    AArch64CallConvImpl parmlocator(GetBecommon());
+                    CCLocInfo pLoc;
+                    PrimType retPtype;
+                    RegType regType;
+                    uint32 memSize;
+                    uint32 regSize;
+                    parmlocator.LocateRetVal(*lhsType, pLoc);
+                    AArch64reg r[kFourRegister];
+                    r[0] = static_cast<AArch64reg>(pLoc.reg0);
+                    r[1] = static_cast<AArch64reg>(pLoc.reg1);
+                    r[2] = static_cast<AArch64reg>(pLoc.reg2);
+                    r[3] = static_cast<AArch64reg>(pLoc.reg3);
+                    if (pLoc.numFpPureRegs) {
+                        regSize = (pLoc.fpSize == k4ByteSize) ? k32BitSize : k64BitSize;
+                        memSize = pLoc.fpSize;
+                        retPtype = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64;
+                        regType = kRegTyFloat;
+                    } else {
+                        regSize = k64BitSize;
+                        memSize = k8BitSize;
+                        retPtype = PTY_u64;
+                        regType = kRegTyInt;
+                    }
+                    for (uint32 i = 0; i < kFourRegister; ++i) {
+                        if (r[i] == kRinvalid) {
+                            break;
+                        }
+                        RegOperand &parm = GetOrCreatePhysicalRegisterOperand(r[i], regSize, regType);
+                        Operand &mOpnd = GetOrCreateMemOpnd(*lhsSymbol, memSize * i, regSize);
+                        GetCurBB()->AppendInsn(
+                            GetInsnBuilder()->BuildInsn(PickStInsn(regSize, retPtype), parm, mOpnd));
+                    }
+                    isRet = true;
+                }
+            }
+        }
+        CHECK_FATAL(isRet, "SelectAggDassign: NYI");
+    }
+}
+
+static MIRType *GetPointedToType(const MIRPtrType &pointerType)
+{
+    MIRType *aType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType.GetPointedTyIdx());
+    if (aType->GetKind() == kTypeArray) {
+        MIRArrayType *arrayType = static_cast<MIRArrayType *>(aType);
+        return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx());
+    }
+    if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) {
+        MIRFarrayType *farrayType = static_cast<MIRFarrayType *>(aType);
+        return GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx());
+    }
+    return aType;
+}
+
+void AArch64CGFunc::SelectIassign(IassignNode &stmt)
+{
+    int32 offset = 0;
+    MIRPtrType *pointerType =
+        static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()));
+    DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iassign node");
+    MIRType *pointedType = nullptr;
+    bool isRefField = false;
+    AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone;
+
+    if (stmt.GetFieldID() != 0) {
+        MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx());
+        MIRStructType *structType = nullptr;
+        if (pointedTy->GetKind() != kTypeJArray) {
+            structType = static_cast<MIRStructType *>(pointedTy);
+        } else {
+            /* it's a Jarray type. using its parent's field info: java.lang.Object */
+            structType = static_cast<MIRJarrayType *>(pointedTy)->GetParentType();
+        }
+        DEBUG_ASSERT(structType != nullptr, "SelectIassign: non-zero fieldID for non-structure");
+        pointedType = structType->GetFieldType(stmt.GetFieldID());
+        offset = GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first;
+        isRefField = GetBecommon().IsRefField(*structType, stmt.GetFieldID());
+    } else {
+        pointedType = GetPointedToType(*pointerType);
+        if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) {
+            MIRType *nextPointedType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(
+                static_cast<MIRPtrType *>(pointedType)->GetPointedTyIdx());
+            if (nextPointedType->GetKind() != kTypeScalar) {
+                isRefField = true; /* write into an object array or a high-dimensional array */
+            }
+        }
+    }
+
+    PrimType styp = stmt.GetRHS()->GetPrimType();
+    Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS());
+    Operand &srcOpnd = LoadIntoRegister(*valOpnd, (IsPrimitiveInteger(styp) || IsPrimitiveVectorInteger(styp)),
+                                        GetPrimTypeBitSize(styp));
+
+    PrimType destType = pointedType->GetPrimType();
+    if (destType == PTY_agg) {
+        destType = PTY_a64;
+    }
+    if (IsPrimitiveVector(styp)) { /* a vector type */
+        destType = styp;
+    }
+    DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
+    MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset);
+    auto dataSize = GetPrimTypeBitSize(destType);
+    memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd;
+    if (isVolStore && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) {
+        memOrd = AArch64isa::kMoRelease;
+        isVolStore = false;
+    }
+
+    if (memOrd == AArch64isa::kMoNone) {
+        SelectCopy(memOpnd, destType, srcOpnd, destType);
+    } else {
+        AArch64CGFunc::SelectStoreRelease(memOpnd, destType, srcOpnd, destType, memOrd, false);
+    }
+    GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField);
+}
+
+void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt)
+{
+    int32 offset = stmt.GetOffset();
+    PrimType destType = stmt.GetPrimType();
+
+    MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.GetBOpnd(0), offset);
+    auto dataSize = GetPrimTypeBitSize(destType);
+    memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? ConstraintOffsetToSafeRegion(dataSize, memOpnd) : memOpnd;
+    Operand *valOpnd = HandleExpr(stmt, *stmt.GetBOpnd(1));
+    Operand &srcOpnd = LoadIntoRegister(*valOpnd, true, GetPrimTypeBitSize(destType));
+    SelectCopy(memOpnd, destType, srcOpnd, destType);
+}
+
+MemOperand *AArch64CGFunc::GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg baseRegno)
+{
+    MemOperand *memOpnd;
+    RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(baseRegno, k64BitSize, kRegTyInt);
+    uint32 bitlen = byteSize * kBitsPerByte;
+    if (offset < 0 && offset < -256) {
+        RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64);
+        ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true);
+        Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd);
+        GetCurBB()->AppendInsn(addInsn);
+        OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize);
+        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, nullptr, offsetOpnd, nullptr);
+    } else {
+        OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize);
+        memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr);
+    }
+    memOpnd->SetStackMem(true);
+    return memOpnd;
+}
+
+void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd)
+{
+    int32 offset = stmt.GetOffset();
+    PrimType primType = stmt.GetPrimType();
+    MIRType *rType = GetLmbcCallReturnType();
+    bool isPureFpStruct = false;
+    uint32 numRegs = 0;
+    if (rType && rType->GetPrimType() == PTY_agg && opnd.IsRegister() &&
+        static_cast<RegOperand &>(opnd).IsPhysicalRegister()) {
+        CHECK_FATAL(rType->GetSize() <= k16BitSize, "SelectIassignfpoff invalid agg size");
+        uint32 fpSize;
+        numRegs = FloatParamRegRequired(static_cast<MIRStructType *>(rType), fpSize);
+        if (numRegs) {
+            primType = (fpSize == k4ByteSize) ? PTY_f32 : PTY_f64;
+            isPureFpStruct = true;
+        }
+    }
+    uint32 byteSize = GetPrimTypeSize(primType);
+    uint32 bitlen = byteSize * kBitsPerByte;
+    if (isPureFpStruct) {
+        for (uint32 i = 0; i < numRegs; ++i) {
+            MemOperand *memOpnd = GenLmbcFpMemOperand(offset + static_cast<int32>(i * byteSize), byteSize);
+            RegOperand &srcOpnd = GetOrCreatePhysicalRegisterOperand(AArch64reg(V0 + i), bitlen, kRegTyFloat);
+            MOperator mOp = PickStInsn(bitlen, primType);
+            Insn &store = GetInsnBuilder()->BuildInsn(mOp, srcOpnd, *memOpnd);
+            GetCurBB()->AppendInsn(store);
+        }
+    } else {
+        Operand &srcOpnd = LoadIntoRegister(opnd, primType);
+        MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize);
+        MOperator mOp = PickStInsn(bitlen, primType);
+        Insn &store = GetInsnBuilder()->BuildInsn(mOp, srcOpnd, *memOpnd);
+        GetCurBB()->AppendInsn(store);
+    }
+}
+
+/* Load and assign to a new register. To be moved to the correct call register OR stack
+ * location in LmbcSelectParmList */
+void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd)
+{
+    if (GetLmbcArgInfo() == nullptr) {
+        LmbcArgInfo *p = memPool->New<LmbcArgInfo>(*GetFuncScopeAllocator());
+        SetLmbcArgInfo(p);
+    }
+    uint32 byteLen = GetPrimTypeSize(pTy);
+    uint32 bitLen = byteLen * kBitsPerByte;
+    RegType regTy = GetRegTyFromPrimTy(pTy);
+    int32 curRegArgs = GetLmbcArgsInRegs(regTy);
+    if (curRegArgs < static_cast<int32>(k8ByteSize)) {
+        RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen));
+        SelectCopy(*res, pTy, opnd, pTy);
+        SetLmbcArgInfo(res, pTy, offset, 1);
+    } else {
+        /* Move into allocated space */
+        Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen);
+        Operand &reg = LoadIntoRegister(opnd, pTy);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(bitLen, pTy), reg, memOpd));
+    }
+    IncLmbcArgsInRegs(regTy); /* num of args in registers */
+    IncLmbcTotalArgs();       /* num of args */
+}
+
+/* Search for CALL/ICALL/ICALLPROTO node, must be called from a blkassignoff node */
+MIRType *AArch64CGFunc::LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector<TyIdx> **parmList) const
+{
+    for (; stmt != nullptr; stmt = stmt->GetNext()) {
+        if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) {
+            break;
+        }
+    }
+    CHECK_FATAL(stmt && (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto),
+                "blkassign sp not followed by call");
+    uint32 nargs = GetLmbcTotalArgs();
+    MIRType *ty = nullptr;
+    if (stmt->GetOpCode() == OP_call) {
+        CallNode *callNode = static_cast<CallNode *>(stmt);
+        MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx());
+        if (fn->GetFormalCount() > 0) {
+            ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[nargs].formalTyIdx);
+        }
+        *parmList = &fn->GetParamTypes();
+        // would return null if the actual parameter is bogus
+    } else if (stmt->GetOpCode() == OP_icallproto) {
+        IcallNode *icallproto = static_cast<IcallNode *>(stmt);
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx());
+        MIRFuncType *fType = static_cast<MIRFuncType *>(type);
+        ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs));
+        *parmList = &fType->GetParamTypeList();
+    } else {
+        CHECK_FATAL(stmt->GetOpCode() == OP_icallproto, "LmbcGetAggTyFromCallSite:: unexpected call operator");
+    }
+    return ty;
+}
+
+/* return true if blkassignoff is for return, false otherwise */
+bool AArch64CGFunc::LmbcSmallAggForRet(const BlkassignoffNode &bNode, const Operand *src)
+{
+    PrimType pTy;
+    uint32 size = 0;
+    AArch64reg regno = static_cast<AArch64reg>(static_cast<const RegOperand *>(src)->GetRegisterNumber());
+    MIRFunction *func = &GetFunction();
+
+    if (func->IsReturnStruct()) {
+        /* This blkassignoff is for struct return? */
+        uint32 loadSize;
+        uint32 numRegs = 0;
+        if (bNode.GetNext()->GetOpCode() == OP_return) {
+            MIRStructType *ty = static_cast<MIRStructType *>(
+                GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFuncRetStructTyIdx()));
+            uint32 fpregs = FloatParamRegRequired(ty, size);
+            if (fpregs > 0) {
+                /* pure floating point in agg */
+                numRegs = fpregs;
+                pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64;
+                loadSize = GetPrimTypeSize(pTy) * kBitsPerByte;
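+                /* a pure-FP aggregate is returned field by field in successive
+                 * floating-point return registers, starting at v0 */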
+                for (uint32 i = 0; i < fpregs; i++) {
+                    int32 s = (i == 0) ? 0 : static_cast<int32>(i * size);
+                    MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte);
+                    AArch64reg reg = static_cast<AArch64reg>(V0 + i);
+                    RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, loadSize, kRegTyFloat);
+                    SelectCopy(*res, pTy, mem, pTy);
+                }
+            } else {
+                /* int/float mixed */
+                numRegs = 2;
+                pTy = PTY_i64;
+                size = k4ByteSize;
+                switch (bNode.blockSize) {
+                    case 1:
+                        pTy = PTY_i8;
+                        break;
+                    case 2:
+                        pTy = PTY_i16;
+                        break;
+                    case 4:
+                        pTy = PTY_i32;
+                        break;
+                    default:
+                        size = k8ByteSize; /* pTy remains i64 */
+                        break;
+                }
+                loadSize = GetPrimTypeSize(pTy) * kBitsPerByte;
+                MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte);
+                RegOperand *res = &GetOrCreatePhysicalRegisterOperand(R0, loadSize, kRegTyInt);
+                SelectCopy(*res, pTy, mem, pTy);
+                if (bNode.blockSize > static_cast<int32>(k8ByteSize)) {
+                    MemOperand &newMem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte);
+                    res = &GetOrCreatePhysicalRegisterOperand(R1, loadSize, kRegTyInt);
+                    SelectCopy(*res, pTy, newMem, pTy);
+                }
+            }
+            bool intReg = fpregs == 0;
+            for (uint32 i = 0; i < numRegs; i++) {
+                AArch64reg preg = static_cast<AArch64reg>((intReg ? R0 : V0) + i);
+                MOperator mop = intReg ? MOP_pseudo_ret_int : MOP_pseudo_ret_float;
+                RegOperand &dest =
+                    GetOrCreatePhysicalRegisterOperand(preg, loadSize, intReg ? kRegTyInt : kRegTyFloat);
+                Insn &pseudo = GetInsnBuilder()->BuildInsn(mop, dest);
+                GetCurBB()->AppendInsn(pseudo);
+            }
+            return true;
+        }
+    }
+    return false;
+}
+
+/* return true if blkassignoff is for a call argument, false otherwise */
+bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector<TyIdx> **parmList)
+{
+    AArch64reg regno = static_cast<AArch64reg>(static_cast<const RegOperand *>(src)->GetRegisterNumber());
+    if (IsBlkassignForPush(bNode)) {
+        PrimType pTy = PTY_i64;
+        MIRStructType *ty = static_cast<MIRStructType *>(LmbcGetAggTyFromCallSite(&bNode, parmList));
+        uint32 size = 0;
+        uint32 fpregs = ty ? FloatParamRegRequired(ty, size) : 0; /* fp size determined */
+        if (fpregs > 0) {
+            /* pure floating point in agg */
+            pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64;
+            for (uint32 i = 0; i < fpregs; i++) {
+                int32 s = (i == 0) ? 0 : static_cast<int32>(i * size);
+                MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte);
+                RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size));
+                SelectCopy(*res, pTy, mem, pTy);
+                SetLmbcArgInfo(res, pTy, 0, static_cast<int32>(fpregs));
+                IncLmbcArgsInRegs(kRegTyFloat);
+            }
+            IncLmbcTotalArgs();
+            return true;
+        } else if (bNode.blockSize <= static_cast<int32>(k16ByteSize)) {
+            /* integer/mixed types in register/s */
+            size = k4ByteSize;
+            switch (bNode.blockSize) {
+                case 1:
+                    pTy = PTY_i8;
+                    break;
+                case 2:
+                    pTy = PTY_i16;
+                    break;
+                case 4:
+                    pTy = PTY_i32;
+                    break;
+                default:
+                    size = k8ByteSize; /* pTy remains i64 */
+                    break;
+            }
+            MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte);
+            RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size));
+            SelectCopy(*res, pTy, mem, pTy);
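+            /* a block wider than 8 bytes occupies a second integer register, so
+             * the argument info records two registers for it */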
+            SetLmbcArgInfo(res, pTy, bNode.offset, bNode.blockSize > static_cast<int32>(k8ByteSize) ? 2 : 1);
+            IncLmbcArgsInRegs(kRegTyInt);
+            if (bNode.blockSize > static_cast<int32>(k8ByteSize)) {
+                MemOperand &newMem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte);
+                RegOperand *newRes = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size));
+                SelectCopy(*newRes, pTy, newMem, pTy);
+                SetLmbcArgInfo(newRes, pTy, bNode.offset + k8ByteSizeInt, 2);
+                IncLmbcArgsInRegs(kRegTyInt);
+            }
+            IncLmbcTotalArgs();
+            return true;
+        }
+    }
+    return false;
+}
+
+/* This function is incomplete and may be removed when Lmbc IR is changed
+ * to have the lowerer figure out the address where the large agg resides */
+uint32 AArch64CGFunc::LmbcFindTotalStkUsed(std::vector<TyIdx> *paramList)
+{
+    CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here");
+    AArch64CallConvImpl parmlocator(GetBecommon());
+    CCLocInfo pLoc;
+    for (TyIdx tyIdx : *paramList) {
+        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+        (void)parmlocator.LocateNextParm(*ty, pLoc);
+    }
+    return 0;
+}
+
+/* All arguments passed as registers */
+uint32 AArch64CGFunc::LmbcTotalRegsUsed()
+{
+    if (GetLmbcArgInfo() == nullptr) {
+        return 0; /* no arg */
+    }
+    MapleVector<int32> &regs = GetLmbcCallArgNumOfRegs();
+    MapleVector<PrimType> &types = GetLmbcCallArgTypes();
+    uint32 iCnt = 0;
+    uint32 fCnt = 0;
+    for (uint32 i = 0; i < regs.size(); i++) {
+        if (IsPrimitiveInteger(types[i])) {
+            if ((iCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+                iCnt += static_cast<uint32>(regs[i]);
+            }
+        } else {
+            if ((fCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+                fCnt += static_cast<uint32>(regs[i]);
+            }
+        }
+    }
+    return iCnt + fCnt;
+}
+
+/* If blkassignoff is for an argument, this function loads the agg argument into
+ * virtual registers, disregarding whether there are sufficient physical call
+ * registers. Arguments larger than 16 bytes are copied to preset space and the
+ * pointer result is loaded into a virtual register.
+ * If blkassignoff is not for an argument, this function simply emits a memcpy. */
+void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src)
+{
+    CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register");
+    std::vector<TyIdx> *parmList;
+    if (GetLmbcArgInfo() == nullptr) {
+        LmbcArgInfo *p = memPool->New<LmbcArgInfo>(*GetFuncScopeAllocator());
+        SetLmbcArgInfo(p);
+    }
+    if (LmbcSmallAggForRet(bNode, src)) {
+        return;
+    } else if (LmbcSmallAggForCall(bNode, src, &parmList)) {
+        return;
+    }
+    Operand *dest = HandleExpr(bNode, *bNode.Opnd(0));
+    RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+    /* memcpy for agg assign OR large agg for arg/ret */
+    int32 offset = bNode.offset;
+    if (IsBlkassignForPush(bNode)) {
+        /* large agg for call, addr to be pushed in SelectCall */
+        offset = GetLmbcTotalStkUsed();
+        if (offset < 0) {
+            /* length of ALL stack based args for this call, this location is where the
+             * next large agg resides, its addr will then be passed */
+            offset = LmbcFindTotalStkUsed(parmList) + LmbcTotalRegsUsed();
+        }
+        SetLmbcTotalStkUsed(offset + bNode.blockSize); /* next use */
+        SetLmbcArgInfo(regResult, PTY_i64, 0, 1);      /* 1 reg for ptr */
+        IncLmbcArgsInRegs(kRegTyInt);
+        IncLmbcTotalArgs();
+        /* copy large agg arg to offset below */
+    }
+    std::vector<Operand *> opndVec;
+    opndVec.push_back(regResult);                             /* result */
+    opndVec.push_back(PrepareMemcpyParamOpnd(offset, *dest)); /* param 0 */
+    opndVec.push_back(src);                                   /* param 1 */
+    opndVec.push_back(
+        PrepareMemcpyParamOpnd(static_cast<uint64>(static_cast<int64>(bNode.blockSize)))); /* param 2 */
+    SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64);
+}
+
+void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd)
+{
+    DEBUG_ASSERT(stmt.Opnd(0) != nullptr, "null ptr check");
+    Operand &lhsAddrOpnd = LoadIntoRegister(AddrOpnd, stmt.Opnd(0)->GetPrimType());
+    uint32 lhsOffset = 0;
+    MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx());
+    MIRPtrType *lhsPointerType = static_cast<MIRPtrType *>(stmtType);
+    bool loadToRegs4StructReturn = false;
+    if (mirModule.CurFunction()->StructReturnedInRegs()) {
+        MIRSymbol *retSt = mirModule.CurFunction()->GetFormal(0);
+        if (stmt.Opnd(0)->GetOpCode() == OP_dread) {
+            DreadNode *dread = static_cast<DreadNode *>(stmt.Opnd(0));
+            MIRSymbol *addrSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx());
+            loadToRegs4StructReturn = (retSt == addrSym);
+        }
+    }
+    MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx());
+    if (stmt.GetFieldID() != 0) {
+        MIRStructType *structType = static_cast<MIRStructType *>(lhsType);
+        DEBUG_ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure");
+        lhsType = structType->GetFieldType(stmt.GetFieldID());
+        lhsOffset = static_cast<uint32>(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first);
+    } else if (lhsType->GetKind() == kTypeArray) {
+#if DEBUG
+        MIRArrayType *arrayLhsType = static_cast<MIRArrayType *>(lhsType);
+        /* access an array element */
+        MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayLhsType->GetElemTyIdx());
+        MIRTypeKind typeKind = lhsType->GetKind();
+        DEBUG_ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) ||
+                      (typeKind == kTypePointer)),
+                     "unexpected array element type in iassign");
+#endif
+    } else if (lhsType->GetKind() == kTypeFArray) {
+#if DEBUG
+        MIRFarrayType *farrayLhsType = static_cast<MIRFarrayType *>(lhsType);
+        /* access an array element */
+        MIRType *lhsElemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayLhsType->GetElemTyIdx());
+        MIRTypeKind typeKind = lhsElemType->GetKind();
+        DEBUG_ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) ||
+                      (typeKind == kTypePointer)),
+                     "unexpected array element type in iassign");
+#endif
+    }
+    uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex());
+    uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex());
+
+    uint32 rhsAlign;
+    uint32 alignUsed;
+    uint32 rhsOffset = 0;
+    if (stmt.GetRHS()->GetOpCode() == OP_dread) {
+        AddrofNode *rhsDread = static_cast<AddrofNode *>(stmt.GetRHS());
+        MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx());
+        MIRType *rhsType = rhsSymbol->GetType();
+        if (rhsDread->GetFieldID() != 0) {
+            MIRStructType *structType = static_cast<MIRStructType *>(rhsSymbol->GetType());
+            DEBUG_ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure");
+            rhsType = structType->GetFieldType(rhsDread->GetFieldID());
+            rhsOffset = static_cast<uint32>(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first);
+        }
+        if (loadToRegs4StructReturn) {
+            /* generate move to regs for agg return */
+            CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size");
+            CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here");
+            AArch64CallConvImpl parmlocator(GetBecommon());
+            CCLocInfo pLoc;
+            parmlocator.LocateNextParm(*lhsType, pLoc, true, GetBecommon().GetMIRModule().CurFunction());
+            /* aggregates are 8 byte aligned. */
+            Operand *rhsmemopnd = nullptr;
+            RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */
+            uint32 loadSize;
+            uint32 numRegs;
+            RegType regType;
+            PrimType retPty;
+            bool fpParm = false;
+            if (pLoc.numFpPureRegs) {
+                loadSize = pLoc.fpSize;
+                numRegs = pLoc.numFpPureRegs;
+                fpParm = true;
+                regType = kRegTyFloat;
+                retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64;
+            } else {
+                if (CGOptions::IsBigEndian()) {
+                    loadSize = k8ByteSize;
+                    numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+                    regType = kRegTyInt;
+                    retPty = PTY_u64;
+                } else {
+                    loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize;
+                    numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister;
+                    regType = kRegTyInt;
+                    retPty = PTY_u32;
+                }
+            }
+            bool parmCopy = IsParamStructCopy(*rhsSymbol);
+            for (uint32 i = 0; i < numRegs; i++) {
+                if (parmCopy) {
+                    rhsmemopnd = &LoadStructCopyBase(
+                        *rhsSymbol, (rhsOffset + static_cast<int32>(i * (fpParm ? loadSize : k8ByteSize))),
+                        static_cast<int>(loadSize * kBitsPerByte));
+                } else {
+                    rhsmemopnd = &GetOrCreateMemOpnd(
+                        *rhsSymbol, (rhsOffset + static_cast<int32>(i * (fpParm ? loadSize : k8ByteSize))),
+                        (loadSize * kBitsPerByte));
+                }
+                result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize));
+                MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty);
+                Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd);
+                GetCurBB()->AppendInsn(ld);
+            }
+            AArch64reg regs[kFourRegister];
+            regs[kFirstReg] = static_cast<AArch64reg>(pLoc.reg0);
+            regs[kSecondReg] = static_cast<AArch64reg>(pLoc.reg1);
+            regs[kThirdReg] = static_cast<AArch64reg>(pLoc.reg2);
+            regs[kFourthReg] = static_cast<AArch64reg>(pLoc.reg3);
+            for (uint32 i = 0; i < numRegs; i++) {
+                AArch64reg preg;
+                MOperator mop2;
+                if (fpParm) {
+                    preg = regs[i];
+                    mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd;
+                } else {
+                    preg = (i == 0 ? R0 : R1);
+                    mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr;
+                }
+                RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType);
+                Insn &mov = GetInsnBuilder()->BuildInsn(mop2, dest, *(result[i]));
+                GetCurBB()->AppendInsn(mov);
+            }
+            /* Create artificial dependency to extend the live range */
+            for (uint32 i = 0; i < numRegs; i++) {
+                AArch64reg preg;
+                MOperator mop3;
+                if (fpParm) {
+                    preg = regs[i];
+                    mop3 = MOP_pseudo_ret_float;
+                } else {
+                    preg = (i == 0 ? R0 : R1);
+                    mop3 = MOP_pseudo_ret_int;
+                }
+                RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType);
+                Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, dest);
+                GetCurBB()->AppendInsn(pseudo);
+            }
+            return;
+        }
+        rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex());
+        alignUsed = std::min(lhsAlign, rhsAlign);
+        DEBUG_ASSERT(alignUsed != 0, "expect non-zero");
+        uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed);
+        MemOperand *rhsBaseMemOpnd;
+        if (IsParamStructCopy(*rhsSymbol)) {
+            rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset, static_cast<int>(copySize * k8BitSize));
+        } else {
+            rhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*rhsSymbol, copySize, rhsOffset, true);
+        }
+        RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister();
+        int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue();
+        bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li);
+        if (lhsSize > kParmMemcpySize) {
+            std::vector<Operand *> opndVec;
+            RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+            opndVec.push_back(regResult); /* result */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(static_cast<int64>(lhsOffset), lhsAddrOpnd)); /* param 0 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffsetVal, *rhsBaseReg)); /* param 1 */
+
+            opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */
+
+            SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64);
+
+            return;
+        }
+        for (uint32 i = 0; i < (lhsSize / copySize); ++i) {
+            uint32 rhsBaseOffset = static_cast<uint32>(rhsOffsetVal + i * copySize);
+            uint32 lhsBaseOffset = lhsOffset + i * copySize;
+            MemOperand::AArch64AddressingMode addrMode =
+                rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi;
+            MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr;
+            OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize);
+            MemOperand *rhsMemOpnd =
+                &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym);
+            rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, copySize);
+            /* generate the load */
+            RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize)));
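+            /* whenever two full copy units remain, prefer a load/store pair over
+             * two single transfers */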
MOP_wldp : MOP_xldp; + bool doPair = (!rhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + RegOperand *result1 = nullptr; + if (doPair) { + regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize)); + result1 = &CreateVirtualRegisterOperand(vRegNO1); + rhsMemOpnd = + FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd)); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = + FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), result.GetSize(), kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + } + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit of alignUsed */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? 
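+        /*
+         * This loop halves newAlignUsed until the remainder fits. A worked
+         * example (sizes illustrative): lhsSize = 23 with copySize = 8 leaves a
+         * 7-byte tail after the 16-byte ldp/stp copy, which is then moved as
+         * 4 + 2 + 1 bytes; the continue skips widths that would overrun lhsSize.
+         */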
rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = + GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + /* generate the load */ + Operand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand &lhsMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, + static_cast(nullptr)); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + lhsMemOpnd = *FixLargeMemOpnd(mOp, lhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else { /* rhs is iread */ + DEBUG_ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI"); + IreadNode *rhsIread = static_cast(stmt.GetRHS()); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, rhsIread->Opnd(0)->GetPrimType()); + MIRPtrType *rhsPointerType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); + MIRType *rhsType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx())); + bool isRefField = false; + if (rhsIread->GetFieldID() != 0) { + MIRStructType *rhsStructType = static_cast(rhsType); + DEBUG_ASSERT(rhsStructType, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); + } + if (loadToRegs4StructReturn) { + /* generate move to regs. */ + CHECK_FATAL(lhsSize <= k16ByteSize, "SelectAggIassign: illegal struct size"); + RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ + uint32 loadSize; + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + } else { + loadSize = (lhsSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + uint32 numRegs = (lhsSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (uint32 i = 0; i < numRegs; i++) { + OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(rhsOffset + i * loadSize, loadSize * kBitsPerByte); + Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, rhsAddrOpnd, + nullptr, rhsOffOpnd, nullptr); + result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), rhsmemopnd); + ld.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(ld); + } + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? 
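+            /*
+             * For an iread right-hand side the base address is already in a
+             * register, so the loads above use base+offset addressing, and
+             * MarkAsAccessRefField tags loads of GC-reference fields for the
+             * later barrier phases. The move pattern matches the dread case
+             * (w8 illustrative):
+             *   ldr w8, [xBase, #off]
+             *   mov w0, w8
+             */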
R0 : R1); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + DEBUG_ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(lhsOffset), lhsAddrOpnd)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(rhsOffset), *rhsAddrOpnd)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + DEBUG_ASSERT(copySize != 0, "expect non-zero"); + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + /* generate the load */ + uint32 operandSize = copySize * k8BitSize; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + i * copySize, k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, static_cast(rhsAddrOpnd), + nullptr, &rhsOfstOpnd, nullptr); + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + bool doPair = ((copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + Insn *insn = nullptr; + RegOperand *result1 = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? MOP_wldp : MOP_xldp; + rhsMemOpnd = + FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), operandSize, kInsnThirdOpnd); + result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(operandSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), operandSize, kInsnSecondOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + insn->MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(*insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + i * copySize, k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, static_cast(&lhsAddrOpnd), + nullptr, &lhsOfstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? 
MOP_wstp : MOP_xstp; + lhsMemOpnd = + FixLargeMemOpnd(mOpSTP, *static_cast(lhsMemOpnd), operandSize, kInsnThirdOpnd); + DEBUG_ASSERT(result1 != nullptr, "result1 should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(operandSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(lhsMemOpnd), operandSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + uint32 memOpndSize = newAlignUsed * k8BitSize; + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, static_cast(rhsAddrOpnd), + nullptr, &rhsOfstOpnd, nullptr); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOpLD, *rhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + Insn &insn = GetInsnBuilder()->BuildInsn(mOpLD, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, static_cast(&lhsAddrOpnd), + nullptr, &lhsOfstOpnd, nullptr); + MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpST, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } +} + +void AArch64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) +{ + uint32 offset = 0; + if (x->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(x); + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = sym->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(dread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, dread->GetFieldID()).first); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs for agg return */ + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + (void)parmlocator.LocateNextParm(*mirType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); + /* aggregates are 8 byte aligned. */ + Operand *rhsmemopnd = nullptr; + RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ + uint32 loadSize; + uint32 numRegs; + RegType regType; + PrimType retPty; + bool fpParm = false; + if (pLoc.numFpPureRegs) { + loadSize = pLoc.fpSize; + numRegs = pLoc.numFpPureRegs; + fpParm = true; + regType = kRegTyFloat; + retPty = (pLoc.fpSize == k4ByteSize) ? 
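+        /*
+         * SelectReturnSendOfStructInRegs mirrors the struct-return logic of the
+         * iassign path above: it asks the parameter locator where the aggregate
+         * would live and materializes exactly those registers. Sketch of the
+         * decision, using the names from this function:
+         *   pLoc.numFpPureRegs != 0  ->  numRegs FP regs of fpSize bytes each
+         *   otherwise                ->  one or two GP regs, 4- or 8-byte loads
+         */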
PTY_f32 : PTY_f64; + } else { + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u64; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u32; + } + } + bool parmCopy = IsParamStructCopy(*sym); + for (uint32 i = 0; i < numRegs; i++) { + if (parmCopy) { + rhsmemopnd = + &LoadStructCopyBase(*sym, (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + static_cast(loadSize * kBitsPerByte)); + } else { + rhsmemopnd = + &GetOrCreateMemOpnd(*sym, (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + (loadSize * kBitsPerByte)); + } + result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); + GetCurBB()->AppendInsn(ld); + } + AArch64reg regs[kFourRegister]; + regs[0] = static_cast(pLoc.reg0); + regs[1] = static_cast(pLoc.reg1); + regs[2] = static_cast(pLoc.reg2); + regs[3] = static_cast(pLoc.reg3); + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop2; + if (fpParm) { + preg = regs[i]; + mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; + } else { + preg = (i == 0 ? R0 : R1); + mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); + Insn &mov = GetInsnBuilder()->BuildInsn(mop2, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop3; + if (fpParm) { + preg = regs[i]; + mop3 = MOP_pseudo_ret_float; + } else { + preg = (i == 0 ? R0 : R1); + mop3 = MOP_pseudo_ret_int; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, *dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } else if (x->GetOpCode() == OP_iread) { + IreadNode *iread = static_cast(x); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*iread, *iread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, iread->Opnd(0)->GetPrimType()); + MIRPtrType *ptrType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx())); + MIRType *mirType = static_cast(ptrType->GetPointedType()); + bool isRefField = false; + if (iread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(iread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, iread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*structType, iread->GetFieldID()); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs. */ + RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ + uint32 loadSize; + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + uint32 numRegs = (typeSize <= k8ByteSize) ? 
kOneRegister : kTwoRegister;
+        for (uint32 i = 0; i < numRegs; i++) {
+            OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(offset + i * loadSize, loadSize * kBitsPerByte);
+            Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, rhsAddrOpnd,
+                                                     nullptr, rhsOffOpnd, nullptr);
+            result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize));
+            MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32);
+            Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), rhsmemopnd);
+            ld.MarkAsAccessRefField(isRefField);
+            GetCurBB()->AppendInsn(ld);
+        }
+        RegOperand *dest;
+        for (uint32 i = 0; i < numRegs; i++) {
+            AArch64reg preg = (i == 0 ? R0 : R1);
+            dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+            Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, *dest, *(result[i]));
+            GetCurBB()->AppendInsn(mov);
+        }
+        /* Create an artificial dependency to extend the live range */
+        for (uint32 i = 0; i < numRegs; i++) {
+            AArch64reg preg = (i == 0 ? R0 : R1);
+            dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt);
+            Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *dest);
+            GetCurBB()->AppendInsn(pseudo);
+        }
+        return;
+    } else { // dummy return of 0 inserted by the front-end in the absence of a return
+        DEBUG_ASSERT(x->GetOpCode() == OP_constval, "SelectReturnSendOfStructInRegs: unexpected return operand");
+        uint32 typeSize = GetPrimTypeSize(x->GetPrimType());
+        RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize * kBitsPerByte, kRegTyInt);
+        ImmOperand &src = CreateImmOperand(0, k16BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, src));
+        return;
+    }
+}
+
+Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr)
+{
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx());
+    auto itr = stIdx2OverflowResult.find(expr.GetStIdx());
+    if (itr != stIdx2OverflowResult.end()) {
+        /* add_with_overflow / sub_with_overflow:
+         *   reg1: param1
+         *   reg2: param2
+         *   adds/subs reg3, reg1, reg2
+         *   cset reg4, vs
+         * the result is saved in std::pair(reg3, reg4)
+         */
+        if (expr.GetFieldID() == 1) {
+            return itr->second.first;
+        } else {
+            DEBUG_ASSERT(expr.GetFieldID() == 2, "only has 2 fields for intrinsic overflow call result");
+            return itr->second.second;
+        }
+    }
+    if (symbol->IsEhIndex()) {
+        CHECK_FATAL(false, "should not go here");
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32));
+        /* use the second register returned by __builtin_eh_return().
*/ + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*type, retMech); + retLocator.SetupSecondRetReg(*type, retMech); + return &GetOrCreatePhysicalRegisterOperand(static_cast(retMech.GetReg1()), k64BitSize, kRegTyInt); + } + + PrimType symType = symbol->GetType()->GetPrimType(); + uint32 offset = 0; + bool parmCopy = false; + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectDread: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(expr.GetFieldID())->GetPrimType(); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first); + parmCopy = IsParamStructCopy(*symbol); + } + + uint32 dataSize = GetPrimTypeBitSize(symType); + uint32 aggSize = 0; + if (symType == PTY_agg) { + if (expr.GetPrimType() == PTY_agg) { + aggSize = static_cast(GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx())); + dataSize = ((expr.GetFieldID() == 0) ? GetPointerSize() : aggSize) << 3; + } else { + dataSize = GetPrimTypeBitSize(expr.GetPrimType()); + } + } + MemOperand *memOpnd = nullptr; + if (aggSize > k8ByteSize) { + if (parent.op == OP_eval) { + if (symbol->GetAttr(ATTR_volatile)) { + /* Need to generate loads for the upper parts of the struct. */ + Operand &dest = GetZeroOpnd(k64BitSize); + uint32 numLoads = static_cast(RoundUp(aggSize, k64BitSize) / k64BitSize); + for (uint32 o = 0; o < numLoads; ++o) { + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } + if (IsImmediateOffsetOutOfRange(*memOpnd, GetPointerSize())) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, GetPointerSize()); + } + SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64); + } + } else { + /* No side-effects. No need to generate anything for eval. */ + } + } else { + if (expr.GetFieldID() != 0) { + CHECK_FATAL(false, "SelectDread: Illegal agg size"); + } + } + } + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + PrimType resultType = expr.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, symType); + /* a local register variable defined with a specified register */ + if (symbol->GetAsmAttr() != UStrIdx(0) && symbol->GetStorageClass() != kScPstatic && + symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + return &specifiedOpnd; + } + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
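+    /*
+     * AArch64 scaled immediate addressing covers only a limited range (for an
+     * 8-byte ldr, offsets 0..32760 in multiples of 8), so an out-of-range
+     * offset is split into an add plus a short-offset access, roughly:
+     *   add x9, xBase, #hi_part   // x9 an illustrative scratch register
+     *   ldr x0, [x9, #lo_part]
+     * Misaligned offsets are likewise rewritten into a safe region below.
+     */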
&ConstraintOffsetToSafeRegion(dataSize, *memOpnd) : memOpnd; + SelectCopy(resOpnd, resultType, *memOpnd, symType); + return &resOpnd; +} + +RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr) +{ + PregIdx pregIdx = expr.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + /* if it is one of special registers */ + return &GetOrCreateSpecialRegisterOperand(-pregIdx, expr.GetPrimType()); + } + RegOperand ® = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + if (GetOpndFromPregIdx(pregIdx) == nullptr) { + SetPregIdx2Opnd(pregIdx, reg); + } + if (expr.GetPrimType() == PTY_ref) { + reg.SetIsReference(true); + AddReferenceReg(reg.GetRegisterNumber()); + } + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + MemOperand *src = GetPseudoRegisterSpillMemoryOperand(pregIdx); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + PrimType stype = preg->GetPrimType(); + uint32 srcBitLength = GetPrimTypeSize(stype) * kBitsPerByte; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(srcBitLength, stype), reg, *src)); + } + return ® +} + +void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field) +{ + const MIRSymbol *symbol = stImm.GetSymbol(); + if (symbol->GetStorageClass() == kScAuto) { + SetStackProtectInfo(kAddrofStack); + } + if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlErr) + << "Warning: we expect AddrOf with StImmOperand is not used for local variables"; + } + AArch64SymbolAlloc *symLoc = + static_cast(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); + ImmOperand *offset = nullptr; + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false, kUnAdjustVary); + } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) { + auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc); + if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) { + offset = (*it).second; + } else { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset; + } + } else if (mirModule.IsJavaModule()) { + auto it = immOpndsRequiringOffsetAdjustment.find(symLoc); + if ((it != immOpndsRequiringOffsetAdjustment.end()) && (symbol->GetType()->GetPrimType() != PTY_agg)) { + offset = (*it).second; + } else { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + if (symbol->GetType()->GetKind() != kTypeClass) { + immOpndsRequiringOffsetAdjustment[symLoc] = offset; + } + } + } else { + /* Do not cache modified symbol location */ + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + } + + SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64); + if (GetCG()->GenerateVerboseCG()) { + /* Add a comment */ + Insn *insn = GetCurBB()->GetLastInsn(); + std::string comm = "local/formal var: "; + comm.append(symbol->GetName()); + insn->SetComment(comm); + } + } else if (symbol->IsThreadLocal()) { + SelectAddrofThreadLocal(result, stImm); + return; + } else { + Operand *srcOpnd = &result; + if (!IsAfterRegAlloc()) { + // Create a new vreg/preg for the upper bits of the address + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); + MIRPreg *tmpPreg = 
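+        /*
+         * Global addresses use the usual two-instruction form (x8 illustrative):
+         *   adrp x8, sym                     // page base of the symbol
+         *   add  x8, x8, #:lo12:sym
+         * while PIC symbols go through the GOT instead:
+         *   adrp x8, :got:sym
+         *   ldr  x8, [x8, #:got_lo12:sym]
+         * The vreg created here holds the adrp result and records OP_addrof
+         * rematerialization info, letting the register allocator recompute the
+         * page address instead of spilling it.
+         */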
GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); + RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO); + + // Register this vreg mapping + RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, stImm)); + srcOpnd = &tmpreg; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + } + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + + auto size = GetPointerSize() * kBitsPerByte; + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, size, static_cast(srcOpnd), + nullptr, &offset, nullptr); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(size == k64BitSize ? MOP_xldr : MOP_wldr, result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } + } +} + +void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field) +{ + const MIRSymbol *symbol = memOpnd.GetSymbol(); + if (symbol->GetStorageClass() == kScAuto) { + auto *offsetOpnd = static_cast(memOpnd.GetOffsetImmediate()); + Operand &immOpnd = CreateImmOperand(offsetOpnd->GetOffsetValue(), PTY_u32, false); + DEBUG_ASSERT(memOpnd.GetBaseRegister() != nullptr, "nullptr check"); + SelectAdd(result, *memOpnd.GetBaseRegister(), immOpnd, PTY_u32); + SetStackProtectInfo(kAddrofStack); + } else if (!IsAfterRegAlloc()) { + // Create a new vreg/preg for the upper bits of the address + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); + MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); + RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO); + + // Register this vreg mapping + RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, tmpreg, memOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, result, memOpnd)); + } +} + +Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) +{ + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + int32 offset = 0; + AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); + if (isAddrofoff) { + offset = addrofoffExpr.offset; + } else { + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + /* with array of structs, it is possible to have nullptr */ + if (structType != nullptr) { + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + } + } + } + if 
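+    /*
+     * Context for the check that follows: a struct formal larger than 16 bytes
+     * is passed by reference (the caller makes a stack copy and passes its
+     * address), so taking such a formal's address means first loading that
+     * pointer; a struct of 16 bytes or less arrives in registers and is
+     * spilled by the callee, so its stack home can be addressed directly.
+     */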
((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && + ((!isAddrofoff && expr.GetFieldID() != 0) || + (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + /* + * Struct param is copied on the stack by caller if struct size > 16. + * Else if size < 16 then struct param is copied into one or two registers. + */ + RegOperand *stackAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + /* load the base address of the struct copy from stack. */ + SelectAddrof(*stackAddr, CreateStImmOperand(*symbol, 0, 0)); + Operand *structAddr; + if (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) <= k16ByteSize) { + isAggParamInReg = true; + structAddr = stackAddr; + } else { + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand *mo = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, stackAddr, + nullptr, offopnd, nullptr); + structAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *structAddr, *mo)); + } + if (offset == 0) { + return structAddr; + } else { + /* add the struct offset to the base address */ + Operand *result = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + ImmOperand *imm = &CreateImmOperand(PTY_a64, offset); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *structAddr, *imm)); + return result; + } + } + PrimType ptype = expr.GetPrimType(); + Operand &result = GetOrCreateResOperand(parent, ptype); + if (symbol->IsReflectionClassInfo() && !symbol->IsReflectionArrayClassInfo() && !GetCG()->IsLibcore()) { + /* + * Turn addrof __cinf_X into a load of _PTR__cinf_X + * adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B + * ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] + */ + std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName(); + MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr(); + symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType); + symbol->SetStorageClass(kScFstatic); + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, result, CreateStImmOperand(*symbol, 0, 0))); + /* make it un rematerializable. */ + MIRPreg *preg = GetPseudoRegFromVirtualRegNO(static_cast(result).GetRegisterNumber()); + if (preg) { + preg->SetOp(OP_undef); + } + return &result; + } + + SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), isAddrofoff ? 0 : expr.GetFieldID()); + return &result; +} + +Operand *AArch64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) +{ + return SelectAddrof(static_cast(static_cast(expr)), parent, true); +} + +Operand &AArch64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) +{ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) + ? PTY_u64 + : (instrSize == k4ByteSize) ? PTY_u32 : (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8; + Operand &operand = GetOrCreateResOperand(parent, primType); + MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx()); + SelectAddrof(operand, CreateStImmOperand(*mirFunction->GetFuncSymbol(), 0, 0)); + return operand; +} + +/* For an entire aggregate that can fit inside a single 8 byte register. 
 */
+PrimType AArch64CGFunc::GetDestTypeFromAggSize(uint32 bitSize) const
+{
+    PrimType primType;
+    switch (bitSize) {
+        case k8BitSize: {
+            primType = PTY_u8;
+            break;
+        }
+        case k16BitSize: {
+            primType = PTY_u16;
+            break;
+        }
+        case k32BitSize: {
+            primType = PTY_u32;
+            break;
+        }
+        case k64BitSize: {
+            primType = PTY_u64;
+            break;
+        }
+        default:
+            CHECK_FATAL(false, "aggregate of unhandled size");
+    }
+    return primType;
+}
+
+Operand &AArch64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent)
+{
+    /* adrp reg, label-id */
+    uint32 instrSize = static_cast<uint32>(expr.SizeOfInstr());
+    PrimType primType = (instrSize == k8ByteSize)
+                            ? PTY_u64
+                            : (instrSize == k4ByteSize) ? PTY_u32 : (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8;
+    Operand &dst = GetOrCreateResOperand(parent, primType);
+    Operand &immOpnd = CreateImmOperand(expr.GetOffset(), k64BitSize, false);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd));
+    return dst;
+}
+
+Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff)
+{
+    auto offset = ireadoff.GetOffset();
+    auto primType = ireadoff.GetPrimType();
+    auto bitSize = GetPrimTypeBitSize(primType);
+    auto *baseAddr = ireadoff.Opnd(0);
+    auto *result = &CreateRegisterOperandOfType(primType);
+    auto *addrOpnd = HandleExpr(ireadoff, *baseAddr);
+    auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize);
+    auto mop = PickLdInsn(bitSize, primType);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *result, memOpnd));
+    return result;
+}
+
+RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType,
+                                            AArch64reg baseRegno)
+{
+    MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize, baseRegno);
+    RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize));
+    MOperator mOp = PickLdInsn(byteSize * kBitsPerByte, primType);
+    Insn &load = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd);
+    GetCurBB()->AppendInsn(load);
+    return result;
+}
+
+RegOperand *AArch64CGFunc::LmbcStructReturnLoad(int32 offset)
+{
+    RegOperand *result = nullptr;
+    MIRFunction &func = GetFunction();
+    CHECK_FATAL(func.IsReturnStruct(), "LmbcStructReturnLoad: not struct return");
+    MIRType *ty = func.GetReturnType();
+    uint32 sz = GetBecommon().GetTypeSize(ty->GetTypeIndex());
+    uint32 fpSize;
+    uint32 numFpRegs = FloatParamRegRequired(static_cast<MIRStructType *>(ty), fpSize);
+    if (numFpRegs > 0) {
+        PrimType pType = (fpSize <= k4ByteSize) ?
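+        /*
+         * Lmbc struct returns are reloaded from the frame into the return
+         * registers: a pure-FP aggregate fills its FP registers from the
+         * highest index down (d0..dN, or s0..sN when fpSize == 4), and each
+         * physical register gets a MOP_pseudo_ret_* use so its live range
+         * reaches the function exit.
+         */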
PTY_f32 : PTY_f64; + for (int32 i = (numFpRegs - kOneRegister); i > 0; --i) { + result = GenLmbcParamLoad(offset + (i * static_cast(fpSize)), fpSize, kRegTyFloat, pType); + AArch64reg regNo = static_cast(V0 + static_cast(i)); + RegOperand *reg = &GetOrCreatePhysicalRegisterOperand(regNo, fpSize * kBitsPerByte, kRegTyFloat); + SelectCopy(*reg, pType, *result, pType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *reg); + GetCurBB()->AppendInsn(pseudo); + } + result = GenLmbcParamLoad(offset, fpSize, kRegTyFloat, pType); + } else if (sz <= k4ByteSize) { + result = GenLmbcParamLoad(offset, k4ByteSize, kRegTyInt, PTY_u32); + } else if (sz <= k8ByteSize) { + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } else if (sz <= k16ByteSize) { + result = GenLmbcParamLoad(offset + k8ByteSizeInt, k8ByteSize, kRegTyInt, PTY_i64); + RegOperand *r1 = &GetOrCreatePhysicalRegisterOperand(R1, k8ByteSize * kBitsPerByte, kRegTyInt); + SelectCopy(*r1, PTY_i64, *result, PTY_i64); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *r1); + GetCurBB()->AppendInsn(pseudo); + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } + return result; +} + +Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) +{ + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + RegType regty = GetRegTyFromPrimTy(primType); + RegOperand *result = nullptr; + if (offset >= 0) { + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(static_cast(offset)); + if (info->GetPrimType() == PTY_agg) { + if (info->IsOnStack()) { + result = GenLmbcParamLoad(info->GetOnStackOffset(), GetPrimTypeSize(PTY_a64), kRegTyInt, PTY_a64); + regno_t baseRegno = result->GetRegisterNumber(); + result = GenLmbcParamLoad(offset - static_cast(info->GetOffset()), bytelen, regty, primType, + (AArch64reg)baseRegno); + } else if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + } else { + CHECK_FATAL(primType == info->GetPrimType(), "Incorrect primtype"); + CHECK_FATAL(offset == info->GetOffset(), "Incorrect offset"); + if (info->GetRegNO() == 0 || !info->HasRegassign()) { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } else { + result = &GetOrCreatePhysicalRegisterOperand(static_cast(info->GetRegNO()), bitlen, regty); + } + } + } else { + if (primType == PTY_agg) { + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + } + return result; +} + +Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, + PrimType finalBitFieldDestType) +{ + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx()); + MIRPtrType *pointerType = static_cast(type); + DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + MIRType *pointedType = nullptr; + bool isRefField = false; + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + + if (expr.GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + MIRStructType *structType = nullptr; + if (pointedTy->GetKind() != 
kTypeJArray) { + structType = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structType = static_cast(pointedTy)->GetParentType(); + } + + DEBUG_ASSERT(structType != nullptr, "SelectIread: non-zero fieldID for non-structure"); + pointedType = structType->GetFieldType(expr.GetFieldID()); + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + isRefField = GetBecommon().IsRefField(*structType, expr.GetFieldID()); + } else { + pointedType = GetPointedToType(*pointerType); + if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) { + MIRType *nextPointedType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(pointedType)->GetPointedTyIdx()); + if (nextPointedType->GetKind() != kTypeScalar) { + isRefField = true; /* read from an object array, or an high-dimentional array */ + } + } + } + + RegType regType = GetRegTyFromPrimTy(expr.GetPrimType()); + uint32 regSize = GetPrimTypeSize(expr.GetPrimType()); + if (expr.GetFieldID() == 0 && pointedType->GetPrimType() == PTY_agg) { + /* Maple IR can passing small struct to be loaded into a single register. */ + if (regType == kRegTyFloat) { + /* regsize is correct */ + } else { + uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx()); + regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + } else if (regSize < k4ByteSize) { + regSize = k4ByteSize; /* 32-bit */ + } + Operand *result = nullptr; + if (parent.GetOpCode() == OP_eval) { + /* regSize << 3, that is regSize * 8, change bytes to bits */ + result = &GetZeroOpnd(regSize << 3); + } else { + result = &GetOrCreateResOperand(parent, expr.GetPrimType()); + } + + PrimType destType = pointedType->GetPrimType(); + + uint32 bitSize = 0; + if ((pointedType->GetKind() == kTypeStructIncomplete) || (pointedType->GetKind() == kTypeClassIncomplete) || + (pointedType->GetKind() == kTypeInterfaceIncomplete)) { + bitSize = GetPrimTypeBitSize(expr.GetPrimType()); + maple::LogInfo::MapleLogger(kLlErr) << "Warning: objsize is zero! \n"; + } else { + if (pointedType->IsStructType()) { + MIRStructType *structType = static_cast(pointedType); + /* size << 3, that is size * 8, change bytes to bits */ + bitSize = std::min(structType->GetSize(), static_cast(GetPointerSize())) << 3; + } else { + bitSize = GetPrimTypeBitSize(destType); + } + if (regType == kRegTyFloat) { + destType = expr.GetPrimType(); + bitSize = GetPrimTypeBitSize(destType); + } else if (destType == PTY_agg) { + switch (bitSize) { + case k8BitSize: + destType = PTY_u8; + break; + case k16BitSize: + destType = PTY_u16; + break; + case k32BitSize: + destType = PTY_u32; + break; + case k64BitSize: + destType = PTY_u64; + break; + default: + destType = PTY_u64; // when eval agg . a way to round up + break; + } + } + } + + MemOperand *memOpnd = + CreateMemOpndOrNull(destType, expr, *expr.Opnd(0), static_cast(offset) + extraOffset, memOrd); + if (aggParamReg != nullptr) { + isAggParamInReg = false; + return aggParamReg; + } + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + if (isVolLoad && (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) { + memOrd = AArch64isa::kMoAcquire; + isVolLoad = false; + } + + memOpnd = memOpnd->IsOffsetMisaligned(bitSize) ? 
&ConstraintOffsetToSafeRegion(bitSize, *memOpnd) : memOpnd; + if (memOrd == AArch64isa::kMoNone) { + MOperator mOp = 0; + if (finalBitFieldDestType == kPtyInvalid) { + mOp = PickLdInsn(bitSize, destType); + } else { + mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd); + if (parent.GetOpCode() == OP_eval && result->IsRegister() && + static_cast(result)->GetRegisterNumber() == RZR) { + insn.SetComment("null-check"); + } + GetCurBB()->AppendInsn(insn); + + if (parent.op != OP_eval) { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *prop = md->GetOpndDes(0); + if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) { + switch (destType) { + case PTY_i8: + mOp = MOP_xsxtb64; + break; + case PTY_i16: + mOp = MOP_xsxth64; + break; + case PTY_i32: + mOp = MOP_xsxtw64; + break; + case PTY_u8: + mOp = MOP_xuxtb32; + break; + case PTY_u16: + mOp = MOP_xuxth32; + break; + case PTY_u32: + mOp = MOP_xuxtw64; + break; + default: + break; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, insn.GetOperand(0), insn.GetOperand(0))); + } + } + } else { + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, bitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + AArch64CGFunc::SelectLoadAcquire(*result, destType, *memOpnd, destType, memOrd, false); + } + GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); + return result; +} + +Operand *AArch64CGFunc::SelectIntConst(const MIRIntConst &intConst) +{ + return &CreateImmOperand(intConst.GetExtValue(), GetPrimTypeSize(intConst.GetType().GetPrimType()) * kBitsPerByte, + false); +} + +template +Operand *SelectLiteral(T *c, MIRFunction *func, uint32 labelIdx, AArch64CGFunc *cgFunc) +{ + MIRSymbol *st = func->GetSymTab()->CreateSymbol(kScopeLocal); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string funcName = funcSt->GetName(); + lblStr.append(funcName).append(std::to_string(labelIdx)); + st->SetNameStrIdx(lblStr); + st->SetStorageClass(kScPstatic); + st->SetSKind(kStConst); + st->SetKonst(c); + cgFunc->SetLocalSymLabelIndex(*st, labelIdx); + PrimType primType = c->GetType().GetPrimType(); + st->SetTyIdx(TyIdx(primType)); + uint32 typeBitSize = GetPrimTypeBitSize(primType); + + if (cgFunc->GetMirModule().IsCModule() && (T::GetPrimType() == PTY_f32 || T::GetPrimType() == PTY_f64)) { + return static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } + if (T::GetPrimType() == PTY_f32) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) + ? static_cast( + &cgFunc->CreateImmOperand(Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) + : static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else if (T::GetPrimType() == PTY_f64) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) + ? 
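+        /*
+         * Only an exact (+/-)0.0 passes the denorm_min() test, and zero is the
+         * one value this path can keep as a floating-point immediate operand;
+         * every other constant is placed in a function-local literal pool
+         * symbol (.LB_<func><label>, created above) and loaded with ldr.
+         */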
static_cast( + &cgFunc->CreateImmOperand(Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) + : static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else { + CHECK_FATAL(false, "Unsupported const type"); + } + return nullptr; +} + +Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent) +{ + Operand *result; + bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize); + uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff); + uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3; + uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f; + bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); + canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame; + if (canRepreset) { + uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7; + uint64 temp2 = is64Bits ? val >> 48 : val >> 19; + int64 imm8 = (temp2 & 0x7f) | temp1; + Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true); + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, *newOpnd0)); + } else { + if (is64Bits) { // For DoubleConst, use ldr .literal + uint32 labelIdxTmp = GetLabelIdx(); + result = SelectLiteral(static_cast(&mirConst), &GetFunction(), labelIdxTmp++, this); + SetLabelIdx(labelIdxTmp); + return result; + } + Operand *newOpnd0 = &CreateImmOperand(val, GetPrimTypeSize(stype) * kBitsPerByte, false); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = LoadIntoRegister(*newOpnd0, itype); + + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? 
MOP_xvmovdr : MOP_xvmovsr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, regOpnd)); + } + return result; +} + +Operand *AArch64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) +{ + PrimType stype = floatConst.GetType().GetPrimType(); + int32 val = floatConst.GetIntValue(); + /* according to aarch64 encoding format, convert int to float expression */ + Operand *result; + result = HandleFmovImm(stype, val, floatConst, parent); + return result; +} + +Operand *AArch64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) +{ + PrimType stype = doubleConst.GetType().GetPrimType(); + int64 val = doubleConst.GetIntValue(); + /* according to aarch64 encoding format, convert int to float expression */ + Operand *result; + result = HandleFmovImm(stype, val, doubleConst, parent); + return result; +} + +template +Operand *SelectStrLiteral(T &c, AArch64CGFunc &cgFunc) +{ + std::string labelStr; + if (c.GetKind() == kConstStrConst) { + labelStr.append(".LUstr_"); + } else if (c.GetKind() == kConstStr16Const) { + labelStr.append(".LUstr16_"); + } else { + CHECK_FATAL(false, "Unsupported literal type"); + } + labelStr.append(std::to_string(c.GetValue())); + + MIRSymbol *labelSym = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + if (labelSym == nullptr) { + labelSym = cgFunc.GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c.GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc.NewMirConst(c)); + } + + if (c.GetPrimType() == PTY_ptr) { + StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc.CreateRegisterOperandOfType(PTY_a64); + cgFunc.SelectAddrof(addrOpnd, stOpnd); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand *AArch64CGFunc::SelectStrConst(MIRStrConst &strConst) +{ + return SelectStrLiteral(strConst, *this); +} + +Operand *AArch64CGFunc::SelectStr16Const(MIRStr16Const &str16Const) +{ + return SelectStrLiteral(str16Const, *this); +} + +static inline void AppendInstructionTo(Insn &i, CGFunc &f) +{ + f.GetCurBB()->AppendInsn(i); +} + +/* + * Returns the number of leading 0-bits in x, starting at the most significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetHead0BitNum(int64 val) +{ + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((0x8000000000000000ULL >> static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * Returns the number of trailing 0-bits in x, starting at the least significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetTail0BitNum(int64 val) +{ + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((static_cast(1) << static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * If the input integer is power of 2, return log2(input) + * else return -1 + */ +static inline int32 GetLog2(uint64 val) +{ + if (__builtin_popcountll(val) == 1) { + return __builtin_ffsll(static_cast(val)) - 1; + } + return -1; +} + +MOperator AArch64CGFunc::PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const +{ + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? 
MOP_bne : MOP_beq; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_beq : MOP_bne; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_blt : MOP_blo) + : (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs)); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_ble : MOP_bls) + : (isFloat ? MOP_bhi : (isSigned ? MOP_bgt : MOP_bhi)); + case OP_gt: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bgt : (isSigned ? MOP_bgt : MOP_bhi)) + : (isSigned ? MOP_ble : MOP_bls); + case OP_ge: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs)) + : (isSigned ? MOP_blt : MOP_blo); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +bool AArch64CGFunc::GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, + LabelOperand &targetOpnd, Operand &opnd0) +{ + bool finish = true; + MOperator mOpCode = MOP_undef; + switch (cmpOp) { + case OP_ne: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } else { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + case OP_eq: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } else { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + /* + * TBZ/TBNZ instruction have a range of +/-32KB, need to check if the jump target is reachable in a later + * phase. If the branch target is not reachable, then we change tbz/tbnz into combination of ubfx and + * cbz/cbnz, which will clobber one extra register. With LSRA under O2, we can use of the reserved registers + * for that purpose. + */ + case OP_lt: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = + CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz; + } else { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + case OP_ge: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = + CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } else { + mOpCode = is64Bits ? 
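+            /*
+             * For signed values, "x < 0" and "x >= 0" reduce to a test of the
+             * sign bit (bit 31 or 63), e.g. with w0 illustrative:
+             *   tbnz w0, #31, .Ltarget   // taken when x < 0
+             *   tbz  w0, #31, .Ltarget   // taken when x >= 0
+             * Unsigned compares return false above so the caller emits a full
+             * cmp-and-branch sequence instead.
+             */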
MOP_xtbnz : MOP_wtbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + default: + finish = false; + break; + } + return finish; +} + +void AArch64CGFunc::SelectIgoto(Operand *opnd0) +{ + Operand *srcOpnd = opnd0; + if (opnd0->GetKind() == Operand::kOpdMem) { + Operand *dst = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *dst, *opnd0)); + srcOpnd = dst; + } + GetCurBB()->SetKind(BB::kBBIgoto); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, *srcOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, + Operand &origOpnd1, PrimType primType, bool signedCond) +{ + Operand *opnd0 = &origOpnd0; + Operand *opnd1 = &origOpnd1; + opnd0 = &LoadIntoRegister(origOpnd0, primType); + + bool is64Bits = GetPrimTypeBitSize(primType) == k64BitSize; + bool isFloat = IsPrimitiveFloat(primType); + Operand &rflag = GetOrCreateRflag(); + if (isFloat) { + opnd1 = &LoadIntoRegister(origOpnd1, primType); + MOperator mOp = + is64Bits ? MOP_dcmperr : ((GetPrimTypeBitSize(primType) == k32BitSize) ? MOP_scmperr : MOP_hcmperr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } else { + bool isImm = ((origOpnd1.GetKind() == Operand::kOpdImmediate) || (origOpnd1.GetKind() == Operand::kOpdOffset)); + if ((origOpnd1.GetKind() != Operand::kOpdRegister) && !isImm) { + opnd1 = &SelectCopy(origOpnd1, primType, primType); + } + MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr; + + if (isImm) { + /* Special cases, i.e., comparing with zero + * Do not perform optimization for C, unlike Java which has no unsigned int. + */ + if (static_cast(opnd1)->IsZero() && + (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0)) { + bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0); + if (finish) { + return; + } + } + + /* + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + ImmOperand *immOpnd = static_cast(opnd1); + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri; + } else { + opnd1 = &SelectCopy(*opnd1, primType, primType); + } + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } + + bool isSigned = IsPrimitiveInteger(primType) ? IsSignedInteger(primType) : (signedCond ? 
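+    /*
+     * The immediate compare above accepts a 12-bit value, optionally shifted
+     * left by 12, covering 24 bits in total:
+     *   cmp x0, #4095            // IsInBitSize(12, 0)
+     *   cmp x0, #4095, lsl #12   // IsInBitSize(12, 12)
+     * Immediates outside both ranges are first copied into a register.
+     */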
true : false); + MOperator jmpOperator = PickJmpInsn(jmpOp, cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOperator, rflag, targetOpnd)); +} + +/* + * brtrue @label0 (ge u8 i32 ( + * cmp i32 i64 (dread i64 %Reg2_J, dread i64 %Reg4_J), + * constval i32 0)) + * ===> + * cmp r1, r2 + * bge Cond, label0 + */ +void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) +{ + DEBUG_ASSERT(expr.GetOpCode() == OP_cmp, "unexpect opcode"); + Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0)); + Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1)); + CompareNode *node = static_cast(&expr); + bool isFloat = IsPrimitiveFloat(node->GetOpndType()); + opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType()); + /* + * most of FP constants are passed as MemOperand + * except 0.0 which is passed as kOpdFPImmediate + */ + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = &LoadIntoRegister(*opnd1, node->GetOpndType()); + } + SelectAArch64Cmp(*opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(node->GetOpndType())); + /* handle condgoto now. */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp = condNode->GetOpCode(); + PrimType pType = static_cast(condNode)->GetOpndType(); + isFloat = IsPrimitiveFloat(pType); + Operand &rflag = GetOrCreateRflag(); + bool isSigned = + IsPrimitiveInteger(pType) ? IsSignedInteger(pType) : (IsSignedInteger(condNode->GetPrimType()) ? true : false); + MOperator jmpOp = PickJmpInsn(stmt.GetOpCode(), cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOp, rflag, targetOpnd)); +} + +/* + * Special case: + * brfalse(ge (cmpg (op0, op1), 0) ==> + * fcmp op1, op2 + * blo + */ +void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr) +{ + auto &cmpNode = static_cast(expr); + Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0)); + Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1)); + PrimType operandType = cmpNode.GetOpndType(); + opnd0 = opnd0->IsRegister() ? static_cast(opnd0) : &SelectCopy(*opnd0, operandType, operandType); + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = opnd1->IsRegister() ? 
static_cast(opnd1) : &SelectCopy(*opnd1, operandType, operandType); + } +#ifdef DEBUG + bool isFloat = IsPrimitiveFloat(operandType); + if (!isFloat) { + DEBUG_ASSERT(false, "incorrect operand types"); + } +#endif + SelectTargetFPCmpQuiet(*opnd0, *opnd1, GetPrimTypeBitSize(operandType)); + Operand &rFlag = GetOrCreateRflag(); + LabelIdx tempLabelIdx = stmt.GetOffset(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(tempLabelIdx); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blo, rFlag, targetOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) +{ + /* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp; + + if (opnd0.IsRegister() && (static_cast(&opnd0)->GetValidBitsNum() == 1) && + (condNode->GetOpCode() == OP_lior)) { + ImmOperand &condBit = CreateImmOperand(0, k8BitSize, false); + if (stmt.GetOpCode() == OP_brtrue) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbnz, static_cast(opnd0), condBit, targetOpnd)); + } else { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbz, static_cast(opnd0), condBit, targetOpnd)); + } + return; + } + + PrimType pType; + if (kOpcodeInfo.IsCompare(condNode->GetOpCode())) { + cmpOp = condNode->GetOpCode(); + pType = static_cast(condNode)->GetOpndType(); + } else { + /* not a compare node; dread for example, take its pType */ + cmpOp = OP_ne; + pType = condNode->GetPrimType(); + } + bool signedCond = IsSignedInteger(pType) || IsPrimitiveFloat(pType); + SelectCondGoto(targetOpnd, stmt.GetOpCode(), cmpOp, opnd0, opnd1, pType, signedCond); +} + +void AArch64CGFunc::SelectGoto(GotoNode &stmt) +{ + Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + GetCurBB()->SetKind(BB::kBBGoto); +} + +Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? 
+
+void AArch64CGFunc::SelectGoto(GotoNode &stmt)
+{
+    Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset());
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
+    GetCurBB()->SetKind(BB::kBBGoto);
+}
+
+Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        /* promoted type */
+        PrimType primType =
+            isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+        if (parent.GetOpCode() == OP_regassign) {
+            auto &regAssignNode = static_cast<const RegassignNode &>(parent);
+            PregIdx pregIdx = regAssignNode.GetRegIdx();
+            if (IsSpecialPseudoRegister(pregIdx)) {
+                resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype);
+            } else {
+                resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx));
+            }
+        } else {
+            resOpnd = &CreateRegisterOperandOfType(primType);
+        }
+        SelectAdd(*resOpnd, opnd0, opnd1, primType);
+    } else {
+        /* vector operands */
+        resOpnd =
+            SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), OP_add);
+    }
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    if (opnd0Type != Operand::kOpdRegister) {
+        /* add #imm, #imm */
+        if (opnd1Type != Operand::kOpdRegister) {
+            SelectAdd(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
+            return;
+        }
+        /* add #imm, reg */
+        SelectAdd(resOpnd, opnd1, opnd0, primType); /* commutative */
+        return;
+    }
+    /* add reg, reg */
+    if (opnd1Type == Operand::kOpdRegister) {
+        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI add");
+        MOperator mOp =
+            IsPrimitiveFloat(primType) ? (is64Bits ? MOP_dadd : MOP_sadd) : (is64Bits ? MOP_xaddrrr : MOP_waddrrr);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+        return;
+    } else if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
+        /* add reg, other operand type */
+        SelectAdd(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    } else {
+        /* add reg, #imm */
+        ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
+        if (immOpnd->IsNegative()) {
+            immOpnd->Negate();
+            SelectSub(resOpnd, opnd0, *immOpnd, primType);
+            return;
+        }
+        if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
+            /*
+             * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
+             * ADD Xd|SP, Xn|SP, #imm{, shift}   ; 64-bit general registers
+             * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
+             * aarch64 assembly takes up to 24 bits, if the lower 12 bits are all 0
+             */
+            MOperator mOpCode = MOP_undef;
+            Operand *newOpnd0 = &opnd0;
+            if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) ||
+                  immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
+                /* process higher 12 bits */
+                ImmOperand &immOpnd2 =
+                    CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits),
+                                     immOpnd->GetSize(), immOpnd->IsSignedValue());
+                mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24;
+                Operand *tmpRes = IsAfterRegAlloc() ? &resOpnd : &CreateRegisterOperandOfType(primType);
+                BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
+                Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd);
+                GetCurBB()->AppendInsn(newInsn);
+                immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
+                newOpnd0 = tmpRes;
+            }
+            /* process lower 12 bits */
+            mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12;
+            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd);
+            GetCurBB()->AppendInsn(newInsn);
+            return;
+        }
+        /* load into register */
+        int64 immVal = immOpnd->GetValue();
+        int32 tail0bitNum = GetTail0BitNum(immVal);
+        int32 head0bitNum = GetHead0BitNum(immVal);
+        const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
+        RegOperand &regOpnd = CreateRegisterOperandOfType(primType);
+        if (isAfterRegAlloc) {
+            RegType regty = GetRegTyFromPrimTy(primType);
+            uint32 bytelen = GetPrimTypeSize(primType);
+            regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
+        }
+        regno_t regNO0 = static_cast<RegOperand &>(opnd0).GetRegisterNumber();
+        /* addrrrs does not support sp */
+        if (bitNum <= k16ValidBit && regNO0 != RSP) {
+            int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
+            ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
+            SelectCopyImm(regOpnd, immOpnd1, primType);
+            uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
+            int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
+            BitShiftOperand &bitShiftOpnd =
+                CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
+            Insn &newInsn = GetInsnBuilder()->BuildInsn(mopBadd, resOpnd, opnd0, regOpnd, bitShiftOpnd);
+            GetCurBB()->AppendInsn(newInsn);
+            return;
+        }
+
+        SelectCopyImm(regOpnd, *immOpnd, primType);
+        MOperator mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr;
+        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, regOpnd);
+        GetCurBB()->AppendInsn(newInsn);
+    }
+}
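+
+/*
+ * For example (illustrative; registers are arbitrary): add x0, x1, #0x123456
+ * cannot be encoded in one ADD, since neither 12-bit immediate window covers it,
+ * so the 24-bit immediate is split as above:
+ *   add x0, x1, #0x123, LSL #12   // higher 12 bits
+ *   add x0, x0, #0x456            // lower 12 bits
+ */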
+
+Operand *AArch64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1,
+                                   const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    /* promoted type */
+    PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectMadd(resOpnd, opndM0, opndM1, opnd1, primType);
+    return &resOpnd;
+}
+
+void AArch64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType)
+{
+    Operand::OperandType opndM0Type = opndM0.GetKind();
+    Operand::OperandType opndM1Type = opndM1.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+
+    if (opndM0Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, SelectCopy(opndM0, primType, primType), opndM1, opnd1, primType);
+        return;
+    } else if (opndM1Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, opndM0, SelectCopy(opndM1, primType, primType), opnd1, primType);
+        return;
+    } else if (opnd1Type != Operand::kOpdRegister) {
+        SelectMadd(resOpnd, opndM0, opndM1, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    }
+
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI MAdd");
+    MOperator mOp = is64Bits ? MOP_xmaddrrrr : MOP_wmaddrrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opndM0, opndM1, opnd1));
+}
+
+Operand &AArch64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent)
+{
+    BaseNode *opnd0 = node.Opnd(0);
+    BaseNode *opnd1 = node.Opnd(1);
+    DEBUG_ASSERT(opnd1->GetOpCode() == OP_constval, "Internal error, opnd1->op should be OP_constval.");
+
+    switch (opnd0->GetOpCode()) {
+        case OP_regread: {
+            RegreadNode *regreadNode = static_cast<RegreadNode *>(opnd0);
+            return *SelectRegread(*regreadNode);
+        }
+        case OP_addrof: {
+            AddrofNode *addrofNode = static_cast<AddrofNode *>(opnd0);
+            MIRSymbol &symbol = *mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofNode->GetStIdx());
+            DEBUG_ASSERT(addrofNode->GetFieldID() == 0, "For debug SelectCGArrayElemAdd.");
+
+            Operand &result = GetOrCreateResOperand(parent, PTY_a64);
+
+            /* OP_constval */
+            ConstvalNode *constvalNode = static_cast<ConstvalNode *>(opnd1);
+            MIRConst *mirConst = constvalNode->GetConstVal();
+            MIRIntConst *mirIntConst = static_cast<MIRIntConst *>(mirConst);
+            SelectAddrof(result, CreateStImmOperand(symbol, mirIntConst->GetExtValue(), 0));
+
+            return result;
+        }
+        default:
+            CHECK_FATAL(0, "Internal error, cannot handle opnd0.");
+    }
+}
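+
+/*
+ * For example (illustrative): madd fuses a multiply feeding an add into one
+ * instruction, which is why all three sources are first forced into registers:
+ *   res = a * b + c   ==>  madd w0, w1, w2, w3   // w0 <- (w1 * w2) + w3
+ */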
+
+void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(primType);
+    Operand *opnd0Bak = &LoadIntoRegister(opnd0, primType);
+    if (opnd1Type == Operand::kOpdRegister) {
+        MOperator mOp = isFloat ? (is64Bits ? MOP_dsub : MOP_ssub) : (is64Bits ? MOP_xsubrrr : MOP_wsubrrr);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *opnd0Bak, opnd1));
+        return;
+    }
+
+    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdOffset)) {
+        SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType);
+        return;
+    }
+
+    ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
+    if (immOpnd->IsNegative()) {
+        immOpnd->Negate();
+        SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType);
+        return;
+    }
+
+    int64 higher12BitVal = static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits);
+    if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) {
+        /*
+         * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers
+         * SUB Xd|SP, Xn|SP, #imm{, shift}   ; 64-bit general registers
+         * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
+         * aarch64 assembly takes up to 24 bits, if the lower 12 bits are all 0
+         * a large offset is treated as sub (higher 12 bits + 4096) + add;
+         * this gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store
+         */
+        MOperator mOpCode = MOP_undef;
+        bool isSplitSub = false;
+        if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) {
+            isSplitSub = true;
+            /* process higher 12 bits */
+            ImmOperand &immOpnd2 = CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue());
+
+            mOpCode = is64Bits ? MOP_xsubrri24 : MOP_wsubrri24;
+            BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize);
+            Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd);
+            GetCurBB()->AppendInsn(newInsn);
+            immOpnd->ModuloByPow2(static_cast<int32>(kMaxImmVal12Bits));
+            immOpnd->SetValue(static_cast<int64>(kMax12UnsignedImm) - immOpnd->GetValue());
+            opnd0Bak = &resOpnd;
+        }
+        /* process lower 12 bits */
+        mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12);
+        Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd);
+        GetCurBB()->AppendInsn(newInsn);
+        return;
+    }
+
+    /* load into register */
+    int64 immVal = immOpnd->GetValue();
+    int32 tail0bitNum = GetTail0BitNum(immVal);
+    int32 head0bitNum = GetHead0BitNum(immVal);
+    const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum;
+    RegOperand &regOpnd = CreateRegisterOperandOfType(primType);
+    if (isAfterRegAlloc) {
+        RegType regty = GetRegTyFromPrimTy(primType);
+        uint32 bytelen = GetPrimTypeSize(primType);
+        regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), bytelen, regty);
+    }
+
+    if (bitNum <= k16ValidBit) {
+        int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0bitNum)) & 0xFFFF;
+        ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
+        SelectCopyImm(regOpnd, immOpnd1, primType);
+        uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs;
+        int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
+        BitShiftOperand &bitShiftOpnd =
+            CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0bitNum), bitLen);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd));
+        return;
+    }
+
+    SelectCopyImm(regOpnd, *immOpnd, primType);
+    MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr;
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd);
+    GetCurBB()->AppendInsn(newInsn);
+}
+
+Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        /* promoted type */
+        PrimType primType =
+            isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+        SelectSub(*resOpnd, opnd0, opnd1, primType);
+    } else {
+        /* vector operands */
+        resOpnd =
+            SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), OP_sub);
+    }
+    return resOpnd;
+}
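+
+/*
+ * For example (illustrative): sub x0, x1, #0x123456 takes the split path above:
+ *   sub x0, x1, #0x124, LSL #12   // (higher 12 bits + 1) << 12 = 0x124000
+ *   add x0, x0, #0xbaa            // 0x1000 - 0x456 adds the overshoot back
+ * so x0 = x1 - 0x124000 + 0xbaa = x1 - 0x123456.
+ */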
+
+Operand *AArch64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        /* promoted type */
+        PrimType primType =
+            isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+        SelectMpy(*resOpnd, opnd0, opnd1, primType);
+    } else {
+        resOpnd =
+            SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), OP_mul);
+    }
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+
+    if (((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset) ||
+         (opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
+        IsPrimitiveInteger(primType)) {
+        ImmOperand *imm = ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset))
+                              ? static_cast<ImmOperand *>(&opnd0)
+                              : static_cast<ImmOperand *>(&opnd1);
+        Operand *otherOp =
+            ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? &opnd1 : &opnd0;
+        int64 immValue = llabs(imm->GetValue());
+        if (immValue != 0 && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
+            /* immValue is 1 << n */
+            if (otherOp->GetKind() != Operand::kOpdRegister) {
+                otherOp = &SelectCopy(*otherOp, primType, primType);
+            }
+            int64 shiftVal = __builtin_ffsll(immValue);
+            ImmOperand &shiftNum = CreateImmOperand(shiftVal - 1, dsize, false);
+            SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType);
+            bool reachSignBit = (is64Bits && (shiftVal == k64BitSize)) || (!is64Bits && (shiftVal == k32BitSize));
+            if (imm->GetValue() < 0 && !reachSignBit) {
+                SelectNeg(resOpnd, resOpnd, primType);
+            }
+
+            return;
+        } else if (immValue > 2) {
+            uint32 zeroNum = __builtin_ffsll(immValue) - 1;
+            int64 headVal = static_cast<uint64>(immValue) >> zeroNum;
+            /*
+             * if (headVal - 1) & (headVal - 2) == 0, i.e. (immVal >> zeroNum) - 1 == 1 << n, then
+             * otherOp * immVal = otherOp * (immVal >> zeroNum) * (1 << zeroNum)
+             *                  = (otherOp * ((immVal >> zeroNum) - 1) + otherOp) * (1 << zeroNum)
+             */
+            if (((static_cast<uint64>(headVal) - 1) & (static_cast<uint64>(headVal) - 2)) == 0) {
+                if (otherOp->GetKind() != Operand::kOpdRegister) {
+                    otherOp = &SelectCopy(*otherOp, primType, primType);
+                }
+                ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false);
+                RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType);
+                SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType);
+                SelectAdd(resOpnd, *otherOp, tmpOpnd, primType);
+                ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false);
+                SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType);
+                if (imm->GetValue() < 0) {
+                    SelectNeg(resOpnd, resOpnd, primType);
+                }
+
+                return;
+            }
+        }
+    }
+
+    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
+        SelectMpy(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
+    } else if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
+        SelectMpy(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
+    } else if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
+        SelectMpy(resOpnd, opnd1, opnd0, primType);
+    } else {
+        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Mpy");
+        MOperator mOp =
+            IsPrimitiveFloat(primType) ? (is64Bits ? MOP_xvmuld : MOP_xvmuls) : (is64Bits ? MOP_xmulrrr : MOP_wmulrrr);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+    }
+}
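+
+/*
+ * For example (illustrative; w16 is an arbitrary temporary): x * 10 hits the
+ * decomposition above with immVal = 10, zeroNum = 1, headVal = 5, headVal - 1 = 4:
+ *   lsl w16, w0, #2    // tmp = x * 4
+ *   add w1, w0, w16    // res = x * 5
+ *   lsl w1, w1, #1     // res = x * 10
+ */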
+
+void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType)
+{
+    Operand &opnd0 = LoadIntoRegister(origOpnd0, primType);
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+
+    if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
+        if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
+            IsSignedInteger(primType)) {
+            ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
+            int64 immValue = llabs(imm->GetValue());
+            if ((immValue != 0) && (static_cast<uint64>(immValue) & (static_cast<uint64>(immValue) - 1)) == 0) {
+                if (immValue == 1) {
+                    if (imm->GetValue() > 0) {
+                        uint32 mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr;
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
+                    } else {
+                        SelectNeg(resOpnd, opnd0, primType);
+                    }
+
+                    return;
+                }
+                int32 shiftNumber = __builtin_ffsll(immValue) - 1;
+                ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false);
+                Operand &tmpOpnd = CreateRegisterOperandOfType(primType);
+                SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType);
+                uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs;
+                int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
+                BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, dsize - shiftNumber, bitLen);
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd));
+                SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType);
+                if (imm->GetValue() < 0) {
+                    SelectNeg(resOpnd, resOpnd, primType);
+                }
+
+                return;
+            }
+        } else if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) &&
+                   IsUnsignedInteger(primType)) {
+            ImmOperand *imm = static_cast<ImmOperand *>(&opnd1);
+            if (imm->GetValue() != 0) {
+                if ((imm->GetValue() > 0) &&
+                    ((static_cast<uint64>(imm->GetValue()) & (static_cast<uint64>(imm->GetValue()) - 1)) == 0)) {
+                    ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false);
+                    SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType);
+
+                    return;
+                } else if (imm->GetValue() < 0) {
+                    SelectAArch64Cmp(opnd0, *imm, true, dsize);
+                    SelectAArch64CSet(resOpnd, GetCondOperand(CC_CS), is64Bits);
+
+                    return;
+                }
+            }
+        }
+    }
+
+    if (opnd0Type != Operand::kOpdRegister) {
+        SelectDiv(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
+    } else if (opnd1Type != Operand::kOpdRegister) {
+        SelectDiv(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
+    } else {
+        DEBUG_ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Div");
+        MOperator mOp = IsPrimitiveFloat(primType)
+                            ? (is64Bits ? MOP_ddivrrr : MOP_sdivrrr)
+                            : (IsSignedInteger(primType) ? (is64Bits ? MOP_xsdivrrr : MOP_wsdivrrr)
+                                                         : (is64Bits ? MOP_xudivrrr : MOP_wudivrrr));
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+    }
+}
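+
+/*
+ * For example (illustrative): x / 8 for signed 32-bit x follows the branch above:
+ *   asr w16, w0, #31           // sign mask: 0 or -1
+ *   add w16, w0, w16, LSR #29  // bias negative x by 7 (= 8 - 1)
+ *   asr w1, w16, #3            // quotient, rounded toward zero
+ * A trailing neg is appended when the divisor is negative.
+ */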
+
+Operand *AArch64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI DIV vector operands");
+    /* promoted type */
+    PrimType primType =
+        isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectDiv(resOpnd, opnd0, opnd1, primType);
+    return &resOpnd;
+}
+
+void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned,
+                              bool is64Bits)
+{
+    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
+    Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
+
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Wrong type for REM");
+    /*
+     * printf("%d \n", 29 % 7);
+     * -> 1
+     * printf("%u %d \n", (unsigned)-7, (unsigned)(-7) % 7);
+     * -> 4294967289 4
+     * printf("%d \n", (-7) % 7);
+     * -> 0
+     * printf("%d \n", 237 % -7);
+     * -> 6
+     * printf("implicit i->u conversion %d \n", ((unsigned)237) % -7);
+     * implicit conversion -> 237
+     *
+     * http://stackoverflow.com/questions/35351470/obtaining-remainder-using-single-aarch64-instruction
+     * input: x0 = dividend, x1 = divisor
+     *   udiv|sdiv x2, x0, x1
+     *   msub x3, x2, x1, x0   -- multiply-sub: x3 <- x0 - x2 * x1
+     * result: x2 = quotient, x3 = remainder
+     *
+     * allocate temporary register
+     */
+    RegOperand &temp = CreateRegisterOperandOfType(primType);
+    /*
+     * mov w1, #2
+     * sdiv wTemp, w0, w1
+     * msub wRespond, wTemp, w1, w0
+     * ========>
+     * asr wTemp, w0, #31
+     * lsr wTemp, wTemp, #31  (#30 for 4, #29 for 8, ...)
+     * add wRespond, w0, wTemp
+     * and wRespond, wRespond, #1  (#3 for 4, #7 for 8, ...)
+     * sub wRespond, wRespond, wTemp
+     *
+     * if divided by 2
+     * ========>
+     * lsr wTemp, w0, #31
+     * add wRespond, w0, wTemp
+     * and wRespond, wRespond, #1
+     * sub wRespond, wRespond, wTemp
+     *
+     * for an unsigned rem op, just use and
+     */
+    if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+        ImmOperand *imm = nullptr;
+        Insn *movImmInsn = GetCurBB()->GetLastInsn();
+        if (movImmInsn &&
+            ((movImmInsn->GetMachineOpcode() == MOP_wmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) &&
+            movImmInsn->GetOperand(0).Equals(opnd1)) {
+            /*
+             * mov w1, #2
+             * rem res, w0, w1
+             */
+            imm = static_cast<ImmOperand *>(&movImmInsn->GetOperand(kInsnSecondOpnd));
+        } else if (opnd1.IsImmediate()) {
+            /*
+             * rem res, w0, #2
+             */
+            imm = static_cast<ImmOperand *>(&opnd1);
+        }
+        /* the divisor being positive or negative does not affect the result */
+        int64 dividor = 0;
+        if (imm && (imm->GetValue() != LONG_MIN)) {
+            dividor = llabs(imm->GetValue());
+        }
+        const int64 Log2OfDividor = GetLog2(static_cast<uint64>(dividor));
+        if ((dividor != 0) && (Log2OfDividor > 0)) {
+            if (is64Bits) {
+                CHECK_FATAL(Log2OfDividor < k64BitSize, "imm out of bound");
+                if (isSigned) {
+                    ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - Log2OfDividor, k64BitSize, isSigned);
+                    if (Log2OfDividor != 1) {
+                        /* asr #63: replicate the sign bit across the 64-bit register */
+                        ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned);
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xasrrri6, temp, opnd0, rightShiftAll));
+
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, temp, rightShiftValue));
+                    } else {
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, opnd0, rightShiftValue));
+                    }
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, resOpnd, opnd0, temp));
+                    ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, resOpnd, remBits));
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsubrrr, resOpnd, resOpnd, temp));
+                    return;
+                } else if (imm && imm->GetValue() > 0) {
+                    ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, opnd0, remBits));
+                    return;
+                }
+            } else {
+                CHECK_FATAL(Log2OfDividor < k32BitSize, "imm out of bound");
+                if (isSigned) {
+                    ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - Log2OfDividor, k32BitSize, isSigned);
+                    if (Log2OfDividor != 1) {
+                        /* asr #31: replicate the sign bit across the 32-bit register */
+                        ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned);
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wasrrri5, temp, opnd0, rightShiftAll));
+
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, temp, rightShiftValue));
+                    } else {
+                        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, opnd0, rightShiftValue));
+                    }
+
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddrrr, resOpnd, opnd0, temp));
+                    ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, resOpnd, remBits));
+
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubrrr, resOpnd, resOpnd, temp));
+                    return;
+                } else if (imm && imm->GetValue() > 0) {
+                    ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned);
+                    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, opnd0, remBits));
+                    return;
+                }
+            }
+        }
+    }
+
+    uint32 mopDiv = is64Bits ? (isSigned ? MOP_xsdivrrr : MOP_xudivrrr) : (isSigned ? MOP_wsdivrrr : MOP_wudivrrr);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopDiv, temp, opnd0, opnd1));
+
+    uint32 mopSub = is64Bits ? MOP_xmsubrrrr : MOP_wmsubrrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopSub, resOpnd, temp, opnd1, opnd0));
+}
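+
+/*
+ * For example (illustrative): x % 8 for signed 32-bit x, as produced by the
+ * power-of-two branch above:
+ *   asr w16, w0, #31    // 0 or -1
+ *   lsr w16, w16, #29   // bias: 0 or 7
+ *   add w1, w0, w16
+ *   and w1, w1, #7
+ *   sub w1, w1, w16     // remainder keeps the dividend's sign
+ * For unsigned x % 8 a single 'and w1, w0, #7' is emitted instead.
+ */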
+
+Operand *AArch64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(dtype), "wrong type for rem");
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI REM vector operands");
+
+    /* promoted type */
+    PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectRem(resOpnd, opnd0, opnd1, primType, isSigned, is64Bits);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectLand(BinaryNode &node, Operand &lhsOpnd, Operand &rhsOpnd, const BaseNode &parent)
+{
+    PrimType primType = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Land should be integer type");
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ? PTY_u64 : PTY_u32);
+    /*
+     * OP0 land OP1
+     *   cmp OP0, 0                  # compare OP0 with 0, sets the Z bit
+     *   ccmp OP1, 0, 4 //0100b, ne  # if (OP0 != 0) compare OP1 with 0, else NZCV <- 0100 (as if OP0 == 0)
+     *   cset RES, ne                # RES <- 0 if Z == 1 (i.e. OP0 == 0 || OP1 == 0), RES <- 1 otherwise
+     */
+    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
+    SelectAArch64Cmp(opnd0, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType));
+    Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType);
+    SelectAArch64CCmp(opnd1, CreateImmOperand(0, primType, false), CreateImmOperand(4, PTY_u8, false),
+                      GetCondOperand(CC_NE), is64Bits);
+    SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent,
+                                  bool parentIsBr)
+{
+    PrimType primType = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(primType), "Lor should be integer type");
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ? PTY_u64 : PTY_u32);
+    /*
+     * OP0 lor OP1
+     *   bior RES, OP0, OP1
+     *   cmp RES, 0    # compare the or-result with 0, sets the Z bit
+     *   cset RES, ne  # RES <- 0 if Z == 1 (i.e. OP0 == 0 && OP1 == 0), RES <- 1 otherwise
+     */
+    if (parentIsBr && !is64Bits && opnd0.IsRegister() && (static_cast<RegOperand *>(&opnd0)->GetValidBitsNum() == 1) &&
+        opnd1.IsRegister() && (static_cast<RegOperand *>(&opnd1)->GetValidBitsNum() == 1)) {
+        uint32 mOp = MOP_wiorrrr;
+        resOpnd.SetValidBitsNum(1);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+    } else {
+        SelectBior(resOpnd, opnd0, opnd1, primType);
+        SelectAArch64Cmp(resOpnd, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType));
+        SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits);
+    }
+    return &resOpnd;
+}
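+
+/*
+ * For example (illustrative): (a != 0) && (b != 0) from SelectLand above:
+ *   cmp  w0, #0
+ *   ccmp w1, #0, #4, ne   // if a != 0, compare b with 0; else force NZCV = 0100 (Z set)
+ *   cset w2, ne           // 1 iff both operands are non-zero
+ */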
+
+void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, Opcode opcode, PrimType primType,
+                                const BaseNode &parent)
+{
+    uint32 dsize = resOpnd.GetSize();
+    bool isFloat = IsPrimitiveFloat(primType);
+    Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType);
+
+    /*
+     * most FP constants are passed as MemOperand,
+     * except 0.0 which is passed as kOpdFPImmediate
+     */
+    Operand::OperandType opnd1Type = rhsOpnd.GetKind();
+    Operand *opnd1 = &rhsOpnd;
+    if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) &&
+        (opnd1Type != Operand::kOpdOffset)) {
+        opnd1 = &LoadIntoRegister(rhsOpnd, primType);
+    }
+
+    bool unsignedIntegerComparison = !isFloat && !IsSignedInteger(primType);
+    /*
+     * OP_cmp, OP_cmpl, OP_cmpg
+     * cmp OP0, OP1 ; fcmp for OP_cmpl/OP_cmpg, cmp/fcmpe for OP_cmp
+     * CSINV RES, WZR, WZR, GE
+     * CSINC RES, RES, WZR, LE
+     * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow)
+     * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow)
+     */
+    RegOperand &xzr = GetZeroOpnd(dsize);
+    if ((opcode == OP_cmpl) || (opcode == OP_cmpg)) {
+        DEBUG_ASSERT(isFloat, "incorrect operand types");
+        SelectTargetFPCmpQuiet(opnd0, *opnd1, GetPrimTypeBitSize(primType));
+        SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize));
+        SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
+        if (opcode == OP_cmpl) {
+            SelectAArch64CSINV(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize));
+        } else {
+            SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize));
+        }
+        return;
+    }
+
+    if (opcode == OP_cmp) {
+        SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));
+        if (unsignedIntegerComparison) {
+            SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_HS), (dsize == k64BitSize));
+            SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LS), (dsize == k64BitSize));
+        } else {
+            SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize));
+            SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize));
+        }
+        return;
+    }
+
+    // lt u8 i32 (xxx, 0) => get the sign bit
+    if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() &&
+        (static_cast<ImmOperand *>(opnd1)->GetValue() == 0) && parent.GetOpCode() != OP_select && !isFloat) {
+        bool is64Bits = (opnd0.GetSize() == k64BitSize);
+        if (!unsignedIntegerComparison) {
+            int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
+            ImmOperand &shiftNum = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits,
+                                                    static_cast<uint32>(bitLen), false);
+            MOperator mOpCode = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, shiftNum));
+            return;
+        }
+        ImmOperand &constNum = CreateImmOperand(0, is64Bits ? k64BitSize : k32BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_xmovri64 : MOP_wmovri32, resOpnd, constNum));
+        return;
+    }
+    SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType));
+
+    ConditionCode cc = CC_EQ;
+    switch (opcode) {
+        case OP_eq:
+            cc = CC_EQ;
+            break;
+        case OP_ne:
+            cc = CC_NE;
+            break;
+        case OP_le:
+            cc = unsignedIntegerComparison ? CC_LS : CC_LE;
+            break;
+        case OP_ge:
+            cc = unsignedIntegerComparison ? CC_HS : CC_GE;
+            break;
+        case OP_gt:
+            cc = unsignedIntegerComparison ? CC_HI : CC_GT;
+            break;
+        case OP_lt:
+            cc = unsignedIntegerComparison ? CC_LO : CC_LT;
+            break;
+        default:
+            CHECK_FATAL(false, "illegal logical operator");
+    }
+    SelectAArch64CSet(resOpnd, GetCondOperand(cc), (dsize == k64BitSize));
+}
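+
+/*
+ * For example (illustrative): res = (a < b) lowers, per the condition table above, to
+ *   cmp  w0, w1
+ *   cset w2, lt    // signed operands; unsigned operands use lo (CC_LO)
+ */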
+
+Operand *AArch64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(node.GetPrimType())) {
+        resOpnd = &GetOrCreateResOperand(parent, node.GetPrimType());
+        SelectCmpOp(*resOpnd, opnd0, opnd1, node.GetOpCode(), node.GetOpndType(), parent);
+    } else {
+        resOpnd = SelectVectorCompare(&opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(),
+                                      node.GetOpCode());
+    }
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize)
+{
+    MOperator mOpCode = 0;
+    if (o1.GetKind() == Operand::kOpdFPImmediate) {
+        CHECK_FATAL(static_cast<ImmOperand &>(o1).GetValue() == 0, "NIY");
+        mOpCode = (dsize == k64BitSize) ? MOP_dcmpqri : ((dsize == k32BitSize) ? MOP_scmpqri : MOP_hcmpqri);
+    } else if (o1.GetKind() == Operand::kOpdRegister) {
+        mOpCode = (dsize == k64BitSize) ? MOP_dcmpqrr : ((dsize == k32BitSize) ? MOP_scmpqrr : MOP_hcmpqrr);
+    } else {
+        CHECK_FATAL(false, "unsupported operand type");
+    }
+    Operand &rflag = GetOrCreateRflag();
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, o1));
+}
+
+void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize)
+{
+    MOperator mOpCode = 0;
+    Operand *newO1 = &o1;
+    if (isIntType) {
+        if ((o1.GetKind() == Operand::kOpdImmediate) || (o1.GetKind() == Operand::kOpdOffset)) {
+            ImmOperand *immOpnd = static_cast<ImmOperand *>(&o1);
+            /*
+             * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12
+             * aarch64 assembly takes up to 24 bits, if the lower 12 bits are all 0
+             */
+            if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
+                mOpCode = (dsize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
+            } else {
+                /* load into register */
+                PrimType ptype = (dsize == k64BitSize) ? PTY_i64 : PTY_i32;
+                newO1 = &SelectCopy(o1, ptype, ptype);
+                mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
+            }
+        } else if (o1.GetKind() == Operand::kOpdRegister) {
+            mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr;
+        } else {
+            CHECK_FATAL(false, "unsupported operand type");
+        }
+    } else { /* float */
+        if (o1.GetKind() == Operand::kOpdFPImmediate) {
+            CHECK_FATAL(static_cast<ImmOperand &>(o1).GetValue() == 0, "NIY");
+            mOpCode = (dsize == k64BitSize) ? MOP_dcmperi : ((dsize == k32BitSize) ? MOP_scmperi : MOP_hcmperi);
+        } else if (o1.GetKind() == Operand::kOpdRegister) {
+            mOpCode = (dsize == k64BitSize) ? MOP_dcmperr : ((dsize == k32BitSize) ? MOP_scmperr : MOP_hcmperr);
+        } else {
+            CHECK_FATAL(false, "unsupported operand type");
+        }
+    }
+    DEBUG_ASSERT(mOpCode != 0, "mOpCode undefined");
+    Operand &rflag = GetOrCreateRflag();
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, *newO1));
+}
+
+void AArch64CGFunc::SelectAArch64CCmp(Operand &o, Operand &i, Operand &nzcv, CondOperand &cond, bool is64Bits)
+{
+    uint32 mOpCode = is64Bits ? MOP_xccmpriic : MOP_wccmpriic;
+    Operand &rflag = GetOrCreateRflag();
+    std::vector<Operand *> opndVec;
+    opndVec.push_back(&rflag);
+    opndVec.push_back(&o);
+    opndVec.push_back(&i);
+    opndVec.push_back(&nzcv);
+    opndVec.push_back(&cond);
+    opndVec.push_back(&rflag);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opndVec));
+}
+
+void AArch64CGFunc::SelectAArch64CSet(Operand &r, CondOperand &cond, bool is64Bits)
+{
+    MOperator mOpCode = is64Bits ? MOP_xcsetrc : MOP_wcsetrc;
+    Operand &rflag = GetOrCreateRflag();
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, r, cond, rflag));
+}
+
+void AArch64CGFunc::SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
+{
+    MOperator mOpCode = is64Bits ? MOP_xcsinvrrrc : MOP_wcsinvrrrc;
+    Operand &rflag = GetOrCreateRflag();
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
+}
+
+void AArch64CGFunc::SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits)
+{
+    MOperator mOpCode = is64Bits ? MOP_xcsincrrrc : MOP_wcsincrrrc;
+    Operand &rflag = GetOrCreateRflag();
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag));
+}
+
+Operand *AArch64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    return SelectRelationOperator(kAND, node, opnd0, opnd1, parent);
+}
+
+void AArch64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    SelectRelationOperator(kAND, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0,
+                                               Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        PrimType primType =
+            is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); /* promoted type */
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+        SelectRelationOperator(operatorCode, *resOpnd, opnd0, opnd1, primType);
+    } else {
+        /* vector operands */
+        resOpnd = SelectVectorBitwiseOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(),
+                                        (operatorCode == kAND) ? OP_band : (operatorCode == kIOR ? OP_bior : OP_bxor));
+    }
+    return resOpnd;
+}
+
+MOperator AArch64CGFunc::SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern,
+                                           bool is64Bits, bool isBitmaskImmediate, bool isBitNumLessThan16) const
+{
+    MOperator mOp = MOP_undef;
+    if (opndPattern == kRegReg) {
+        switch (operatorCode) {
+            case kAND:
+                mOp = is64Bits ? MOP_xandrrr : MOP_wandrrr;
+                break;
+            case kIOR:
+                mOp = is64Bits ? MOP_xiorrrr : MOP_wiorrrr;
+                break;
+            case kEOR:
+                mOp = is64Bits ? MOP_xeorrrr : MOP_weorrrr;
+                break;
+            default:
+                break;
+        }
+        return mOp;
+    }
+    /* opndPattern == kRegImm */
+    if (isBitmaskImmediate) {
+        switch (operatorCode) {
+            case kAND:
+                mOp = is64Bits ? MOP_xandrri13 : MOP_wandrri12;
+                break;
+            case kIOR:
+                mOp = is64Bits ? MOP_xiorrri13 : MOP_wiorrri12;
+                break;
+            case kEOR:
+                mOp = is64Bits ? MOP_xeorrri13 : MOP_weorrri12;
+                break;
+            default:
+                break;
+        }
+        return mOp;
+    }
+    /* a normal immediate value whose significant bits fit in 16 bits */
+    if (isBitNumLessThan16) {
+        switch (operatorCode) {
+            case kAND:
+                mOp = is64Bits ? MOP_xandrrrs : MOP_wandrrrs;
+                break;
+            case kIOR:
+                mOp = is64Bits ? MOP_xiorrrrs : MOP_wiorrrrs;
+                break;
+            case kEOR:
+                mOp = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
+                break;
+            default:
+                break;
+        }
+        return mOp;
+    }
+    return mOp;
+}
+
+void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0,
+                                           Operand &opnd1, PrimType primType)
+{
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    /* op #imm, #imm */
+    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
+        SelectRelationOperator(operatorCode, resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType);
+        return;
+    }
+    /* op #imm, reg -> op reg, #imm */
+    if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
+        SelectRelationOperator(operatorCode, resOpnd, opnd1, opnd0, primType);
+        return;
+    }
+    /* op reg, reg */
+    if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) {
+        DEBUG_ASSERT(IsPrimitiveInteger(primType), "NYI band");
+        MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+        return;
+    }
+    /* op reg, #imm */
+    if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) {
+        if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) {
+            SelectRelationOperator(operatorCode, resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType);
+            return;
+        }
+
+        ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1);
+        if (immOpnd->IsZero()) {
+            if (operatorCode == kAND) {
+                uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr;
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMv, resOpnd, GetZeroOpnd(dsize)));
+            } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) {
+                SelectCopy(resOpnd, primType, opnd0, primType);
+            }
+        } else if ((immOpnd->IsAllOnes()) || (!is64Bits && immOpnd->IsAllOnes32bit())) {
+            if (operatorCode == kAND) {
+                SelectCopy(resOpnd, primType, opnd0, primType);
+            } else if (operatorCode == kIOR) {
+                uint32 mopMovn = is64Bits ? MOP_xmovnri16 : MOP_wmovnri16;
+                ImmOperand &src16 = CreateImmOperand(0, k16BitSize, false);
+                BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(0, is64Bits);
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMovn, resOpnd, src16, *lslOpnd));
+            } else if (operatorCode == kEOR) {
+                SelectMvn(resOpnd, opnd0, primType);
+            }
+        } else if (immOpnd->IsBitmaskImmediate()) {
+            MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, true, false);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1));
+        } else {
+            int64 immVal = immOpnd->GetValue();
+            int32 tail0BitNum = GetTail0BitNum(immVal);
+            int32 head0BitNum = GetHead0BitNum(immVal);
+            const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum;
+            RegOperand &regOpnd = CreateRegisterOperandOfType(primType);
+
+            if (bitNum <= k16ValidBit) {
+                int64 newImm = (static_cast<uint64>(immVal) >> static_cast<uint32>(tail0BitNum)) & 0xFFFF;
+                ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false);
+                SelectCopyImm(regOpnd, immOpnd1, primType);
+                MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true);
+                int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits;
+                BitShiftOperand &shiftOpnd =
+                    CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(tail0BitNum), bitLen);
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd));
+            } else {
+                SelectCopyImm(regOpnd, *immOpnd, primType);
+                MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false);
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd));
+            }
+        }
+    }
+}
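+
+/*
+ * For example (illustrative): 'and' with #0xff00 is a valid bitmask immediate and
+ * encodes directly, while #0x1234 is not; its 11 significant bits are materialized
+ * and applied with a shifted register instead:
+ *   and  w0, w1, #0xff00
+ * versus
+ *   movz w16, #0x48d
+ *   and  w0, w1, w16, LSL #2   // 0x48d << 2 == 0x1234
+ */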
+
+Operand *AArch64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    return SelectRelationOperator(kIOR, node, opnd0, opnd1, parent);
+}
+
+void AArch64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    SelectRelationOperator(kIOR, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *AArch64CGFunc::SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1,
+                                       const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    /* promoted type */
+    PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, primType);
+    SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType);
+    return &resOpnd;
+}
+
+void AArch64CGFunc::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    if (IsPrimitiveInteger(primType)) {
+        RegOperand &regOpnd0 = LoadIntoRegister(opnd0, primType);
+        Operand &regOpnd1 = LoadIntoRegister(opnd1, primType);
+        SelectAArch64Cmp(regOpnd0, regOpnd1, true, dsize);
+        Operand &newResOpnd = LoadIntoRegister(resOpnd, primType);
+        if (isMin) {
+            CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_LT) : GetCondOperand(CC_LO);
+            SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
+        } else {
+            CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_GT) : GetCondOperand(CC_HI);
+            SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize);
+        }
+    } else if (IsPrimitiveFloat(primType)) {
+        RegOperand &regOpnd0 = LoadIntoRegister(opnd0, primType);
+        RegOperand &regOpnd1 = LoadIntoRegister(opnd1, primType);
+        SelectFMinFMax(resOpnd, regOpnd0, regOpnd1, is64Bits, isMin);
+    } else {
+        CHECK_FATAL(false, "NIY type max or min");
+    }
+}
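+
+/*
+ * For example (illustrative): min(a, b) for signed 32-bit integers becomes
+ *   cmp  w0, w1
+ *   csel w2, w0, w1, lt   // lo when unsigned; gt/hi for max
+ */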
+
+Operand *AArch64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    return SelectMinOrMax(true, node, opnd0, opnd1, parent);
+}
+
+void AArch64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *AArch64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    return SelectMinOrMax(false, node, opnd0, opnd1, parent);
+}
+
+void AArch64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType);
+}
+
+void AArch64CGFunc::SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin)
+{
+    uint32 mOpCode = isMin ? (is64Bits ? MOP_xfminrrr : MOP_wfminrrr) : (is64Bits ? MOP_xfmaxrrr : MOP_wfmaxrrr);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1));
+}
+
+Operand *AArch64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    return SelectRelationOperator(kEOR, node, opnd0, opnd1, parent);
+}
+
+void AArch64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    SelectRelationOperator(kEOR, resOpnd, opnd0, opnd1, primType);
+}
+
+Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (dsize == k64BitSize);
+    bool isFloat = IsPrimitiveFloat(dtype);
+    RegOperand *resOpnd = nullptr;
+    Opcode opcode = node.GetOpCode();
+
+    bool isOneElemVector = false;
+    BaseNode *expr = node.Opnd(0);
+    if (expr->GetOpCode() == OP_dread) {
+        MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(static_cast<DreadNode *>(expr)->GetStIdx());
+        isOneElemVector = symbol->GetAttr(ATTR_oneelem_simd);
+    }
+
+    Operand *opd0 = &opnd0;
+    PrimType otyp0 = expr->GetPrimType();
+    if (IsPrimitiveVector(dtype) && opnd0.IsConstImmediate()) {
+        opd0 = SelectVectorFromScalar(dtype, opd0, node.Opnd(0)->GetPrimType());
+        otyp0 = dtype;
+    }
+
+    if (IsPrimitiveVector(dtype) && opnd1.IsConstImmediate()) {
+        int64 sConst = static_cast<ImmOperand &>(opnd1).GetValue();
+        resOpnd = SelectVectorShiftImm(dtype, opd0, &opnd1, static_cast<int32>(sConst), opcode);
+    } else if ((IsPrimitiveVector(dtype) || isOneElemVector) && !opnd1.IsConstImmediate()) {
+        resOpnd = SelectVectorShift(dtype, opd0, otyp0, &opnd1, node.Opnd(1)->GetPrimType(), opcode);
+    } else {
+        PrimType primType =
+            isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32));
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+        ShiftDirection direct = (opcode == OP_lshr) ? kShiftLright : ((opcode == OP_ashr) ? kShiftAright : kShiftLeft);
+        SelectShift(*resOpnd, opnd0, opnd1, direct, primType);
+    }
+
+    if (dtype == PTY_i16) {
+        MOperator exOp = is64Bits ? MOP_xsxth64 : MOP_xsxth32;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
+    } else if (dtype == PTY_i8) {
+        MOperator exOp = is64Bits ? MOP_xsxtb64 : MOP_xsxtb32;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd));
+    }
+    return resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    uint32 dsize = GetPrimTypeBitSize(dtype);
+    PrimType primType = (dsize == k64BitSize) ? PTY_u64 : PTY_u32;
+    RegOperand *resOpnd = &GetOrCreateResOperand(parent, primType);
+    Operand *firstOpnd = &LoadIntoRegister(opnd0, primType);
+    MOperator mopRor = (dsize == k64BitSize) ? MOP_xrorrrr : MOP_wrorrrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopRor, *resOpnd, *firstOpnd, opnd1));
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2,
+                                    PrimType primType)
+{
+    opnd0 = &LoadIntoRegister(*opnd0, primType);
+    opnd1 = &LoadIntoRegister(*opnd1, primType);
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    MOperator mopBxor = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBxor, resOpnd, *opnd0, *opnd1, opnd2));
+}
+
+void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct,
+                                PrimType primType)
+{
+    Operand::OperandType opnd1Type = opnd1.GetKind();
+    uint32 dsize = GetPrimTypeBitSize(primType);
+    bool is64Bits = (dsize == k64BitSize);
+    Operand *firstOpnd = &LoadIntoRegister(opnd0, primType);
+
+    MOperator mopShift;
+    if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) {
+        ImmOperand *immOpnd1 = static_cast<ImmOperand *>(&opnd1);
+        const int64 kVal = immOpnd1->GetValue();
+        const uint32 kShiftamt = is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits;
+        if (kVal == 0) {
+            SelectCopy(resOpnd, primType, *firstOpnd, primType);
+            return;
+        }
+        /* e.g. a >> -1 */
+        if ((kVal < 0) || (kVal > kShiftamt)) {
+            SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
+            return;
+        }
+        switch (direct) {
+            case kShiftLeft:
+                mopShift = is64Bits ? MOP_xlslrri6 : MOP_wlslrri5;
+                break;
+            case kShiftAright:
+                mopShift = is64Bits ? MOP_xasrrri6 : MOP_wasrrri5;
+                break;
+            case kShiftLright:
+                mopShift = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5;
+                break;
+        }
+    } else if (opnd1Type != Operand::kOpdRegister) {
+        SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType);
+        return;
+    } else {
+        switch (direct) {
+            case kShiftLeft:
+                mopShift = is64Bits ? MOP_xlslrrr : MOP_wlslrrr;
+                break;
+            case kShiftAright:
+                mopShift = is64Bits ? MOP_xasrrrr : MOP_wasrrrr;
+                break;
+            case kShiftLright:
+                mopShift = is64Bits ? MOP_xlsrrrr : MOP_wlsrrrr;
+                break;
+        }
+    }
+
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopShift, resOpnd, *firstOpnd, opnd1));
+}
+
+Operand *AArch64CGFunc::SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0)
+{
+    PrimType dtyp = node.GetPrimType();
+    bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
+    /* promoted type */
+    PrimType primType = is64Bits ? PTY_i64 : PTY_i32;
+    RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
+    uint32 mopCsneg = is64Bits ? MOP_xcnegrrrc : MOP_wcnegrrrc;
+    /* ABS requires the operand be interpreted as a signed integer */
+    CondOperand &condOpnd = GetCondOperand(CC_MI);
+    /* the flag-setting (subs) form follows the plain sub form in the MOP table */
+    MOperator newMop = lastInsn.GetMachineOpcode() + 1;
+    Operand &rflag = GetOrCreateRflag();
+    std::vector<Operand *> opndVec;
+    opndVec.push_back(&rflag);
+    for (uint32 i = 0; i < lastInsn.GetOperandSize(); i++) {
+        opndVec.push_back(&lastInsn.GetOperand(i));
+    }
+    Insn *subsInsn = &GetInsnBuilder()->BuildInsn(newMop, opndVec);
+    GetCurBB()->ReplaceInsn(lastInsn, *subsInsn);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, condOpnd, rflag));
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0)
+{
+    PrimType dtyp = node.GetPrimType();
+    if (IsPrimitiveVector(dtyp)) {
+        return SelectVectorAbs(dtyp, &opnd0);
+    } else if (IsPrimitiveFloat(dtyp)) {
+        CHECK_FATAL(GetPrimTypeBitSize(dtyp) >= k32BitSize, "We don't support half-word FP operands yet");
+        bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
+        Operand &newOpnd0 = LoadIntoRegister(opnd0, dtyp);
+        RegOperand &resOpnd = CreateRegisterOperandOfType(dtyp);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_dabsrr : MOP_sabsrr, resOpnd, newOpnd0));
+        return &resOpnd;
+    } else {
+        bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize);
+        /* promoted type */
+        PrimType primType = is64Bits ? PTY_i64 : PTY_i32;
+        Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
+        Insn *lastInsn = GetCurBB()->GetLastInsn();
+        if (lastInsn != nullptr && lastInsn->GetMachineOpcode() >= MOP_xsubrrr &&
+            lastInsn->GetMachineOpcode() <= MOP_wsubrri12) {
+            return SelectAbsSub(*lastInsn, node, newOpnd0);
+        }
+        RegOperand &resOpnd = CreateRegisterOperandOfType(primType);
+        SelectAArch64Cmp(newOpnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true,
+                         GetPrimTypeBitSize(dtyp));
+        uint32 mopCsneg = is64Bits ? MOP_xcsnegrrrc : MOP_wcsnegrrrc;
+        /* ABS requires the operand be interpreted as a signed integer */
+        CondOperand &condOpnd = GetCondOperand(CC_GE);
+        Operand &rflag = GetOrCreateRflag();
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, newOpnd0, condOpnd, rflag));
+        return &resOpnd;
+    }
+}
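+
+/*
+ * For example (illustrative): abs(x) for signed 32-bit x in the general case:
+ *   cmp   w0, #0
+ *   csneg w1, w0, w0, ge   // x if x >= 0, else -x
+ * The SelectAbsSub fusion instead turns a preceding sub into subs and emits a
+ * cneg on the MI condition, reusing the flags it set.
+ */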
+
+Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    DEBUG_ASSERT(IsPrimitiveInteger(dtype) || IsPrimitiveVectorInteger(dtype), "bnot expect integer or NYI");
+    uint32 bitSize = GetPrimTypeBitSize(dtype);
+    bool is64Bits = (bitSize == k64BitSize);
+    bool isSigned = IsSignedInteger(dtype);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        /* promoted type */
+        PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32);
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+
+        Operand &newOpnd0 = LoadIntoRegister(opnd0, primType);
+
+        uint32 mopBnot = is64Bits ? MOP_xnotrr : MOP_wnotrr;
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBnot, *resOpnd, newOpnd0));
+        /* generate and resOpnd, resOpnd, 0x1/0xFF/0xFFFF for PTY_u1/PTY_u8/PTY_u16 */
+        int64 immValue = 0;
+        if (bitSize == k1BitSize) {
+            immValue = 1;
+        } else if (bitSize == k8BitSize) {
+            immValue = 0xFF;
+        } else if (bitSize == k16BitSize) {
+            immValue = 0xFFFF;
+        }
+        if (immValue != 0) {
+            ImmOperand &imm = CreateImmOperand(PTY_u32, immValue);
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, *resOpnd, *resOpnd, imm));
+        }
+    } else {
+        /* vector operand */
+        resOpnd = SelectVectorNot(dtype, &opnd0);
+    }
+    return resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    auto bitWidth = GetPrimTypeBitSize(dtype);
+    RegOperand *resOpnd = &GetOrCreateResOperand(parent, dtype);
+    Operand &newOpnd0 = LoadIntoRegister(opnd0, dtype);
+    uint32 mopBswap = (bitWidth == 64) ? MOP_xrevrr : ((bitWidth == 32) ? MOP_wrevrr : MOP_wrevrr16);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBswap, *resOpnd, newOpnd0));
+    return resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool isSigned = IsSignedInteger(dtype);
+    uint8 bitOffset = node.GetBitsOffset();
+    uint8 bitSize = node.GetBitsSize();
+    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
+    CHECK_FATAL(!is64Bits, "dest opnd should not be 64bit");
+    PrimType destType = GetIntegerPrimTypeBySizeAndSign(bitSize, isSigned);
+    Operand *result = SelectIread(parent, *static_cast<IreadNode *>(node.Opnd(0)),
+                                  static_cast<int>(bitOffset / k8BitSize), destType);
+    return result;
+}
+
+Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent)
+{
+    uint8 bitOffset = node.GetBitsOffset();
+    uint8 bitSize = node.GetBitsSize();
+    RegOperand *srcVecRegOperand = static_cast<RegOperand *>(&srcOpnd);
+    if (srcVecRegOperand && srcVecRegOperand->IsRegister() && (srcVecRegOperand->GetSize() == k128BitSize)) {
+        if ((bitSize == k8BitSize || bitSize == k16BitSize || bitSize == k32BitSize || bitSize == k64BitSize) &&
+            (bitOffset % bitSize) == k0BitSize) {
+            uint32 lane = bitOffset / bitSize;
+            PrimType srcVecPtype;
+            if (bitSize == k64BitSize) {
+                srcVecPtype = PTY_v2u64;
+            } else if (bitSize == k32BitSize) {
+                srcVecPtype = PTY_v4u32;
+            } else if (bitSize == k16BitSize) {
+                srcVecPtype = PTY_v8u16;
+            } else {
+                srcVecPtype = PTY_v16u8;
+            }
+            RegOperand *resRegOperand =
+                SelectVectorGetElement(node.GetPrimType(), &srcOpnd, srcVecPtype, static_cast<int32>(lane));
+            return resRegOperand;
+        } else {
+            CHECK_FATAL(false, "NYI");
+        }
+    }
+    PrimType dtype = node.GetPrimType();
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+    bool isSigned =
+        (node.GetOpCode() == OP_sext) ? true : (node.GetOpCode() == OP_zext) ? false : IsSignedInteger(dtype);
+    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
+    uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits;
+    Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
+    if (bitOffset == 0) {
+        if (!isSigned && (bitSize < immWidth)) {
+            SelectBand(resOpnd, opnd0,
+                       CreateImmOperand(static_cast<int64>((static_cast<uint64>(1) << bitSize) - 1), immWidth, false),
+                       dtype);
+            return &resOpnd;
+        } else {
+            MOperator mOp = MOP_undef;
+            if (bitSize == k8BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef)
+                               : (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef));
+            } else if (bitSize == k16BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef)
+                               : (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef));
+            } else if (bitSize == k32BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr;
+            }
+            if (mOp != MOP_undef) {
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
+                return &resOpnd;
+            }
+        }
+    }
+    uint32 mopBfx =
+        is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? MOP_wsbfxrri5i5 : MOP_wubfxrri5i5);
+    ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false);
+    ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2));
+    return &resOpnd;
+}
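+
+/*
+ * For example (illustrative): extracting 5 bits at bit offset 3 of a u32 uses
+ *   ubfx w1, w0, #3, #5   // sbfx for the signed case
+ * while offset-0 extracts prefer the cheaper and/uxt{b,h}/sxt{b,h} forms above.
+ */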
+            if (bitSize == k8BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef)
+                               : (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef));
+            } else if (bitSize == k16BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef)
+                               : (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef));
+            } else if (bitSize == k32BitSize) {
+                mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr;
+            }
+            if (mOp != MOP_undef) {
+                GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
+                return &resOpnd;
+            }
+        }
+    }
+    uint32 mopBfx =
+        is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? MOP_wsbfxrri5i5 : MOP_wubfxrri5i5);
+    ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false);
+    ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2));
+    return &resOpnd;
+}
+
+/*
+ * the operand fits in MOVK if
+ * is64Bits && boffset == 0, 16, 32 or 48, && bSize == 16, so boffset / 16 == 0, 1, 2, 3; (boffset / 16) & (~3) == 0
+ * or is32Bits && boffset == 0 or 16, && bSize == 16, so boffset / 16 == 0, 1; (boffset / 16) & (~1) == 0
+ * imm range of aarch64 movk is [0, 65535] (imm16)
+ */
+inline bool IsMoveWideKeepable(int64 offsetVal, uint32 bitOffset, uint32 bitSize, bool is64Bits)
+{
+    DEBUG_ASSERT(is64Bits || (bitOffset < k32BitSize), "");
+    bool isOutOfRange = offsetVal < 0;
+    if (!isOutOfRange) {
+        isOutOfRange = (static_cast<uint64>(offsetVal) >> k16BitSize) > 0;
+    }
+    return (!isOutOfRange) && bitSize == k16BitSize &&
+           ((bitOffset >> k16BitShift) & ~static_cast<uint32>(is64Bits ? 0x3 : 0x1)) == 0;
+}
+
+/* we use the fact that A ^ B ^ A == B, and A ^ 0 == A */
+Operand *AArch64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent)
+{
+    uint32 bitOffset = node.GetBitsOffset();
+    uint32 bitSize = node.GetBitsSize();
+    PrimType regType = node.GetPrimType();
+    bool is64Bits = GetPrimTypeBitSize(regType) == k64BitSize;
+    /*
+     * if operand 1 is an immediate and fits in MOVK, use it
+     * MOVK Wd, #imm{, LSL #shift} ; 32-bit general registers
+     * MOVK Xd, #imm{, LSL #shift} ; 64-bit general registers
+     */
+    if (opnd1.IsIntImmediate() &&
+        IsMoveWideKeepable(static_cast<ImmOperand &>(opnd1).GetValue(), bitOffset, bitSize, is64Bits)) {
+        RegOperand &resOpnd = GetOrCreateResOperand(parent, regType);
+        SelectCopy(resOpnd, regType, opnd0, regType);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((is64Bits ? MOP_xmovkri16 : MOP_wmovkri16), resOpnd, opnd1,
+                                                           *GetLogicalShiftLeftOperand(bitOffset, is64Bits)));
+        return &resOpnd;
+    } else {
+        Operand &movOpnd = LoadIntoRegister(opnd1, regType);
+        uint32 mopBfi = is64Bits ? MOP_xbfirri6i6 : MOP_wbfirri5i5;
+        ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false);
+        ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfi, opnd0, movOpnd, immOpnd1, immOpnd2));
+        return &opnd0;
+    }
+}
+
+Operand *AArch64CGFunc::SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
+    Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype);
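+    /*
+     * A sketch of the expected lowering (operand in w0, result in w1):
+     *     cmp  w0, #0
+     *     cset w1, EQ    // w1 = (w0 == 0) ? 1 : 0
+     */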
+    SelectAArch64Cmp(opnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true,
+                     GetPrimTypeBitSize(dtype));
+    SelectAArch64CSet(resOpnd, GetCondOperand(CC_EQ), is64Bits);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
+    RegOperand *resOpnd = nullptr;
+    if (!IsPrimitiveVector(dtype)) {
+        PrimType primType;
+        if (IsPrimitiveFloat(dtype)) {
+            primType = dtype;
+        } else {
+            primType = is64Bits ? (PTY_i64) : (PTY_i32); /* promoted type */
+        }
+        resOpnd = &GetOrCreateResOperand(parent, primType);
+        SelectNeg(*resOpnd, opnd0, primType);
+    } else {
+        /* vector operand */
+        resOpnd = SelectVectorNeg(dtype, &opnd0);
+    }
+    return resOpnd;
+}
+
+void AArch64CGFunc::SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType)
+{
+    Operand &opnd0 = LoadIntoRegister(srcOpnd, primType);
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    MOperator mOp;
+    if (IsPrimitiveFloat(primType)) {
+        mOp = is64Bits ? MOP_xfnegrr : MOP_wfnegrr;
+    } else {
+        mOp = is64Bits ? MOP_xinegrr : MOP_winegrr;
+    }
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
+}
+
+void AArch64CGFunc::SelectMvn(Operand &dest, Operand &src, PrimType primType)
+{
+    Operand &opnd0 = LoadIntoRegister(src, primType);
+    bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize);
+    MOperator mOp;
+    DEBUG_ASSERT(!IsPrimitiveFloat(primType), "Instruction 'mvn' does not have a float version.");
+    mOp = is64Bits ? MOP_xnotrr : MOP_wnotrr;
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0));
+}
+
+Operand *AArch64CGFunc::SelectRecip(UnaryNode &node, Operand &src, const BaseNode &parent)
+{
+    /*
+     * fconsts s15, #112
+     * fdivs s0, s15, s0
+     */
+    PrimType dtype = node.GetPrimType();
+    if (!IsPrimitiveFloat(dtype)) {
+        DEBUG_ASSERT(false, "should be float type");
+        return nullptr;
+    }
+    Operand &opnd0 = LoadIntoRegister(src, dtype);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+    Operand *one = nullptr;
+    if (GetPrimTypeBitSize(dtype) == k64BitSize) {
+        MIRDoubleConst *c = memPool->New<MIRDoubleConst>(1.0, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64));
+        one = SelectDoubleConst(*c, node);
+    } else if (GetPrimTypeBitSize(dtype) == k32BitSize) {
+        MIRFloatConst *c = memPool->New<MIRFloatConst>(1.0f, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f32));
+        one = SelectFloatConst(*c, node);
+    } else {
+        CHECK_FATAL(false, "we don't support half-precision fp operations yet");
+    }
+    SelectDiv(resOpnd, *one, opnd0, dtype);
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent)
+{
+    /*
+     * gcc generates code like below for better accuracy:
+     * fsqrts s15, s0
+     * fcmps s15, s15
+     * fmstat
+     * beq .L4
+     * push {r3, lr}
+     * bl sqrtf
+     * pop {r3, pc}
+     * .L4:
+     * fcpys s0, s15
+     * bx lr
+     */
+    PrimType dtype = node.GetPrimType();
+    if (!IsPrimitiveFloat(dtype)) {
+        DEBUG_ASSERT(false, "should be float type");
+        return nullptr;
+    }
+    bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize);
+    Operand &opnd0 = LoadIntoRegister(src, dtype);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ?
MOP_vsqrtd : MOP_vsqrts, resOpnd, opnd0)); + return &resOpnd; +} + +void AArch64CGFunc::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) +{ + bool is64BitsFloat = (ftype == PTY_f64); + MOperator mOp = 0; + + DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong from type"); + Operand &opnd0 = LoadIntoRegister(srcOpnd, ftype); + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtrf : MOP_vcvtrd; + break; + case PTY_u32: + case PTY_a32: + mOp = !is64BitsFloat ? MOP_vcvturf : MOP_vcvturd; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtrf : MOP_xvcvtrd; + break; + case PTY_u64: + case PTY_a64: + mOp = !is64BitsFloat ? MOP_xvcvturf : MOP_xvcvturd; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); +} + +void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType) +{ + DEBUG_ASSERT((toType == PTY_f32) || (toType == PTY_f64), "unexpected type"); + bool is64BitsFloat = (toType == PTY_f64); + MOperator mOp = 0; + uint32 fsize = GetPrimTypeBitSize(fromType); + + PrimType itype = (GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32); + + Operand *opnd0 = &LoadIntoRegister(origOpnd0, itype); + + /* need extension before cvt */ + DEBUG_ASSERT(opnd0->IsRegister(), "opnd should be a register operand"); + Operand *srcOpnd = opnd0; + if (IsSignedInteger(fromType) && (fsize < k32BitSize)) { + srcOpnd = &CreateRegisterOperandOfType(itype); + mOp = (fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *srcOpnd, *opnd0)); + } + + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtfr : MOP_vcvtdr; + break; + case PTY_u32: + mOp = !is64BitsFloat ? MOP_vcvtufr : MOP_vcvtudr; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtfr : MOP_xvcvtdr; + break; + case PTY_u64: + mOp = !is64BitsFloat ? 
MOP_xvcvtufr : MOP_xvcvtudr; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *srcOpnd)); +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) +{ + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + DEBUG_ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, + const std::string &name) +{ + MapleVector argNodes = intrnNode.GetNopnd(); + std::vector opndVec; + std::vector opndTypes; + RegOperand *retOpnd = &CreateRegisterOperandOfType(retType); + opndVec.push_back(retOpnd); + opndTypes.push_back(retType); + + for (BaseNode *argexpr : argNodes) { + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + opndVec.push_back(opnd); + opndTypes.push_back(ptype); + } + SelectLibCallNArg(name, opndVec, opndTypes, retType, false); + + return retOpnd; +} + +/* According to gcc.target/aarch64/ffs.c */ +Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) +{ + RegOperand &destOpnd = LoadIntoRegister(argOpnd, argType); + uint32 argSize = GetPrimTypeBitSize(argType); + DEBUG_ASSERT((argSize == k64BitSize || argSize == k32BitSize), "Unexpect arg type"); + /* cmp */ + ImmOperand &zeroOpnd = CreateImmOperand(0, argSize, false); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(argSize == k64BitSize ? MOP_xcmpri : MOP_wcmpri, rflag, destOpnd, zeroOpnd)); + /* rbit */ + RegOperand *tempResReg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(argType))); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(argSize == k64BitSize ? MOP_xrbit : MOP_wrbit, *tempResReg, destOpnd)); + /* clz */ + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(argSize == k64BitSize ? MOP_xclz : MOP_wclz, *tempResReg, *tempResReg)); + /* csincc */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(argSize == k64BitSize ? 
MOP_xcsincrrrc : MOP_wcsincrrrc,
+                                                       *tempResReg, GetZeroOpnd(k32BitSize), *tempResReg,
+                                                       GetCondOperand(CC_EQ), rflag));
+    return tempResReg;
+}
+
+Operand *AArch64CGFunc::SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0)
+{
+    PrimType ftype = node.FromType();
+    PrimType rtype = node.GetPrimType();
+    bool is64Bits = (ftype == PTY_f64);
+    std::vector<Operand *> opndVec;
+    RegOperand *resOpnd;
+    if (is64Bits) {
+        resOpnd = &GetOrCreatePhysicalRegisterOperand(D0, k64BitSize, kRegTyFloat);
+    } else {
+        resOpnd = &GetOrCreatePhysicalRegisterOperand(S0, k32BitSize, kRegTyFloat);
+    }
+    opndVec.push_back(resOpnd);
+    RegOperand &regOpnd0 = LoadIntoRegister(opnd0, ftype);
+    opndVec.push_back(&regOpnd0);
+    std::string libName;
+    if (roundType == kCeil) {
+        libName.assign(is64Bits ? "ceil" : "ceilf");
+    } else if (roundType == kFloor) {
+        libName.assign(is64Bits ? "floor" : "floorf");
+    } else {
+        libName.assign(is64Bits ? "round" : "roundf");
+    }
+    SelectLibCall(libName, opndVec, ftype, rtype);
+
+    return resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0,
+                                            const BaseNode &parent)
+{
+    PrimType itype = node.GetPrimType();
+    if ((mirModule.GetSrcLang() == kSrcLangC) && ((itype == PTY_f64) || (itype == PTY_f32))) {
+        /* for C with a float-typed result, keep libm semantics and return the call result directly */
+        return SelectRoundLibCall(roundType, node, opnd0);
+    }
+    PrimType ftype = node.FromType();
+    DEBUG_ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong float type");
+    bool is64Bits = (ftype == PTY_f64);
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
+    RegOperand &regOpnd0 = LoadIntoRegister(opnd0, ftype);
+    MOperator mop = MOP_undef;
+    if (roundType == kCeil) {
+        mop = is64Bits ? MOP_xvcvtps : MOP_vcvtps;
+    } else if (roundType == kFloor) {
+        mop = is64Bits ? MOP_xvcvtms : MOP_vcvtms;
+    } else {
+        mop = is64Bits ? MOP_xvcvtas : MOP_vcvtas;
+    }
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, resOpnd, regOpnd0));
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    return SelectRoundOperator(kCeil, node, opnd0, parent);
+}
+
+/* float to int floor */
+Operand *AArch64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    return SelectRoundOperator(kFloor, node, opnd0, parent);
+}
+
+Operand *AArch64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    return SelectRoundOperator(kRound, node, opnd0, parent);
+}
+
+static bool LIsPrimitivePointer(PrimType ptype)
+{
+    return ((ptype >= PTY_ptr) && (ptype <= PTY_a64));
+}
+
+Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0)
+{
+    PrimType fromType = node.Opnd(0)->GetPrimType();
+    PrimType toType = node.GetPrimType();
+    DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit width doesn't match");
+    if (LIsPrimitivePointer(fromType) && LIsPrimitivePointer(toType)) {
+        return &LoadIntoRegister(opnd0, toType);
+    }
+    if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) {
+        return &LoadIntoRegister(opnd0, toType);
+    }
+    Operand::OperandType opnd0Type = opnd0.GetKind();
+    RegOperand *resOpnd = &CreateRegisterOperandOfType(toType);
+    if (IsPrimitiveInteger(fromType) || IsPrimitiveFloat(fromType)) {
+        bool isFromInt = IsPrimitiveInteger(fromType);
+        bool is64Bits = GetPrimTypeBitSize(fromType) == k64BitSize;
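+        /*
+         * A sketch of what retype lowers to, assuming an i64 <-> f64 reinterpretation:
+         *     fmov d0, x0    // int reg -> FP reg, bit pattern unchanged
+         *     fmov x0, d0    // FP reg -> int reg, bit pattern unchanged
+         * An integer constant whose bits form a valid fmov #imm8 is materialized directly.
+         */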
+        PrimType itype =
+            isFromInt ? ((GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64)
+                                                                      : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32))
+                      : (is64Bits ? PTY_f64 : PTY_f32);
+
+        /*
+         * if the source operand is in memory,
+         * simply read it as a value of 'toType' into the dest operand
+         * and return
+         */
+        if (opnd0Type == Operand::kOpdMem) {
+            resOpnd = &SelectCopy(opnd0, toType, toType);
+            return resOpnd;
+        }
+        /* according to the aarch64 encoding format, convert int-to-float expressions */
+        bool isImm = false;
+        ImmOperand *imm = static_cast<ImmOperand *>(&opnd0);
+        uint64 val = static_cast<uint64>(imm->GetValue());
+        uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff);
+        uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3;
+        uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f;
+        bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f));
+        canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame;
+        Operand *newOpnd0 = &opnd0;
+        if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && canRepreset) {
+            uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7;
+            uint64 temp2 = is64Bits ? val >> 48 : val >> 19;
+            int64 imm8 = (temp2 & 0x7f) | temp1;
+            newOpnd0 = &CreateImmOperand(imm8, k8BitSize, false, kNotVary, true);
+            isImm = true;
+        } else {
+            newOpnd0 = &LoadIntoRegister(opnd0, itype);
+        }
+        if ((IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) ||
+            (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType))) {
+            MOperator mopFmov = isImm ? (is64Bits ? MOP_xdfmovri : MOP_wsfmovri)
+                                      : (isFromInt ? (is64Bits ? MOP_xvmovdr : MOP_xvmovsr)
+                                                   : (is64Bits ? MOP_xvmovrd : MOP_xvmovrs));
+            GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *resOpnd, *newOpnd0));
+            return resOpnd;
+        } else {
+            return newOpnd0;
+        }
+    } else {
+        CHECK_FATAL(false, "NYI retype");
+    }
+    return nullptr;
+}
+
+void AArch64CGFunc::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType)
+{
+    Operand &opnd0 = LoadIntoRegister(srcOpnd, fromType);
+    MOperator mOp = 0;
+    switch (toType) {
+        case PTY_f32: {
+            CHECK_FATAL(fromType == PTY_f64, "unexpected cvt from type");
+            mOp = MOP_xvcvtfd;
+            break;
+        }
+        case PTY_f64: {
+            CHECK_FATAL(fromType == PTY_f32, "unexpected cvt from type");
+            mOp = MOP_xvcvtdf;
+            break;
+        }
+        default:
+            CHECK_FATAL(false, "unexpected cvt to type");
+    }
+
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0));
+}
+
+/*
+ * This should be regarded only as a reference.
+ *
+ * C11 specification.
+ * 6.3.1.3 Signed and unsigned integers
+ * 1 When a value with integer type is converted to another integer
+ * type other than _Bool, if the value can be represented by the
+ * new type, it is unchanged.
+ * 2 Otherwise, if the new type is unsigned, the value is converted
+ * by repeatedly adding or subtracting one more than the maximum
+ * value that can be represented in the new type until the value
+ * is in the range of the new type.60)
+ * 3 Otherwise, the new type is signed and the value cannot be
+ * represented in it; either the result is implementation-defined
+ * or an implementation-defined signal is raised.
+ */ +void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, + PrimType toType) +{ + uint32 fsize = GetPrimTypeBitSize(fromType); + if (fromType == PTY_i128 || fromType == PTY_u128) { + fsize = k64BitSize; + } + uint32 tsize = GetPrimTypeBitSize(toType); + if (toType == PTY_i128 || toType == PTY_u128) { + tsize = k64BitSize; + } + bool isExpand = tsize > fsize; + bool is64Bit = (tsize == k64BitSize); + if ((parent != nullptr) && opnd0->IsIntImmediate() && + ((parent->GetOpCode() == OP_band) || (parent->GetOpCode() == OP_bior) || (parent->GetOpCode() == OP_bxor) || + (parent->GetOpCode() == OP_ashr) || (parent->GetOpCode() == OP_lshr) || (parent->GetOpCode() == OP_shl))) { + ImmOperand *simm = static_cast(opnd0); + DEBUG_ASSERT(simm != nullptr, "simm is nullptr in AArch64CGFunc::SelectCvtInt2Int"); + bool isSign = false; + int64 origValue = simm->GetValue(); + int64 newValue = origValue; + int64 signValue = 0; + if (!isExpand) { + /* 64--->32 */ + if (fsize > tsize) { + if (IsSignedInteger(toType)) { + if (origValue < 0) { + signValue = static_cast(0xFFFFFFFFFFFFFFFFLL & (1ULL << static_cast(tsize))); + } + newValue = static_cast( + (static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u)) | + static_cast(signValue)); + } else { + newValue = static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u); + } + } + } + if (IsSignedInteger(toType)) { + isSign = true; + } + resOpnd = &static_cast(CreateImmOperand(newValue, GetPrimTypeSize(toType) * kBitsPerByte, isSign)); + return; + } + if (isExpand) { /* Expansion */ + /* if cvt expr's parent is add,and,xor and some other,we can use the imm version */ + PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (IsSignedInteger(fromType)) { + DEBUG_ASSERT((is64Bit || (fsize == k8BitSize || fsize == k16BitSize)), "incorrect from size"); + + MOperator mOp = + (is64Bit ? ((fsize == k8BitSize) ? MOP_xsxtb64 : ((fsize == k16BitSize) ? MOP_xsxth64 : MOP_xsxtw64)) + : ((fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + /* Unsigned */ + if (is64Bit) { + if (fsize == k8BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else if (fsize == k16BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuxtw64, *resOpnd, *opnd0)); + } + } else { + DEBUG_ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size"); + if (fsize == k8BitSize) { + static_cast(opnd0)->SetValidBitsNum(k8BitSize); + static_cast(resOpnd)->SetValidBitsNum(k8BitSize); + } + if (fromType == PTY_u1) { + static_cast(opnd0)->SetValidBitsNum(1); + static_cast(resOpnd)->SetValidBitsNum(1); + } + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn((fsize == k8BitSize) ? MOP_xuxtb32 : MOP_xuxth32, *resOpnd, *opnd0)); + } + } + } else { /* Same size or truncate */ +#ifdef CNV_OPTIMIZE + /* + * No code needed for aarch64 with same reg. + * Just update regno. 
+ */ + RegOperand *reg = static_cast(resOpnd); + reg->regNo = static_cast(opnd0)->regNo; +#else + /* + * This is not really needed if opnd0 is result from a load. + * Hopefully the FE will get rid of the redundant conversions for loads. + */ + PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (fsize > tsize) { + if (tsize == k8BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxtb32 : MOP_xuxtb32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else if (tsize == k16BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxth32 : MOP_xuxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0, + CreateImmOperand(0, k8BitSize, false), + CreateImmOperand(tsize, k8BitSize, false))); + } + } else { + /* same size, so resOpnd can be set */ + if ((mirModule.IsJavaModule()) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) || + (GetPrimTypeSize(toType) >= k4BitSize)) { + resOpnd = opnd0; + } else if (IsUnsignedInteger(toType)) { + MOperator mop; + switch (toType) { + case PTY_u8: + mop = MOP_xuxtb32; + break; + case PTY_u16: + mop = MOP_xuxth32; + break; + default: + CHECK_FATAL(0, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } else { + /* signed target */ + uint32 size = GetPrimTypeSize(toType); + MOperator mop; + switch (toType) { + case PTY_i8: + mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32; + break; + case PTY_i16: + mop = (size > k4BitSize) ? MOP_xsxth64 : MOP_xsxth32; + break; + default: + CHECK_FATAL(0, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } + } +#endif + } +} + +Operand *AArch64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) +{ + PrimType fromType = node.FromType(); + PrimType toType = node.GetPrimType(); + if (fromType == toType) { + return &opnd0; /* noop */ + } + Operand *resOpnd = &GetOrCreateResOperand(parent, toType); + if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtInt2Int(&parent, resOpnd, &opnd0, fromType, toType); + } else if (IsPrimitiveVector(toType) || IsPrimitiveVector(fromType)) { + CHECK_FATAL(IsPrimitiveVector(toType) && IsPrimitiveVector(fromType), "Invalid vector cvt operands"); + SelectVectorCvt(resOpnd, toType, &opnd0, fromType); + } else { /* both are float type */ + SelectCvtFloat2Float(*resOpnd, opnd0, fromType, toType); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) +{ + PrimType ftype = node.FromType(); + bool is64Bits = (GetPrimTypeBitSize(node.GetPrimType()) == k64BitSize); + PrimType itype = (is64Bits) ? (IsSignedInteger(node.GetPrimType()) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(node.GetPrimType()) ? 
PTY_i32 : PTY_u32); /* promoted type */
+    RegOperand &resOpnd = GetOrCreateResOperand(parent, itype);
+    SelectCvtFloat2Int(resOpnd, opnd0, itype, ftype);
+    return &resOpnd;
+}
+
+void AArch64CGFunc::SelectSelect(Operand &resOpnd, Operand &condOpnd, Operand &trueOpnd, Operand &falseOpnd,
+                                 PrimType dtype, PrimType ctype, bool hasCompare, ConditionCode cc)
+{
+    DEBUG_ASSERT(&resOpnd != &condOpnd, "resOpnd cannot be the same as condOpnd");
+    bool isIntType = IsPrimitiveInteger(dtype);
+    DEBUG_ASSERT((IsPrimitiveInteger(dtype) || IsPrimitiveFloat(dtype)), "unknown type for select");
+    // making condOpnd and cmpInsn closer will provide more opportunity for opt
+    Operand &newTrueOpnd = LoadIntoRegister(trueOpnd, dtype);
+    Operand &newFalseOpnd = LoadIntoRegister(falseOpnd, dtype);
+    Operand &newCondOpnd = LoadIntoRegister(condOpnd, ctype);
+    if (hasCompare) {
+        SelectAArch64Cmp(newCondOpnd, CreateImmOperand(0, ctype, false), true, GetPrimTypeBitSize(ctype));
+        cc = CC_NE;
+    }
+    Operand &newResOpnd = LoadIntoRegister(resOpnd, dtype);
+    SelectAArch64Select(newResOpnd, newTrueOpnd, newFalseOpnd, GetCondOperand(cc), isIntType,
+                        GetPrimTypeBitSize(dtype));
+}
+
+Operand *AArch64CGFunc::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd,
+                                     const BaseNode &parent, bool hasCompare)
+{
+    PrimType dtype = expr.GetPrimType();
+    PrimType ctype = expr.Opnd(0)->GetPrimType();
+
+    ConditionCode cc = CC_NE;
+    Opcode opcode = expr.Opnd(0)->GetOpCode();
+    PrimType cmpType = static_cast<CompareNode *>(expr.Opnd(0))->GetOpndType();
+    bool isFloat = false;
+    bool unsignedIntegerComparison = false;
+    if (!IsPrimitiveVector(cmpType)) {
+        isFloat = IsPrimitiveFloat(cmpType);
+        unsignedIntegerComparison = !isFloat && !IsSignedInteger(cmpType);
+    } else {
+        isFloat = IsPrimitiveVectorFloat(cmpType);
+        unsignedIntegerComparison = !isFloat && IsPrimitiveUnSignedVector(cmpType);
+    }
+    switch (opcode) {
+        case OP_eq:
+            cc = CC_EQ;
+            break;
+        case OP_ne:
+            cc = CC_NE;
+            break;
+        case OP_le:
+            cc = unsignedIntegerComparison ? CC_LS : CC_LE;
+            break;
+        case OP_ge:
+            cc = unsignedIntegerComparison ? CC_HS : CC_GE;
+            break;
+        case OP_gt:
+            cc = unsignedIntegerComparison ? CC_HI : CC_GT;
+            break;
+        case OP_lt:
+            cc = unsignedIntegerComparison ? CC_LO : CC_LT;
+            break;
+        default:
+            hasCompare = true;
+            break;
+    }
+    if (!IsPrimitiveVector(dtype)) {
+        RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype);
+        SelectSelect(resOpnd, cond, trueOpnd, falseOpnd, dtype, ctype, hasCompare, cc);
+        return &resOpnd;
+    } else {
+        return SelectVectorSelect(cond, dtype, trueOpnd, falseOpnd);
+    }
+}
+
+/*
+ * syntax: select <prim-type> (<opnd0>, <opnd1>, <opnd2>)
+ * <opnd0> must be of integer type.
+ * <opnd1> and <opnd2> must be of the type given by <prim-type>.
+ * If <opnd0> is not 0, return <opnd1>. Otherwise, return <opnd2>.
+ */
+void AArch64CGFunc::SelectAArch64Select(Operand &dest, Operand &o0, Operand &o1, CondOperand &cond, bool isIntType,
+                                        uint32 dsize)
+{
+    uint32 mOpCode =
+        isIntType ? ((dsize == k64BitSize) ? MOP_xcselrrrc : MOP_wcselrrrc)
+                  : ((dsize == k64BitSize) ? MOP_dcselrrrc : ((dsize == k32BitSize) ? MOP_scselrrrc : MOP_hcselrrrc));
+    Operand &rflag = GetOrCreateRflag();
+    if (o1.IsImmediate()) {
+        uint32 movOp = (dsize == k64BitSize ? MOP_xmovri64 : MOP_wmovri32);
+        RegOperand &movDest =
+            CreateVirtualRegisterOperand(NewVReg(kRegTyInt, (dsize == k64BitSize) ?
k8ByteSize : k4ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(movOp, movDest, o1)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, movDest, cond, rflag)); + return; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, o0, o1, cond, rflag)); +} + +void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) +{ + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + /* + * we store 8-byte displacement ( jump_label - offset_table_address ) + * in the table. Refer to AArch64Emit::Emit() in aarch64emit.cpp + */ + std::vector sizeArray; + sizeArray.emplace_back(switchTable.size()); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(mirModule, *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + + MIRSymbol *lblSt = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(GetFunction().GetStIdx().Idx()); + uint32 labelIdxTmp = GetLabelIdx(); + lblStr.append(funcSt->GetName()).append(std::to_string(labelIdxTmp++)); + SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + AddEmitSt(GetCurBB()->GetId(), *lblSt); + + PrimType itype = rangeGotoNode.Opnd(0)->GetPrimType(); + Operand &opnd0 = LoadIntoRegister(srcOpnd, itype); + + regno_t vRegNO = NewVReg(kRegTyInt, 8u); + RegOperand *addOpnd = &CreateVirtualRegisterOperand(vRegNO); + + int32 minIdx = switchTable[0].first; + SelectAdd(*addOpnd, opnd0, + CreateImmOperand(-static_cast(minIdx) - static_cast(rangeGotoNode.GetTagOffset()), + GetPrimTypeBitSize(itype), true), + itype); + + /* contains the index */ + if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + addOpnd = static_cast(&SelectCopy(*addOpnd, PTY_u64, PTY_u64)); + } + + RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*lblSt, 0, 0); + + /* load the address of the switch table */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + Operand *disp = CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift); + RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, tgt)); +} + +Operand *AArch64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) +{ + DEBUG_ASSERT(opnd0.IsRegister(), "wrong type."); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr, resOpnd, opnd0)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) +{ + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = 
CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr_static, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) +{ + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_arrayclass_cache_ldr, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectAlloca(UnaryNode &node, Operand &opnd0) +{ + if (!CGOptions::IsArm64ilp32()) { + DEBUG_ASSERT((node.GetPrimType() == PTY_a64), "wrong type"); + } + if (GetCG()->IsLmbc()) { + SetHasVLAOrAlloca(true); + } + PrimType stype = node.Opnd(0)->GetPrimType(); + Operand *resOpnd = &opnd0; + if (GetPrimTypeBitSize(stype) < GetPrimTypeBitSize(PTY_u64)) { + resOpnd = &CreateRegisterOperandOfType(PTY_u64); + SelectCvtInt2Int(nullptr, resOpnd, &opnd0, stype, PTY_u64); + } + + RegOperand &aliOp = CreateRegisterOperandOfType(PTY_u64); + + SelectAdd(aliOp, *resOpnd, CreateImmOperand(kAarch64StackPtrAlignment - 1, k64BitSize, true), PTY_u64); + Operand &shifOpnd = CreateImmOperand(__builtin_ctz(kAarch64StackPtrAlignment), k64BitSize, true); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLright, PTY_u64); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLeft, PTY_u64); + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + int64 allocaOffset = GetMemlayout()->SizeOfArgsToStackPass(); + if (GetCG()->IsLmbc()) { + allocaOffset -= kDivide2 * k8ByteSize; + } + if (allocaOffset > 0) { + RegOperand &resallo = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(resallo, spOpnd, CreateImmOperand(allocaOffset, k64BitSize, true), PTY_u64); + return &resallo; + } else { + return &SelectCopy(spOpnd, PTY_u64, PTY_u64); + } +} + +Operand *AArch64CGFunc::SelectMalloc(UnaryNode &node, Operand &opnd0) +{ + PrimType retType = node.GetPrimType(); + DEBUG_ASSERT((retType == PTY_a64), "wrong type"); + + std::vector opndVec; + RegOperand &resOpnd = CreateRegisterOperandOfType(retType); + opndVec.emplace_back(&resOpnd); + opndVec.emplace_back(&opnd0); + /* Use calloc to make sure allocated memory is zero-initialized */ + const std::string &funcName = "calloc"; + PrimType srcPty = PTY_u64; + if (opnd0.GetSize() <= k32BitSize) { + srcPty = PTY_u32; + } + Operand &opnd1 = CreateImmOperand(1, srcPty, false); + opndVec.emplace_back(&opnd1); + SelectLibCall(funcName, opndVec, srcPty, retType); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectGCMalloc(GCMallocNode &node) +{ + PrimType retType = node.GetPrimType(); + DEBUG_ASSERT((retType == PTY_a64), "wrong type"); + + /* Get the size and alignment of the type. 
*/
+    TyIdx tyIdx = node.GetTyIdx();
+    uint64 size = GetBecommon().GetTypeSize(tyIdx);
+    uint8 align = RTSupport::GetRTSupportInstance().GetObjectAlignment();
+
+    /* Generate the call to MCC_NewObj */
+    Operand &opndSize = CreateImmOperand(static_cast<int64>(size), k64BitSize, false);
+    Operand &opndAlign = CreateImmOperand(align, k64BitSize, false);
+
+    RegOperand &resOpnd = CreateRegisterOperandOfType(retType);
+
+    std::vector<Operand *> opndVec{&resOpnd, &opndSize, &opndAlign};
+
+    const std::string &funcName = "MCC_NewObj";
+    SelectLibCall(funcName, opndVec, PTY_u64, retType);
+
+    return &resOpnd;
+}
+
+Operand *AArch64CGFunc::SelectJarrayMalloc(JarrayMallocNode &node, Operand &opnd0)
+{
+    PrimType retType = node.GetPrimType();
+    DEBUG_ASSERT((retType == PTY_a64), "wrong type");
+
+    /* Extract jarray type */
+    TyIdx tyIdx = node.GetTyIdx();
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "nullptr check");
+    CHECK_FATAL(type->GetKind() == kTypeJArray, "expect MIRJarrayType");
+    auto jaryType = static_cast<MIRJarrayType *>(type);
+    uint64 fixedSize = RTSupport::GetRTSupportInstance().GetArrayContentOffset();
+    uint8 align = RTSupport::GetRTSupportInstance().GetObjectAlignment();
+
+    MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jaryType->GetElemTyIdx());
+    PrimType elemPrimType = elemType->GetPrimType();
+    uint64 elemSize = GetPrimTypeSize(elemPrimType);
+
+    /* Generate the call to MCC_NewObj_flexible */
+    Operand &opndFixedSize = CreateImmOperand(PTY_u64, static_cast<int64>(fixedSize));
+    Operand &opndElemSize = CreateImmOperand(PTY_u64, static_cast<int64>(elemSize));
+
+    Operand *opndNElems = &opnd0;
+
+    Operand *opndNElems64 = &static_cast<Operand &>(CreateRegisterOperandOfType(PTY_u64));
+    SelectCvtInt2Int(nullptr, opndNElems64, opndNElems, PTY_u32, PTY_u64);
+
+    Operand &opndAlign = CreateImmOperand(PTY_u64, align);
+
+    RegOperand &resOpnd = CreateRegisterOperandOfType(retType);
+
+    std::vector<Operand *> opndVec{&resOpnd, &opndFixedSize, &opndElemSize, opndNElems64, &opndAlign};
+
+    const std::string &funcName = "MCC_NewObj_flexible";
+    SelectLibCall(funcName, opndVec, PTY_u64, retType);
+
+    /* Generate the store of the object length field */
+    MemOperand &opndArrayLengthField =
+        CreateMemOpnd(resOpnd, static_cast<int64>(RTSupport::GetRTSupportInstance().GetArrayLengthOffset()), k4BitSize);
+    RegOperand *regOpndNElems = &SelectCopy(*opndNElems, PTY_u32, PTY_u32);
+    DEBUG_ASSERT(regOpndNElems != nullptr, "null ptr check!");
+    SelectCopy(opndArrayLengthField, PTY_u32, *regOpndNElems, PTY_u32);
+
+    return &resOpnd;
+}
+
+bool AArch64CGFunc::IsRegRematCand(const RegOperand &reg) const
+{
+    MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA());
+    if (preg != nullptr && preg->GetOp() != OP_undef) {
+        if (preg->GetOp() == OP_constval && cg->GetRematLevel() >= 1) {
+            return true;
+        } else if (preg->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) {
+            return true;
+        } else if (preg->GetOp() == OP_iread && cg->GetRematLevel() >= 4) {
+            return true;
+        } else {
+            return false;
+        }
+    } else {
+        return false;
+    }
+}
+
+void AArch64CGFunc::ClearRegRematInfo(const RegOperand &reg) const
+{
+    MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA());
+    if (preg != nullptr && preg->GetOp() != OP_undef) {
+        preg->SetOp(OP_undef);
+    }
+}
+
+bool AArch64CGFunc::IsRegSameRematInfo(const RegOperand &regDest, const RegOperand &regSrc) const
+{
+    MIRPreg *pregDest = GetPseudoRegFromVirtualRegNO(regDest.GetRegisterNumber(), CGOptions::DoCGSSA());
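+    /* Two vregs carry the same remat info only when they trace back to the same
+     * pseudo-register whose defining op is remat-eligible at the current remat
+     * level (constval at level >= 1, addrof at >= 2, iread at >= 4). */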
MIRPreg *pregSrc = GetPseudoRegFromVirtualRegNO(regSrc.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (pregDest != nullptr && pregDest == pregSrc) { + if (pregDest->GetOp() == OP_constval && cg->GetRematLevel() >= 1) { + return true; + } else if (pregDest->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) { + return true; + } else if (pregDest->GetOp() == OP_iread && cg->GetRematLevel() >= 4) { + return true; + } else { + return false; + } + } else { + return false; + } +} + +void AArch64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) +{ + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + if (opnd.IsList()) { + std::list tempRegStore; + auto &opndList = static_cast(opnd).GetOperands(); + bool needReplace = false; + for (auto it = opndList.begin(), end = opndList.end(); it != end; ++it) { + auto *regOpnd = *it; + if (regOpnd->GetRegisterNumber() == destNO) { + needReplace = true; + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + tempRegStore.push_back(®Dest); + } else { + tempRegStore.push_back(®Src); + } + } else { + tempRegStore.push_back(regOpnd); + } + } + if (needReplace) { + opndList.clear(); + for (auto newOpnd : tempRegStore) { + static_cast(opnd).PushOpnd(*newOpnd); + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseRegOpnd = memOpnd.GetBaseRegister(); + RegOperand *indexRegOpnd = memOpnd.GetIndexRegister(); + MemOperand *newMem = static_cast(memOpnd.Clone(*GetMemoryPool())); + if ((baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) || + (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO)) { + if (baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetBaseRegister(regDest); + } else { + newMem->SetBaseRegister(regSrc); + } + } + if (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetIndexRegister(regDest); + } else { + newMem->SetIndexRegister(regSrc); + } + } + insn.SetMemOpnd(&GetOrCreateMemOpnd(*newMem)); + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.GetRegisterNumber() == destNO) { + DEBUG_ASSERT(regOpnd.GetRegisterNumber() != kRFLAG, "both condi and reg"); + if (regDest.GetSize() != regSrc.GetSize()) { + regOpnd.SetRegisterNumber(regSrc.GetRegisterNumber()); + } else { + insn.SetOperand(static_cast(i), regSrc); + } + } + } + } +} + +void AArch64CGFunc::CleanupDeadMov(bool dumpInfo) +{ + /* clean dead mov. 
*/ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr || + insn->GetMachineOpcode() == MOP_xvmovs || insn->GetMachineOpcode() == MOP_xvmovd) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + bb->RemoveInsn(*insn); + } else if (insn->IsPhiMovInsn() && dumpInfo) { + LogInfo::MapleLogger() << "fail to remove mov: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } +} + +void AArch64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) +{ + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in AArch64Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCallerSaveReg(static_cast(preg))) { + realSaveRegs.insert(preg); + } + } + return; + } + } + for (uint32 i = R0; i <= kMaxRegNum; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + realSaveRegs.insert(i); + } + } +} + +RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen) +{ + /* + * It is possible to have a bitLen < 32, eg stb. + * Set it to 32 if it is less than 32. + */ + if (bitLen < k32BitSize) { + bitLen = k32BitSize; + } + DEBUG_ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen); + return (bitLen == k32BitSize) ? GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt) + : GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); +} + +bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const +{ + if (opnd.GetRegisterNumber() == RFP) { + return true; + } else { + return false; + } +} + +bool AArch64CGFunc::IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) +{ + CCImpl &retLocator = *GetOrCreateLocator(GetCurCallConvKind()); + CCLocInfo retMechanism; + retLocator.InitReturnInfo(mirType, retMechanism); + if (retMechanism.GetRegCount() > 0) { + return reg.GetRegisterNumber() == retMechanism.GetReg0() || reg.GetRegisterNumber() == retMechanism.GetReg1() || + reg.GetRegisterNumber() == retMechanism.GetReg2() || reg.GetRegisterNumber() == retMechanism.GetReg3(); + } + return false; +} + +bool AArch64CGFunc::IsSPOrFP(const RegOperand &opnd) const +{ + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = opnd.GetRegisterNumber(); + return (regOpnd.IsPhysicalRegister() && + (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); +} + +bool AArch64CGFunc::IsReturnReg(const RegOperand &opnd) const +{ + regno_t regNO = opnd.GetRegisterNumber(); + return (regNO == R0) || (regNO == V0); +} + +/* + * This function returns true to indicate that the clean up code needs to be generated, + * otherwise it does not need. In GCOnly mode, it always returns false. 
+ */
+bool AArch64CGFunc::NeedCleanup()
+{
+    if (CGOptions::IsGCOnly()) {
+        return false;
+    }
+    AArch64MemLayout *layout = static_cast<AArch64MemLayout *>(GetMemlayout());
+    if (layout->GetSizeOfRefLocals() > 0) {
+        return true;
+    }
+    for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) {
+        TypeAttrs ta = GetFunction().GetNthParamAttr(i);
+        if (ta.GetAttr(ATTR_localrefvar)) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/*
+ * bb must be the cleanup bb.
+ * this function must be invoked before register allocation.
+ * the extended epilogue is specific to fast exception handling and is made up of
+ * clean-up code and the epilogue.
+ * clean-up code is generated here while the epilogue is generated in GeneratePrologEpilog()
+ */
+void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb)
+{
+    DEBUG_ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be");
+
+    if (NeedCleanup()) {
+        /* this is necessary for code insertion. */
+        SetCurBB(bb);
+
+        RegOperand &regOpnd0 =
+            GetOrCreatePhysicalRegisterOperand(R0, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64));
+        RegOperand &regOpnd1 =
+            GetOrCreatePhysicalRegisterOperand(R1, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64));
+        /* allocate 16 bytes to store reg0 and reg1 (each reg has 8 bytes) */
+        MemOperand &frameAlloc = CreateCallFrameOperand(-16, GetPointerSize() * kBitsPerByte);
+        Insn &allocInsn = GetInsnBuilder()->BuildInsn(MOP_xstp, regOpnd0, regOpnd1, frameAlloc);
+        allocInsn.SetDoNotRemove(true);
+        AppendInstructionTo(allocInsn, *this);
+
+        /* invoke MCC_CleanupLocalStackRef(). */
+        HandleRCCall(false);
+        /* deallocate the 16 bytes that were used to store reg0 and reg1 */
+        MemOperand &frameDealloc = CreateCallFrameOperand(16, GetPointerSize() * kBitsPerByte);
+        GenRetCleanup(cleanEANode, true);
+        Insn &deallocInsn = GetInsnBuilder()->BuildInsn(MOP_xldp, regOpnd0, regOpnd1, frameDealloc);
+        deallocInsn.SetDoNotRemove(true);
+        AppendInstructionTo(deallocInsn, *this);
+        /* Update cleanupbb since bb may have been split */
+        SetCleanupBB(*GetCurBB());
+    }
+}
+
+/*
+ * bb must be the cleanup bb.
+ * this function must be invoked before register allocation.
+ */
+void AArch64CGFunc::GenerateCleanupCode(BB &bb)
+{
+    DEBUG_ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be");
+    if (!NeedCleanup()) {
+        return;
+    }
+
+    /* this is necessary for code insertion. */
+    SetCurBB(bb);
+
+    /* R0 is live-in for clean-up code, save R0 before invocation */
+    RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64));
+
+    if (!GetCG()->GenLocalRC()) {
+        /* bypass local RC operations. */
+    } else if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
+        regno_t vreg = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64));
+        RegOperand &backupRegOp = CreateVirtualRegisterOperand(vreg);
+        backupRegOp.SetRegNotBBLocal();
+        SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64);
+
+        /* invoke MCC_CleanupLocalStackRef(). */
+        HandleRCCall(false);
+        SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64);
+    } else {
+        /*
+         * Register allocation for O0 cannot handle this case, so use a callee-saved register directly.
+         * If yieldpoint is enabled, we use R20 instead of R19.
+         */
+        AArch64reg backupRegNO = GetCG()->GenYieldPoint() ? R20 : R19;
+        RegOperand &backupRegOp =
+            GetOrCreatePhysicalRegisterOperand(backupRegNO, k64BitSize, GetRegTyFromPrimTy(PTY_a64));
+        SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64);
+        /* invoke MCC_CleanupLocalStackRef().
*/ + HandleRCCall(false); + SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64); + } + + /* invoke _Unwind_Resume */ + std::string funcName("_Unwind_Resume"); + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + sym->SetNameStrIdx(funcName); + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(livein); + AppendCall(*sym, *srcOpnds); + /* + * this instruction is unreachable, but we need it as the return address of previous + * "bl _Unwind_Resume" for stack unwinding. + */ + Insn &nop = GetInsnBuilder()->BuildInsn(MOP_xblr, livein, *srcOpnds); + GetCurBB()->AppendInsn(nop); + GetCurBB()->SetHasCall(); + + /* Update cleanupbb since bb may have been splitted */ + SetCleanupBB(*GetCurBB()); +} + +uint32 AArch64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) +{ + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + AArch64CallConvImpl parmlocator(GetBecommon()); + return parmlocator.FloatParamRegRequired(*structType, fpSize); +} + +/* + * Map param registers to formals. For small structs passed in param registers, + * create a move to vreg since lmbc IR does not create a regassign for them. + */ +void AArch64CGFunc::AssignLmbcFormalParams() +{ + PrimType primType; + uint32 offset; + regno_t intReg = R0; + regno_t fpReg = V0; + for (auto param : GetLmbcParamVec()) { + primType = param->GetPrimType(); + offset = param->GetOffset(); + if (param->IsReturn()) { + param->SetRegNO(R8); + } else if (IsPrimitiveInteger(primType)) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + intReg++; + } + } else if (IsPrimitiveFloat(primType)) { + if (fpReg > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(fpReg), bitlen, kRegTyFloat); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + fpReg++; + } + } else if (primType == PTY_agg) { + if (param->IsPureFloat()) { + uint32 numFpRegs = param->GetNumRegs(); + if ((fpReg + numFpRegs - kOneRegister) > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + param->SetNumRegs(numFpRegs); + fpReg += numFpRegs; + } + } else if (param->GetSize() > k16ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetIsOnStack(); + param->SetOnStackOffset(((intReg - R0 + fpReg) - V0) * k8ByteSize); + uint32 bytelen = GetPrimTypeSize(PTY_a64); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(param->GetOnStackOffset()), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, PTY_a64); + 
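+                    /* the oversized aggregate (> 16 bytes) arrives as a pointer in a GPR;
+                     * spill that pointer to its stack home slot so later loads can find it */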
Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + intReg++; + } + } else if (param->GetSize() <= k8ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kOneRegister); + intReg++; + } + } else { + /* size > 8 && size <= 16 */ + if ((intReg + kOneRegister) > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kTwoRegister); + intReg += kTwoRegister; + } + } + if (param->GetRegNO() != 0) { + for (uint32 i = 0; i < param->GetNumRegs(); ++i) { + PrimType pType = PTY_i64; + RegType rType = kRegTyInt; + uint32 rSize = k8ByteSize; + if (param->IsPureFloat()) { + rType = kRegTyFloat; + if (param->GetFpSize() <= k4ByteSize) { + pType = PTY_f32; + rSize = k4ByteSize; + } else { + pType = PTY_f64; + } + } + regno_t vreg = NewVReg(rType, rSize); + RegOperand &dest = GetOrCreateVirtualRegisterOperand(vreg); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(static_cast(param->GetRegNO() + i), + rSize * kBitsPerByte, rType); + SelectCopy(dest, pType, src, pType); + if (param->GetVregNO() == 0) { + param->SetVregNO(vreg); + } + Operand *memOpd = &CreateMemOpnd(RFP, offset + (i * rSize), rSize); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickStInsn(rSize * kBitsPerByte, pType), dest, *memOpd)); + } + } + } else { + CHECK_FATAL(false, "lmbc formal primtype not handled"); + } + } +} + +void AArch64CGFunc::LmbcGenSaveSpForAlloca() +{ + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc || !HasVLAOrAlloca()) { + return; + } + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + RegOperand &spSaveOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPointerSize())); + Insn &save = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spSaveOpnd, spOpnd); + GetFirstBB()->AppendInsn(save); + for (auto *retBB : GetExitBBsVec()) { + Insn &restore = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spOpnd, spSaveOpnd); + retBB->AppendInsn(restore); + restore.SetFrameDef(true); + } +} + +/* if offset < 0, allocation; otherwise, deallocation */ +MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, uint32 size) +{ + MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); + memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex); + return *memOpnd; +} + +BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const +{ + /* num(0, 16, 32, 48) >> 4 is num1(0, 1, 2, 3), num1 & (~3) == 0 */ + DEBUG_ASSERT((!shiftAmount || ((shiftAmount >> 4) & ~static_cast(3)) == 0), + "shift amount should be one of 0, 16, 32, 48"); + /* movkLslOperands[4]~movkLslOperands[7] is for 64 bits */ + return &movkLslOperands[(shiftAmount >> 4) + (is64bits ? 
4 : 0)];
+}
+
+AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = {
+    BitShiftOperand(BitShiftOperand::kLSL, 0, 4),
+    BitShiftOperand(BitShiftOperand::kLSL, 16, 4),
+    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
+    BitShiftOperand(BitShiftOperand::kLSL, static_cast<uint32>(-1), 0), /* invalid entry */
+    BitShiftOperand(BitShiftOperand::kLSL, 0, 6),
+    BitShiftOperand(BitShiftOperand::kLSL, 16, 6),
+    BitShiftOperand(BitShiftOperand::kLSL, 32, 6),
+    BitShiftOperand(BitShiftOperand::kLSL, 48, 6),
+};
+
+MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size)
+{
+    AArch64reg reg;
+    if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) {
+        reg = RSP;
+    } else {
+        reg = RFP;
+    }
+    MemOperand *memOp = CreateStackMemOpnd(reg, static_cast<int32>(offset), size);
+    return *memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size)
+{
+    auto *memOp =
+        memPool->New<MemOperand>(memPool->New<RegOperand>(preg, k64BitSize, kRegTyInt),
+                                 &CreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize), size);
+    if (preg == RFP || preg == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
+                                            RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol) const
+{
+    auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand &base,
+                                            RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol,
+                                            bool noExtend)
+{
+    auto *memOp = memPool->New<MemOperand>(mode, size, base, index, offset, symbol, noExtend);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, RegOperand &base,
+                                            RegOperand &indexOpnd, uint32 shift, bool isSigned) const
+{
+    auto *memOp = memPool->New<MemOperand>(mode, dSize, base, indexOpnd, shift, isSigned);
+    if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) {
+        memOp->SetStackMem(true);
+    }
+    return memOp;
+}
+
+MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym)
+{
+    auto *memOp = memPool->New<MemOperand>(mode, dSize, sym);
+    return memOp;
+}
+
+void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb)
+{
+    if (GetCG()->UseFastUnwind()) {
+        BB *formerCurBB = GetCurBB();
+        GetDummyBB()->ClearInsns();
+        SetCurBB(*GetDummyBB());
+        /*
+         * FUNCATTR_bridge is checked to exclude functions such as
+         * Ljava_2Flang_2FString_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I,
+         * which are bridge functions generated for Java generics.
+         */
+        if ((GetFunction().GetAttr(FUNCATTR_native) || GetFunction().GetAttr(FUNCATTR_fast_native)) &&
+            !GetFunction().GetAttr(FUNCATTR_critical_native) && !GetFunction().GetAttr(FUNCATTR_bridge)) {
+            RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt);
+
+            ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator());
+            RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt);
+            srcOpnds->PushOpnd(parmRegOpnd1);
+            Operand &immOpnd = CreateImmOperand(0, k64BitSize, false);
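+            /* The two argument moves below set up MCC_SetRiskyUnwindContext(R0, R1):
+             * R0 <- 0 and R1 <- the current frame pointer (a reading of the moves
+             * that follow, for fast-unwind bookkeeping). */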
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadri64, parmRegOpnd1, immOpnd)); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, fpReg, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_SetRiskyUnwindContext"); + sym->SetNameStrIdx(funcName); + + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + AppendCall(*sym, *srcOpnds); + bb.SetHasCall(); + } + + bb.InsertAtBeginning(*GetDummyBB()); + SetCurBB(*formerCurBB); + } +} + +bool AArch64CGFunc::HasStackLoadStore() +{ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS(insn, bb) { + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + + if ((base != nullptr) && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + RegType regType = regOpnd->GetRegisterType(); + uint32 regNO = regOpnd->GetRegisterNumber(); + if (((regType != kRegTyCc) && ((regNO == RFP) || (regNO == RSP))) || (regType == kRegTyVary)) { + return true; + } + } + } + } + } + } + return false; +} + +void AArch64CGFunc::GenerateYieldpoint(BB &bb) +{ + /* ldr wzr, [RYP] # RYP hold address of the polling page. */ + auto &wzr = GetZeroOpnd(k32BitSize); + auto &pollingPage = CreateMemOpnd(RYP, 0, k32BitSize); + auto &yieldPoint = GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, pollingPage); + if (GetCG()->GenerateVerboseCG()) { + yieldPoint.SetComment("yieldpoint"); + } + bb.AppendInsn(yieldPoint); +} + +Operand &AArch64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) +{ + return GetTargetRetOperand(primType, sReg); +} + +Operand &AArch64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) +{ + uint32 bitSize = GetPrimTypeBitSize(primType) < k32BitSize ? k32BitSize : GetPrimTypeBitSize(primType); + AArch64reg pReg; + if (sReg < 0) { + return GetOrCreatePhysicalRegisterOperand(IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0, + bitSize, GetRegTyFromPrimTy(primType)); + } else { + switch (sReg) { + case kSregRetval0: + pReg = IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0; + break; + case kSregRetval1: + pReg = R1; + break; + default: + pReg = RLAST_INT_REG; + DEBUG_ASSERT(0, "GetTargetRetOperand: NYI"); + } + return GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType)); + } +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType) +{ + RegType regType = GetRegTyFromPrimTy(primType); + uint32 byteLength = GetPrimTypeSize(primType); + return CreateRegisterOperandOfType(regType, byteLength); +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(RegType regty, uint32 byteLen) +{ + /* BUG: if half-precision floating point operations are supported? */ + /* AArch64 has 32-bit and 64-bit registers only */ + if (byteLen < k4ByteSize) { + byteLen = k4ByteSize; + } + regno_t vRegNO = NewVReg(regty, byteLen); + return CreateVirtualRegisterOperand(vRegNO); +} + +RegOperand &AArch64CGFunc::CreateRflagOperand() +{ + /* AArch64 has Status register that is 32-bit wide. 
*/ + regno_t vRegNO = NewVRflag(); + return CreateVirtualRegisterOperand(vRegNO); +} + +void AArch64CGFunc::MergeReturn() +{ + DEBUG_ASSERT(GetCurBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + + uint32 exitBBSize = GetExitBBsVec().size(); + if (exitBBSize == 0) { + return; + } + if ((exitBBSize == 1) && GetExitBB(0) == GetCurBB()) { + return; + } + if (exitBBSize == 1) { + BB *onlyExitBB = GetExitBB(0); + BB *onlyExitBBNext = onlyExitBB->GetNext(); + StmtNode *stmt = onlyExitBBNext->GetFirstStmt(); + /* only deal with the return_BB in the middle */ + if (stmt != GetCleanupLabel()) { + LabelIdx labidx = CreateLabel(); + BB *retBB = CreateNewBB(labidx, onlyExitBB->IsUnreachable(), BB::kBBReturn, onlyExitBB->GetFrequency()); + onlyExitBB->AppendBB(*retBB); + /* modify the original return BB. */ + DEBUG_ASSERT(onlyExitBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb"); + onlyExitBB->SetKind(BB::kBBFallthru); + + GetExitBBsVec().pop_back(); + GetExitBBsVec().emplace_back(retBB); + return; + } + } + + LabelIdx labidx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labidx); + uint32 freq = 0; + for (auto *tmpBB : GetExitBBsVec()) { + DEBUG_ASSERT(tmpBB->GetKind() == BB::kBBReturn, "Error: suppose to merge multi return bb"); + tmpBB->SetKind(BB::kBBGoto); + tmpBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + freq += tmpBB->GetFrequency(); + } + BB *retBB = CreateNewBB(labidx, false, BB::kBBReturn, freq); + GetCleanupBB()->PrependBB(*retBB); + + GetExitBBsVec().clear(); + GetExitBBsVec().emplace_back(retBB); +} + +void AArch64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) +{ + if (!GetCG()->GenLocalRC()) { + /* handle local rc is disabled. */ + return; + } + + Opcode ops[11] = {OP_label, OP_goto, OP_brfalse, OP_brtrue, OP_return, OP_call, + OP_icall, OP_rangegoto, OP_catch, OP_try, OP_endtry}; + std::set branchOp(ops, ops + 11); + + /* get cleanup intrinsic */ + bool found = false; + StmtNode *cleanupNode = retNode.GetPrev(); + cleanEANode = nullptr; + while (cleanupNode != nullptr) { + if (branchOp.find(cleanupNode->GetOpCode()) != branchOp.end()) { + if (cleanupNode->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(cleanupNode); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if ((fsym->GetName() == "MCC_DecRef_NaiveRCFast") || (fsym->GetName() == "MCC_IncRef_NaiveRCFast") || + (fsym->GetName() == "MCC_IncDecRef_NaiveRCFast") || (fsym->GetName() == "MCC_LoadRefStatic") || + (fsym->GetName() == "MCC_LoadRefField") || (fsym->GetName() == "MCC_LoadReferentField") || + (fsym->GetName() == "MCC_LoadRefField_NaiveRCFast") || + (fsym->GetName() == "MCC_LoadVolatileField") || + (fsym->GetName() == "MCC_LoadVolatileStaticField") || (fsym->GetName() == "MCC_LoadWeakField") || + (fsym->GetName() == "MCC_CheckObjMem")) { + cleanupNode = cleanupNode->GetPrev(); + continue; + } else { + break; + } + } else { + break; + } + } + + if (cleanupNode->GetOpCode() == OP_intrinsiccall) { + IntrinsiccallNode *tempNode = static_cast(cleanupNode); + if ((tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) || + (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP)) { + GenRetCleanup(tempNode); + if (cleanEANode != nullptr) { + GenRetCleanup(cleanEANode, true); + } + found = true; + break; + } + if (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + 
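+                /*
+                 * Record the escape-analysis cleanup intrinsic here; judging
+                 * from the handling above, it is only emitted later, via
+                 * GenRetCleanup(cleanEANode, true), once the localrefvar
+                 * cleanup intrinsic has been found.
+                 */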
cleanEANode = tempNode; + } + } + cleanupNode = cleanupNode->GetPrev(); + } + + if (!found) { + MIRSymbol *retRef = nullptr; + if (retNode.NumOpnds() != 0) { + retRef = GetRetRefSymbol(*static_cast(retNode).Opnd(0)); + } + HandleRCCall(false, retRef); + } +} + +bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool forEA) +{ +#undef CC_DEBUG_INFO + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "==============" << GetFunction().GetName() << "==============" << '\n'; +#endif + + if (cleanupNode == nullptr) { + return false; + } + + int32 minByteOffset = INT_MAX; + int32 maxByteOffset = 0; + + int32 skipIndex = -1; + MIRSymbol *skipSym = nullptr; + size_t refSymNum = 0; + if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) { + refSymNum = cleanupNode->GetNopndSize(); + if (refSymNum < 1) { + return true; + } + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) { + refSymNum = cleanupNode->GetNopndSize(); + /* refSymNum == 0, no local refvars; refSymNum == 1 and cleanup skip, so nothing to do */ + if (refSymNum < 2) { + return true; + } + BaseNode *skipExpr = cleanupNode->Opnd(refSymNum - 1); + + CHECK_FATAL(skipExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(skipExpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + + refSymNum -= 1; + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + refSymNum = cleanupNode->GetNopndSize(); + /* the number of operands of intrinsic call INTRN_MPL_CLEANUP_NORETESCOBJS must be more than 1 */ + if (refSymNum < 2) { + return true; + } + BaseNode *skipexpr = cleanupNode->Opnd(0); + CHECK_FATAL(skipexpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refnode = static_cast(skipexpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refnode->GetStIdx()); + } + + /* now compute the offset range */ + std::vector offsets; + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + for (size_t i = 0; i < refSymNum; ++i) { + BaseNode *argExpr = cleanupNode->Opnd(i); + CHECK_FATAL(argExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(argExpr); + MIRSymbol *refSymbol = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + if (memLayout->GetSymAllocTable().size() <= refSymbol->GetStIndex()) { + ERR(kLncErr, "access memLayout->GetSymAllocTable() failed"); + return false; + } + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(refSymbol->GetStIndex())); + int32 tempOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(tempOffset); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "refsym " << refSymbol->GetName() << " offset " << tempOffset << '\n'; +#endif + minByteOffset = (minByteOffset > tempOffset) ? tempOffset : minByteOffset; + maxByteOffset = (maxByteOffset < tempOffset) ? 
tempOffset : maxByteOffset; + } + + /* get the skip offset */ + int32 skipOffset = -1; + if (skipSym != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(skipSym->GetStIndex())); + CHECK_FATAL(GetBaseOffset(*symLoc) < std::numeric_limits::max(), "out of range"); + skipOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(skipOffset); + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "skip " << skipSym->GetName() << " offset " << skipOffset << '\n'; +#endif + + skipIndex = symLoc->GetOffset() / kOffsetAlign; + } + + /* call runtime cleanup */ + if (minByteOffset < INT_MAX) { + int32 refLocBase = memLayout->GetRefLocBaseLoc(); + uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign; + CHECK_FATAL((refLocBase + (refNum - 1) * kIntregBytelen) < std::numeric_limits::max(), "out of range"); + int32 refLocEnd = refLocBase + (refNum - 1) * kIntregBytelen; + int32 realMin = minByteOffset < refLocBase ? refLocBase : minByteOffset; + int32 realMax = maxByteOffset > refLocEnd ? refLocEnd : maxByteOffset; + if (forEA) { + std::sort(offsets.begin(), offsets.end()); + int32 prev = offsets[0]; + for (size_t i = 1; i < offsets.size(); i++) { + CHECK_FATAL((offsets[i] == prev) || ((offsets[i] - prev) == kIntregBytelen), "must be"); + prev = offsets[i]; + } + CHECK_FATAL((refLocBase - prev) == kIntregBytelen, "must be"); + realMin = minByteOffset; + realMax = maxByteOffset; + } +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << " realMin " << realMin << " realMax " << realMax << '\n'; +#endif + if (realMax < realMin) { + /* maybe there is a cleanup intrinsic bug, use CHECK_FATAL instead? */ + CHECK_FATAL(false, "must be"); + } + + /* optimization for little slot cleanup */ + if (realMax == realMin && !forEA) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = CreateStkTopOpnd(static_cast(realMin), GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + /* because of return stmt is often the last stmt */ + GetCurBB()->SetFrequency(frequency); + + return true; + } + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand &beginOpnd = CreateImmOperand(realMin, k64BitSize, true); + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + uint32 realRefNum = (realMax - realMin) / kOffsetAlign + 1; + + ImmOperand &countOpnd = CreateImmOperand(realRefNum, k64BitSize, true); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + 
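+        /*
+         * Shape of the runtime call being assembled here, with illustrative
+         * numbers (realMin == 16, realRefNum == 3); the callee chosen below
+         * may also be the skip variant:
+         *   add x0, x29, #16    // x0 = address of the first local ref slot
+         *   mov x1, #3          // x1 = number of ref slots to visit
+         *   bl  MCC_CleanupLocalStackRef_NaiveRCFast
+         */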
SelectCopyImm(parmRegOpnd2, countOpnd, PTY_i64); + + MIRSymbol *funcSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if ((skipSym != nullptr) && (skipOffset >= realMin) && (skipOffset <= realMax)) { + /* call cleanupskip */ + uint32 stOffset = (skipOffset - realMin) / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopyImm(parmRegOpnd3, retLoc, PTY_i64); + + std::string funcName; + if (forEA) { + funcName = "MCC_CleanupNonRetEscObj"; + } else { + funcName = "MCC_CleanupLocalStackRefSkip_NaiveRCFast"; + } + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << " skip loc " << stOffset << '\n'; +#endif + } else { + /* call cleanup */ + CHECK_FATAL(!forEA, "must be"); + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << '\n'; +#endif + } + + funcSym->SetStorageClass(kScText); + funcSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*funcSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + GetCurBB()->SetFrequency(frequency); + } + return true; +} + +RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const +{ + RegOperand *res = memPool->New(vRegNO, size, kind, flg); + return res; +} + +RegOperand &AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) +{ + DEBUG_ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); + DEBUG_ASSERT(vRegNO < vRegTable.size(), "index out of range"); + uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); + vRegOperandTable[vRegNO] = res; + return *res; +} + +RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) +{ + auto it = vRegOperandTable.find(vRegNO); + return (it != vRegOperandTable.end()) ? 
*(it->second) : CreateVirtualRegisterOperand(vRegNO);
+}
+
+RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand &regOpnd)
+{
+    regno_t regNO = regOpnd.GetRegisterNumber();
+    auto it = vRegOperandTable.find(regNO);
+    if (it != vRegOperandTable.end()) {
+        it->second->SetSize(regOpnd.GetSize());
+        it->second->SetRegisterNumber(regNO);
+        it->second->SetRegisterType(regOpnd.GetRegisterType());
+        it->second->SetValidBitsNum(regOpnd.GetValidBitsNum());
+        return *it->second;
+    } else {
+        auto *newRegOpnd = static_cast<RegOperand *>(regOpnd.Clone(*memPool));
+        regno_t newRegNO = newRegOpnd->GetRegisterNumber();
+        if (newRegNO >= maxRegCount) {
+            maxRegCount = newRegNO + kRegIncrStepLen;
+            vRegTable.resize(maxRegCount);
+        }
+        vRegOperandTable[newRegNO] = newRegOpnd;
+        VirtualRegNode *vregNode = memPool->New<VirtualRegNode>(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize());
+        vRegTable[newRegNO] = *vregNode;
+        vRegCount = maxRegCount;
+        return *newRegOpnd;
+    }
+}
+
+/*
+ * Traverse all call insns to determine their return type.
+ * If the following insn is a mov/str/blr that uses R0/V0, the call insn has a return value.
+ */
+void AArch64CGFunc::DetermineReturnTypeofCall()
+{
+    FOR_ALL_BB(bb, this) {
+        if (bb->IsUnreachable() || !bb->HasCall()) {
+            continue;
+        }
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsTargetInsn()) {
+                continue;
+            }
+            if (!insn->IsCall() || insn->GetMachineOpcode() == MOP_asm) {
+                continue;
+            }
+            Insn *nextInsn = insn->GetNextMachineInsn();
+            if (nextInsn == nullptr) {
+                continue;
+            }
+            if ((nextInsn->GetMachineOpcode() != MOP_asm) &&
+                ((nextInsn->IsMove() && nextInsn->GetOperand(kInsnSecondOpnd).IsRegister()) || nextInsn->IsStore() ||
+                 (nextInsn->IsCall() && nextInsn->GetOperand(kInsnFirstOpnd).IsRegister()))) {
+                auto *srcOpnd = static_cast<RegOperand *>(&nextInsn->GetOperand(kInsnFirstOpnd));
+                CHECK_FATAL(srcOpnd != nullptr, "nullptr");
+                if (!srcOpnd->IsPhysicalRegister()) {
+                    continue;
+                }
+                if (srcOpnd->GetRegisterNumber() == R0) {
+                    insn->SetRetType(Insn::kRegInt);
+                    continue;
+                }
+                if (srcOpnd->GetRegisterNumber() == V0) {
+                    insn->SetRetType(Insn::kRegFloat);
+                }
+            }
+        }
+    }
+}
+
+void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef)
+{
+    if (!GetCG()->GenLocalRC() && !begin) {
+        /* handling local rc is disabled. */
+        return;
+    }
+
+    AArch64MemLayout *memLayout = static_cast<AArch64MemLayout *>(this->GetMemlayout());
+    int32 refNum = static_cast<int32>(memLayout->GetSizeOfRefLocals() / kOffsetAlign);
+    if (!refNum) {
+        if (begin) {
+            GenerateYieldpoint(*GetCurBB());
+            yieldPointInsn = GetCurBB()->GetLastInsn();
+        }
+        return;
+    }
+
+    /* no MCC_CleanupLocalStackRefSkip when ret_ref is the only ref symbol */
+    if ((refNum == 1) && (retRef != nullptr)) {
+        if (begin) {
+            GenerateYieldpoint(*GetCurBB());
+            yieldPointInsn = GetCurBB()->GetLastInsn();
+        }
+        return;
+    }
+    CHECK_FATAL(refNum < 0xFFFF, "not enough room for size.");
+    int32 refLocBase = memLayout->GetRefLocBaseLoc();
+    CHECK_FATAL((refLocBase >= 0) && (refLocBase < 0xFFFF), "not enough room for offset.");
+    int32 formalRef = 0;
+    /* avoid storing zero to formal localrefvars. */
+    if (begin) {
+        for (uint32 i = 0; i < GetFunction().GetFormalCount(); ++i) {
+            if (GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) {
+                refNum--;
+                formalRef++;
+            }
+        }
+    }
+    /*
+     * if the number of local refvars is less than 12, use stp or str to init the local refvars,
+     * else call function MCC_InitializeLocalStackRef to init.
+ */ + if (begin && (refNum <= kRefNum12) && ((refLocBase + kIntregBytelen * (refNum - 1)) < kStpLdpImm64UpperBound)) { + int32 pairNum = refNum / kDivide2; + int32 singleNum = refNum % kDivide2; + const int32 pairRefBytes = 16; /* the size of each pair of ref is 16 bytes */ + int32 ind = 0; + while (ind < pairNum) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + pairRefBytes * ind; + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstp, zeroOp, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + ind++; + } + if (singleNum > 0) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + kIntregBytelen * (refNum - 1); + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstr, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + } + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + return; + } + + /* refNum is 1 and refvar is not returned, this refvar need to call MCC_DecRef_NaiveRCFast. */ + if ((refNum == 1) && !begin && (retRef == nullptr)) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = + CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + + AppendCall(*callSym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + /* refNum is 2 and one of refvar is returned, only another one is needed to call MCC_DecRef_NaiveRCFast. */ + if ((refNum == 2) && !begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand *stackLoc = nullptr; + if (stOffset == 0) { + /* just have to Dec the next one. */ + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()) + kIntregBytelen, + GetPointerSize() * kBitsPerByte); + } else { + /* just have to Dec the current one. 
*/ + stackLoc = + &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerSize() * kBitsPerByte); + } + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, *stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(stOffset); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + bool needSkip = false; + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand *beginOpnd = + &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef, k64BitSize, true); + ImmOperand *countOpnd = &CreateImmOperand(refNum, k64BitSize, true); + int32 refSkipIndex = -1; + if (!begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + refSkipIndex = stOffset; + if (stOffset == 0) { + /* ret_ref at begin. */ + beginOpnd = &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen, k64BitSize, true); + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else if (stOffset == (refNum - 1)) { + /* ret_ref at end. */ + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else { + needSkip = true; + } + } + + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, *beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + SelectCopyImm(vReg1, *countOpnd, PTY_i64); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, vReg1, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if (begin) { + std::string funcName("MCC_InitializeLocalStackRef"); + sym->SetNameStrIdx(funcName); + CHECK_FATAL(countOpnd->GetValue() > 0, "refCount should be greater than 0."); + refCount = static_cast(countOpnd->GetValue()); + beginOffset = beginOpnd->GetValue(); + } else if (!needSkip) { + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } else { + CHECK_NULL_FATAL(retRef); + if (retRef->GetStIndex() >= memLayout->GetSymAllocTable().size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::HandleRCCall"); + } + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg2 = 
CreateVirtualRegisterOperand(vRegNO2); + SelectCopyImm(vReg2, retLoc, PTY_i64); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopy(parmRegOpnd3, PTY_a64, vReg2, PTY_a64); + + std::string funcName("MCC_CleanupLocalStackRefSkip_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + + Insn &callInsn = AppendCall(*sym, *srcOpnds); + callInsn.SetRefSkipIdx(refSkipIndex); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + GetCurBB()->SetHasCall(); + if (begin) { + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + } +} + +void AArch64CGFunc::SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID) +{ + /* + * in two param regs if possible + * If struct is <= 8 bytes, then it fits into one param reg. + * If struct is <= 16 bytes, then it fits into two param regs. + * Otherwise, it goes onto the stack. + * If the number of available param reg is less than what is + * needed to fit the entire struct into them, then the param + * reg is skipped and the struct goes onto the stack. + * Example 1. + * struct size == 8 bytes. + * param regs x0 to x6 are used. + * struct is passed in x7. + * Example 2. + * struct is 16 bytes. + * param regs x0 to x5 are used. + * struct is passed in x6 and x7. + * Example 3. + * struct is 16 bytes. + * param regs x0 to x6 are used. x7 alone is not enough to pass the struct. + * struct is passed on the stack. + * x7 is not used, as the following param will go onto the stack also. + */ + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + CCLocInfo ploc; + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. */ + /* If symSize is <= 8 bytes then use 1 reg, else 2 */ + CreateCallStructParamPassByStack(symSize, &sym, nullptr, ploc.memOffset); + } else { + /* pass by param regs. */ + RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); + srcOpnds.PushOpnd(*parmOpnd0); + if (ploc.reg1) { + RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); + srcOpnds.PushOpnd(*parmOpnd1); + } + if (ploc.reg2) { + RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); + srcOpnds.PushOpnd(*parmOpnd2); + } + if (ploc.reg3) { + RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); + srcOpnds.PushOpnd(*parmOpnd3); + } + } +} + +void AArch64CGFunc::SelectParmListIreadSmallAggregate(const IreadNode &iread, MIRType &structType, + ListOperand &srcOpnds, int32 offset, + AArch64CallConvImpl &parmLocator) +{ + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + CCLocInfo ploc; + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. 
*/
+        CreateCallStructParamPassByStack(symSize, nullptr, addrOpnd1, ploc.memOffset);
+    } else {
+        /* pass by param regs. */
+        fpParamState state = kStateUnknown;
+        uint32 memSize = 0;
+        switch (ploc.fpSize) {
+            case k0BitSize:
+                state = kNotFp;
+                memSize = k64BitSize;
+                break;
+            case k4BitSize:
+                state = kFp32Bit;
+                memSize = k32BitSize;
+                break;
+            case k8BitSize:
+                state = kFp64Bit;
+                memSize = k64BitSize;
+                break;
+            default:
+                break;
+        }
+        OfstOperand *offOpnd0 = &GetOrCreateOfstOpnd(static_cast<uint64>(static_cast<int64>(offset)), k32BitSize);
+        MemOperand *mopnd =
+            &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd0, nullptr);
+        CreateCallStructParamPassByReg(ploc.reg0, *mopnd, srcOpnds, state);
+        if (ploc.reg1) {
+            OfstOperand *offOpnd1 = &GetOrCreateOfstOpnd(
+                ((ploc.fpSize ? ploc.fpSize : GetPointerSize()) + static_cast<uint32>(offset)), k32BitSize);
+            mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd1, nullptr);
+            CreateCallStructParamPassByReg(ploc.reg1, *mopnd, srcOpnds, state);
+        }
+        if (ploc.reg2) {
+            OfstOperand *offOpnd2 = &GetOrCreateOfstOpnd(
+                ((ploc.fpSize ? (ploc.fpSize * k4BitShift) : GetPointerSize()) + static_cast<uint32>(offset)),
+                k32BitSize);
+            mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd2, nullptr);
+            CreateCallStructParamPassByReg(ploc.reg2, *mopnd, srcOpnds, state);
+        }
+        if (ploc.reg3) {
+            OfstOperand *offOpnd3 = &GetOrCreateOfstOpnd(
+                ((ploc.fpSize ? (ploc.fpSize * k8BitShift) : GetPointerSize()) + static_cast<uint32>(offset)),
+                k32BitSize);
+            mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd3, nullptr);
+            CreateCallStructParamPassByReg(ploc.reg3, *mopnd, srcOpnds, state);
+        }
+    }
+}
+
+void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, ListOperand &srcOpnds,
+                                                      AArch64CallConvImpl &parmLocator, int32 &structCopyOffset,
+                                                      int32 fromOffset)
+{
+    /*
+     * Pass a larger sized struct on the stack.
+     * Need to copy the entire structure onto the stack.
+     * The pointer to the starting address of the copied struct is then
+     * used as the parameter for the struct.
+     * This pointer is passed as the next parameter.
+     * Example 1:
+     *   struct is 23 bytes.
+     *   param regs x0 to x5 are used.
+     *   First round up 23 to 24, so 3 eight-byte slots.
+     *   Copy struct to a created space on the stack.
+     *   Pointer of copied struct is passed in x6.
+     * Example 2:
+     *   struct is 25 bytes.
+     *   param regs x0 to x7 are used.
+     *   First round up 25 to 32, so 4 eight-byte slots.
+     *   Copy struct to a created space on the stack.
+     *   Pointer of copied struct is passed on stack as the 9th parameter.
+     */
+    uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx());
+    CCLocInfo ploc;
+    CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here");
+    parmLocator.LocateNextParm(structType, ploc);
+    uint32 numMemOp = static_cast<uint32>(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */
+    /* Create the struct copies.
*/ + RegOperand *parmOpnd = + CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, fromOffset, ploc); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } + structCopyOffset += static_cast(numMemOp * GetPointerSize()); +} + +void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + int32 &structCopyOffset, int32 fromOffset) +{ + uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + CCLocInfo ploc; + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + parmLocator.LocateNextParm(structType, ploc); + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ + RegOperand *parmOpnd = + CreateCallStructParamCopyToStack(numMemOp, nullptr, addrOpnd1, structCopyOffset, fromOffset, ploc); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } +} + +void AArch64CGFunc::CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, RegOperand *addrOpnd, + int32 baseOffset) +{ + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + uint32 numRegNeeded = (static_cast(symSize) <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (int j = 0; j < static_cast(numRegNeeded); j++) { + if (sym) { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(k8ByteSize)), k64BitSize); + } else { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(GetPointerSize())), k64BitSize); + } + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * k8ByteSize, k32BitSize), nullptr); + } else { + ldMopnd = &GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * GetPointerSize(), k32BitSize), nullptr); + } + } + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + if (CGOptions::IsArm64ilp32()) { + stMopnd = + &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * static_cast(k8ByteSize))), k64BitSize); + } else { + stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * GetPointerSize())), k64BitSize); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } +} + +RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, + int32 offset, uint32 parmNum) +{ + uint32 memSize; + PrimType primType; + RegOperand *parmOpnd; + uint32 dataSizeBits; + AArch64reg reg; + switch (parmNum) { + case 0: + reg = static_cast(ploc.reg0); + break; + case 1: + reg = static_cast(ploc.reg1); + break; + case 2: + reg = static_cast(ploc.reg2); + break; + case 3: + reg = static_cast(ploc.reg3); + break; + default: + CHECK_FATAL(false, "Exceeded maximum allowed fp parameter registers for struct passing"); + } + if (ploc.fpSize == 0) { + memSize = k64BitSize; + primType = PTY_i64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, 
kRegTyInt); + } else if (ploc.fpSize == k4ByteSize) { + memSize = k32BitSize; + primType = PTY_f32; + dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + } else if (ploc.fpSize == k8ByteSize) { + memSize = k64BitSize; + primType = PTY_f64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + } else { + CHECK_FATAL(false, "Unknown call parameter state"); + } + MemOperand *memOpnd; + if (sym.GetStorageClass() == kScFormal && fieldID > 0) { + MemOperand &baseOpnd = GetOrCreateMemOpnd(sym, 0, memSize); + RegOperand &base = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), base, baseOpnd)); + memOpnd = &CreateMemOpnd(base, (static_cast(offset) + parmNum * GetPointerSize()), memSize); + } else if (ploc.fpSize) { + memOpnd = &GetOrCreateMemOpnd(sym, (ploc.fpSize * parmNum + static_cast(offset)), memSize); + } else { + if (CGOptions::IsArm64ilp32()) { + memOpnd = &GetOrCreateMemOpnd(sym, (k8ByteSize * parmNum + static_cast(offset)), memSize); + } else { + memOpnd = &GetOrCreateMemOpnd(sym, (GetPointerSize() * parmNum + static_cast(offset)), memSize); + } + } + MOperator selectedMop = PickLdInsn(dataSizeBits, primType); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + !IsOperandImmValid(selectedMop, memOpnd, kInsnSecondOpnd)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSizeBits); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, *memOpnd)); + + return parmOpnd; +} + +void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, + fpParamState state) +{ + RegOperand *parmOpnd; + uint32 dataSizeBits = 0; + PrimType pType = PTY_void; + parmOpnd = nullptr; + AArch64reg reg = static_cast(regno); + if (state == kNotFp) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + pType = PTY_i64; + } else if (state == kFp32Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; + pType = PTY_f32; + } else if (state == kFp64Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f64) * kBitsPerByte; + pType = PTY_f64; + } else { + DEBUG_ASSERT(0, "CreateCallStructParamPassByReg: Unknown state"); + } + + MOperator selectedMop = PickLdInsn(dataSizeBits, pType); + if (!IsOperandImmValid(selectedMop, &memOpnd, kInsnSecondOpnd)) { + memOpnd = SplitOffsetWithAddInstruction(memOpnd, dataSizeBits); + } + DEBUG_ASSERT(parmOpnd != nullptr, "parmOpnd should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, memOpnd)); + srcOpnds.PushOpnd(*parmOpnd); +} + +void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol *sym, RegOperand *addropnd, uint32 structSize, + int32 copyOffset, int32 fromOffset) +{ + std::vector opndVec; + + RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + opndVec.push_back(vreg1); /* result */ + + RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offsetOpnd0 = 
&CreateImmOperand(copyOffset, k64BitSize, false); + SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); + opndVec.push_back(parmOpnd); /* param 0 */ + + if (sym != nullptr) { + if (sym->GetStorageClass() == kScGlobal || sym->GetStorageClass() == kScExtern) { + StImmOperand &stopnd = CreateStImmOperand(*sym, fromOffset, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + SelectAddrof(staddropnd, stopnd); + opndVec.push_back(&staddropnd); /* param 1 */ + } else if (sym->GetStorageClass() == kScAuto || sym->GetStorageClass() == kScFormal) { + RegOperand *parm1Reg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + AArch64SymbolAlloc *symloc = + static_cast(GetMemlayout()->GetSymAllocInfo(sym->GetStIndex())); + RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); + int32 stoffset = GetBaseOffset(*symloc); + ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *parm1Reg, *baseOpnd, *offsetOpnd1)); + if (sym->GetStorageClass() == kScFormal) { + MemOperand *ldmopnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, parm1Reg, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), static_cast(nullptr)); + RegOperand *tmpreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + RegOperand *vreg2 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), *tmpreg, *ldmopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *vreg2, *tmpreg, + CreateImmOperand(fromOffset, k64BitSize, false))); + parm1Reg = vreg2; + } + opndVec.push_back(parm1Reg); /* param 1 */ + } else if (sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic) { + CHECK_FATAL(sym->GetSKind() != kStConst, "Unsupported sym const for struct param"); + StImmOperand *stopnd = &CreateStImmOperand(*sym, 0, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, staddropnd, *stopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, staddropnd, staddropnd, *stopnd)); + opndVec.push_back(&staddropnd); /* param 1 */ + } else { + CHECK_FATAL(0, "Unsupported sym for struct param"); + } + } else { + opndVec.push_back(addropnd); /* param 1 */ + } + + RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); + opndVec.push_back(&vreg3); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); +} + +RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, RegOperand *addrOpd, + int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc) +{ + /* Create the struct copies. 
*/ + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + for (uint32 j = 0; j < numMemOp; j++) { + if (sym != nullptr) { + if (sym->GetStorageClass() == kScFormal) { + MemOperand &base = GetOrCreateMemOpnd(*sym, 0, k64BitSize); + RegOperand &vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), vreg, base); + GetCurBB()->AppendInsn(ldInsn); + ldMopnd = &GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, k64BitSize, &vreg, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), + nullptr); + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = + &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k32BitSize); + } else { + ldMopnd = + &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k64BitSize); + } + } + } else { + ldMopnd = &GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, k64BitSize, addrOpd, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), nullptr); + } + if (CGOptions::IsArm64ilp32()) { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k4ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k32BitSize, PTY_i32), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k32BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k32BitSize, PTY_i32), *vreg, *stMopnd)); + } else { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } + } + /* Create the copy address parameter for the struct */ + RegOperand *fpopnd = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); + if (ploc.reg0 == kRinvalid) { + RegOperand &res = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(res, *fpopnd, *offset, PTY_u64); + MemOperand &stMopnd2 = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), res, stMopnd2)); + return nullptr; + } else { + RegOperand *parmOpnd = + &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); + SelectAdd(*parmOpnd, *fpopnd, *offset, PTY_a64); + return parmOpnd; + } +} + +void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, + AArch64CallConvImpl &parmLocator, ListOperand &srcOpnds) +{ + RegOperand &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); + + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + if (ploc.reg0 != 0) { + RegOperand &res = GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); + SelectAdd(res, spReg, offsetOpnd, PTY_a64); + srcOpnds.PushOpnd(res); + } else { + RegOperand &parmOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(parmOpnd, spReg, offsetOpnd, PTY_a64); + MemOperand 
&stmopnd = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), parmOpnd, stmopnd)); + } +} + +void AArch64CGFunc::SelectParmListForAggregate(BaseNode &argExpr, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset) +{ + uint64 symSize; + int32 rhsOffset = 0; + if (argExpr.GetOpCode() == OP_dread) { + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListDreadSmallAggregate(*sym, *ty, srcOpnds, rhsOffset, parmLocator, dread.GetFieldID()); + } else if (symSize > kParmMemcpySize) { + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListDreadLargeAggregate(*sym, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); + } else if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + if (rhsOffset > 0) { + RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); + RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, + CreateImmOperand(rhsOffset, k64BitSize, false))); + } + + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListIreadLargeAggregate(iread, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } + } else { + CHECK_FATAL(0, "NYI"); + } +} + +size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) +{ + if (naryNode.GetOpCode() == OP_call) { + CallNode &callNode = static_cast(naryNode); + MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + TyIdx retIdx = callFunc->GetReturnTyIdx(); + if (callFunc->IsFirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[0].formalTyIdx); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); + if ((retSize == 0) && callFunc->IsReturnStruct()) { + 
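+            /*
+             * retSize can be 0 even though the callee is marked as returning
+             * a struct (presumably when the return type was lowered away); in
+             * that case fall back to the struct type index recorded on the
+             * function.
+             */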
TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); + return GetBecommon().GetTypeSize(tyIdx); + } + return retSize; + } else if (naryNode.GetOpCode() == OP_icall) { + IcallNode &icallNode = static_cast(naryNode); + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if (p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr) { + return GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()); + } + } + } else if (naryNode.GetOpCode() == OP_icallproto) { + IcallNode &icallProto = static_cast(naryNode); + MIRFuncType *funcTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallProto.GetRetTyIdx())); + if (funcTy->FirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTy->GetNthParamType(0)); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + return GetBecommon().GetTypeSize(funcTy->GetRetTyIdx()); + } + return 0; +} + +void AArch64CGFunc::SelectParmListPreprocessLargeStruct(BaseNode &argExpr, int32 &structCopyOffset) +{ + uint64 symSize; + int32 rhsOffset = 0; + if (argExpr.GetOpCode() == OP_dread) { + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + CreateCallStructParamMemcpy(sym, nullptr, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + } + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + if (rhsOffset > 0) { + regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); + RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, + CreateImmOperand(rhsOffset, k64BitSize, false))); + addrOpnd = result; + } + + CreateCallStructParamMemcpy(nullptr, addrOpnd, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + 
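+            /*
+             * Slot arithmetic, worked through for the 25-byte struct from
+             * Example 2 above SelectParmListDreadLargeAggregate:
+             * RoundUp(25, 8) == 32, so numMemOp == 4 and the copy area
+             * advances by 4 * 8 == 32 bytes below.
+             */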
structCopyOffset += static_cast<int32>(numMemOp * GetPointerSize());
+        }
+    }
+}
+
+/* preprocess calls in the parm list */
+bool AArch64CGFunc::MarkParmListCall(BaseNode &expr)
+{
+    if (!CGOptions::IsPIC()) {
+        return false;
+    }
+    switch (expr.GetOpCode()) {
+        case OP_addrof: {
+            auto &addrNode = static_cast<AddrofNode &>(expr);
+            MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrNode.GetStIdx());
+            if (symbol->IsThreadLocal()) {
+                return true;
+            }
+            break;
+        }
+        default: {
+            for (auto i = 0; i < expr.GetNumOpnds(); i++) {
+                if (expr.Opnd(i)) {
+                    if (MarkParmListCall(*expr.Opnd(i))) {
+                        return true;
+                    }
+                }
+            }
+            break;
+        }
+    }
+    return false;
+}
+
+void AArch64CGFunc::SelectParmListPreprocess(const StmtNode &naryNode, size_t start, std::set<size_t> &specialArgs)
+{
+    size_t i = start;
+    int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize();
+    for (; i < naryNode.NumOpnds(); ++i) {
+        BaseNode *argExpr = naryNode.Opnd(i);
+        PrimType primType = argExpr->GetPrimType();
+        if (MarkParmListCall(*argExpr)) {
+            (void)specialArgs.emplace(i);
+        }
+        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
+        if (primType != PTY_agg) {
+            continue;
+        }
+        SelectParmListPreprocessLargeStruct(*argExpr, structCopyOffset);
+    }
+}
+
+/*
+   SelectParmList generates an instruction for each of the parameters
+   to load the parameter value into the corresponding register.
+   We return a list of registers to the call instruction because
+   they may be needed in the register allocation phase.
+ */
+void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
+{
+    size_t i = 0;
+    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) {
+        i++;
+    }
+    std::set<size_t> specialArgs;
+    SelectParmListPreprocess(naryNode, i, specialArgs);
+    bool specialArg = false;
+    bool firstArgReturn = false;
+    MIRFunction *callee = nullptr;
+    if (dynamic_cast<CallNode *>(&naryNode) != nullptr) {
+        auto calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
+        callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
+        firstArgReturn = callee->IsFirstArgReturn();
+    } else if (naryNode.GetOpCode() == OP_icallproto) {
+        IcallNode *icallnode = &static_cast<IcallNode &>(naryNode);
+        MIRFuncType *funcType =
+            static_cast<MIRFuncType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallnode->GetRetTyIdx()));
+        firstArgReturn = funcType->FirstArgReturn();
+    }
+    BB *curBBrecord = GetCurBB();
+    BB *tmpBB = nullptr;
+    if (!specialArgs.empty()) {
+        tmpBB = CreateNewBB();
+        specialArg = true;
+    }
+    AArch64CallConvImpl parmLocator(GetBecommon());
+    CCLocInfo ploc;
+    int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize();
+    std::vector<Insn *> insnForStackArgs;
+    uint32 stackArgsCount = 0;
+    for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) {
+        if (specialArg) {
+            DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
+            SetCurBB(specialArgs.count(i) ?
*curBBrecord : *tmpBB); + } + bool is64x1vec = false; + MIRType *ty = nullptr; + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + DEBUG_ASSERT(primType != PTY_void, "primType should not be void"); + if (callee != nullptr && pnum < callee->GetFormalCount() && callee->GetFormal(pnum) != nullptr) { + is64x1vec = callee->GetFormal(pnum)->GetAttr(ATTR_oneelem_simd); + } + switch (argExpr->op) { + case OP_dread: { + DreadNode *dNode = static_cast(argExpr); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(dNode->GetStIdx()); + if (dNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "SelectParmList: non-zero fieldID for non-structure"); + FieldAttrs fa = structType->GetFieldAttrs(dNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + is64x1vec = symbol->GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_iread: { + IreadNode *iNode = static_cast(argExpr); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iNode->GetTyIdx()); + MIRPtrType *ptrTyp = static_cast(type); + DEBUG_ASSERT(ptrTyp != nullptr, "expect a pointer type at iread node"); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyp->GetPointedTyIdx()); + if (iNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(pointedTy); + FieldAttrs fa = structType->GetFieldAttrs(iNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + TypeAttrs ta = static_cast(ptrTyp)->GetTypeAttrs(); + is64x1vec = ta.GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_constval: { + CallNode *call = safe_cast(&naryNode); + if (call == nullptr) { + break; + } + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); + if (fn == nullptr || fn->GetFormalCount() == 0 || fn->GetFormalCount() <= pnum) { + break; + } + is64x1vec = fn->GetFormalDefAt(pnum).formalAttrs.GetAttr(ATTR_oneelem_simd); + break; + } + default: + break; + } + /* use alloca */ + if (primType == PTY_agg) { + SelectParmListForAggregate(*argExpr, srcOpnds, parmLocator, structCopyOffset); + continue; + } + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + RegOperand *expRegOpnd = nullptr; + Operand *opnd = HandleExpr(naryNode, *argExpr); + if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { + is64x1vec = true; + } + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, primType); + } + expRegOpnd = static_cast(opnd); + + if ((pnum == 0) && firstArgReturn) { + parmLocator.InitCCLocInfo(ploc); + ploc.reg0 = R8; + } else { + parmLocator.LocateNextParm(*ty, ploc); + } + /* is64x1vec should be an int64 value in an FP/simd reg for ABI compliance, + convert R-reg to equivalent V-reg */ + PrimType destPrimType = primType; + if (is64x1vec && ploc.reg0 != kRinvalid && ploc.reg0 < R7) { + ploc.reg0 = AArch64Abi::floatParmRegs[static_cast(ploc.reg0) - 1]; + destPrimType = PTY_f64; + } + + /* skip unused args */ + if (callee != nullptr && callee->GetFuncDesc().IsArgUnused(pnum)) + continue; + + if (ploc.reg0 != kRinvalid) { /* load to the register. 
*/
+            CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
+            RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
+                static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType));
+            SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType);
+            srcOpnds.PushOpnd(parmRegOpnd);
+        } else { /* store to the memory segment for stack-passed arguments. */
+            if (CGOptions::IsBigEndian()) {
+                if (GetPrimTypeBitSize(primType) < k64BitSize) {
+                    ploc.memOffset = ploc.memOffset + static_cast<int32>(k4BitSize);
+                }
+            }
+            MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType));
+            Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType),
+                                                        *expRegOpnd, actMemOpnd);
+            actMemOpnd.SetStackArgMem(true);
+            if (Globals::GetInstance()->GetOptimLevel() == 2 && stackArgsCount < kShiftAmount12) {
+                (void)insnForStackArgs.emplace_back(&strInsn);
+                stackArgsCount++;
+            } else {
+                GetCurBB()->AppendInsn(strInsn);
+            }
+        }
+        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
+    }
+    if (specialArg) {
+        DEBUG_ASSERT(tmpBB, "need temp bb for lower priority args");
+        curBBrecord->InsertAtEnd(*tmpBB);
+        SetCurBB(*curBBrecord);
+    }
+    for (auto &strInsn : insnForStackArgs) {
+        GetCurBB()->AppendInsn(*strInsn);
+    }
+}
+
+void AArch64CGFunc::SelectParmListNotC(StmtNode &naryNode, ListOperand &srcOpnds)
+{
+    size_t i = 0;
+    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
+        i++;
+    }
+
+    CCImpl &parmLocator = *GetOrCreateLocator(CCImpl::GetCallConvKind(naryNode));
+    CCLocInfo ploc;
+    std::vector<Insn *> insnForStackArgs;
+    uint32 stackArgsCount = 0;
+    for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) {
+        MIRType *ty = nullptr;
+        BaseNode *argExpr = naryNode.Opnd(i);
+        PrimType primType = argExpr->GetPrimType();
+        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
+        /* use alloca */
+        ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
+        RegOperand *expRegOpnd = nullptr;
+        Operand *opnd = HandleExpr(naryNode, *argExpr);
+        if (!opnd->IsRegister()) {
+            opnd = &LoadIntoRegister(*opnd, primType);
+        }
+        expRegOpnd = static_cast<RegOperand *>(opnd);
+
+        parmLocator.LocateNextParm(*ty, ploc);
+        PrimType destPrimType = primType;
+        if (ploc.reg0 != kRinvalid) { /* load to the register. */
+            CHECK_FATAL(expRegOpnd != nullptr, "null ptr check");
+            RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand(
+                static_cast<AArch64reg>(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType));
+            SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType);
+            srcOpnds.PushOpnd(parmRegOpnd);
+        } else { /* store to the memory segment for stack-passed arguments. 
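+       The value is stored at [sp, #memOffset] in the outgoing-argument
+       area; on big-endian targets a sub-64-bit value is pushed 4 bytes up
+       so the payload sits in the high half of its 8-byte slot. A sketch
+       (illustrative only):
+           str w<v9>, [sp]       // first stack-passed arg
+           str x<v10>, [sp, #8]  // second stack-passed arg
+       At -O2 up to kShiftAmount12 such stores are buffered in
+       insnForStackArgs and appended after all register moves.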
*/
+            if (CGOptions::IsBigEndian()) {
+                if (GetPrimTypeBitSize(primType) < k64BitSize) {
+                    ploc.memOffset = ploc.memOffset + static_cast<int32>(k4BitSize);
+                }
+            }
+            MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType));
+            Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType),
+                                                        *expRegOpnd, actMemOpnd);
+            actMemOpnd.SetStackArgMem(true);
+            if (Globals::GetInstance()->GetOptimLevel() == 2 && stackArgsCount < kShiftAmount12) {
+                (void)insnForStackArgs.emplace_back(&strInsn);
+                stackArgsCount++;
+            } else {
+                GetCurBB()->AppendInsn(strInsn);
+            }
+        }
+        DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI");
+    }
+    for (auto &strInsn : insnForStackArgs) {
+        GetCurBB()->AppendInsn(*strInsn);
+    }
+}
+
+// based on call conv, choose how to prepare args
+void AArch64CGFunc::SelectParmListWrapper(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative)
+{
+    if (CCImpl::GetCallConvKind(naryNode) == kCCall) {
+        SelectParmList(naryNode, srcOpnds, isCallNative);
+    } else if (CCImpl::GetCallConvKind(naryNode) == kWebKitJS || CCImpl::GetCallConvKind(naryNode) == kGHC) {
+        SelectParmListNotC(naryNode, srcOpnds);
+    } else {
+        CHECK_FATAL(false, "niy");
+    }
+}
+/*
+ * for MCC_DecRefResetPair(addrof ptr %Reg17_R5592, addrof ptr %Reg16_R6202) or
+ * MCC_ClearLocalStackRef(addrof ptr %Reg17_R5592), the parameter (addrof ptr xxx) is converted to asm as follows:
+ *   add vreg, x29, #imm
+ *   mov R0/R1, vreg
+ * this function is used to prepare the parameters; the generated vreg is returned, and #imm is saved in offsetValue.
+ */
+Operand *AArch64CGFunc::SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue)
+{
+    MIRSymbol *symbol = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(expr.GetStIdx());
+    PrimType ptype = expr.GetPrimType();
+    regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(ptype));
+    Operand &result = CreateVirtualRegisterOperand(vRegNO);
+    CHECK_FATAL(expr.GetFieldID() == 0, "the fieldID of parameter in clear stack reference call must be 0");
+    if (!CGOptions::IsQuiet()) {
+        maple::LogInfo::MapleLogger(kLlErr)
+            << "Warning: we expect AddrOf with StImmOperand is not used for local variables";
+    }
+    auto *symLoc = static_cast<AArch64SymbolAlloc *>(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex()));
+    ImmOperand *offset = nullptr;
+    if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) {
+        offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false, kUnAdjustVary);
+    } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) {
+        auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc);
+        if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) {
+            offset = (*it).second;
+        } else {
+            offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false);
+            immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset;
+        }
+    } else {
+        CHECK_FATAL(false, "the symLoc of parameter in clear stack reference call is unreasonable");
+    }
+    DEBUG_ASSERT(offset != nullptr, "offset should not be nullptr");
+    offsetValue = offset->GetValue();
+    SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64);
+    if (GetCG()->GenerateVerboseCG()) {
+        /* Add a comment */
+        Insn *insn = GetCurBB()->GetLastInsn();
+        std::string comm = "local/formal var: ";
+        comm.append(symbol->GetName());
+        insn->SetComment(comm);
+    }
+    return &result;
+}
+
+/* select parameters for the MCC_DecRefResetPair and MCC_ClearLocalStackRef functions */
+void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand 
&srcOpnds, + std::vector &stackPostion) +{ + CHECK_FATAL(false, "should not go here"); + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < naryNode.NumOpnds(); ++i) { + MIRType *ty = nullptr; + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + DEBUG_ASSERT(primType != PTY_void, "primType check"); + /* use alloc */ + CHECK_FATAL(primType != PTY_agg, "the type of argument is unreasonable"); + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + CHECK_FATAL(argExpr->GetOpCode() == OP_addrof, "the argument of clear stack call is unreasonable"); + auto *expr = static_cast(argExpr); + int64 offsetValue = 0; + Operand *opnd = SelectClearStackCallParam(*expr, offsetValue); + stackPostion.emplace_back(offsetValue); + auto *expRegOpnd = static_cast(opnd); + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention support here"); + parmLocator.LocateNextParm(*ty, ploc); + CHECK_FATAL(ploc.reg0 != 0, "the parameter of ClearStackCall must be passed by register"); + CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(primType)); + SelectCopy(parmRegOpnd, primType, *expRegOpnd, primType); + srcOpnds.PushOpnd(parmRegOpnd); + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } +} + +/* + * intrinsify Unsafe.getAndAddInt and Unsafe.getAndAddLong + * generate an intrinsic instruction instead of a function call + * intrinsic_get_add_int w0, xt, ws, ws, x1, x2, w3, label + */ +void AArch64CGFunc::IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty) +{ + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.getAndAddInt has more than 4 parameters */ + DEBUG_ASSERT(opnds.size() >= 4, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *deltaOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(pty, -1)); + LabelIdx labIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty); + RegOperand &tempOpnd2 = CreateRegisterOperandOfType(PTY_i32); + MOperator mOp = (pty == PTY_i64) ? 
MOP_get_and_addL : MOP_get_and_addI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(&tempOpnd2); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(deltaOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.getAndSetInt and Unsafe.getAndSetLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty) +{ + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.getAndSetInt has 4 parameters */ + DEBUG_ASSERT(opnds.size() == 4, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(pty, -1)); + LabelIdx labIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(PTY_i32); + + MOperator mOp = (pty == PTY_i64) ? MOP_get_and_setL : MOP_get_and_setI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.compareAndSwapInt and Unsafe.compareAndSwapLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty) +{ + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.compareAndSwapInt has more than 5 parameters */ + DEBUG_ASSERT(opnds.size() >= 5, "ensure the operands number"); + auto iter = opnds.begin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *expectedValueOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i64, -1)); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty); + LabelIdx labIdx1 = CreateLabel(); + LabelOperand &label1Opnd = GetOrCreateLabelOperand(labIdx1); + LabelIdx labIdx2 = CreateLabel(); + LabelOperand &label2Opnd = GetOrCreateLabelOperand(labIdx2); + MOperator mOp = (pty == PTY_i32) ? 
MOP_compare_and_swapI : MOP_compare_and_swapL; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(expectedValueOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&label1Opnd); + intrnOpnds.emplace_back(&label2Opnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * the lowest bit of count field is used to indicate whether or not the string is compressed + * if the string is not compressed, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, + LabelIdx jumpLabIdx) +{ + MemOperand &memOpnd = CreateMemOpnd(str, countOffset, str.GetSize()); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator loadOp = PickLdInsn(bitSize, countPty); + RegOperand &countOpnd = CreateRegisterOperandOfType(countPty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(loadOp, countOpnd, memOpnd)); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + RegOperand &countLowestBitOpnd = CreateRegisterOperandOfType(countPty); + MOperator andOp = bitSize == k64BitSize ? MOP_xandrri13 : MOP_wandrri12; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(andOp, countLowestBitOpnd, countOpnd, immValueOne)); + RegOperand &wzr = GetZeroOpnd(bitSize); + MOperator cmpOp = (bitSize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpOp, rflag, wzr, countLowestBitOpnd)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_beq, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &countOpnd; +} + +/* + * count field stores the length shifted one bit to the left + * if the length is less than eight, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringLengthLessThanEight(BB &bb, RegOperand &countOpnd, PrimType countPty, + LabelIdx jumpLabIdx) +{ + RegOperand &lengthOpnd = CreateRegisterOperandOfType(countPty); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? MOP_xlsrrri6 : MOP_wlsrrri5; + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, lengthOpnd, countOpnd, immValueOne)); + constexpr int kConstIntEight = 8; + ImmOperand &immValueEight = CreateImmOperand(countPty, kConstIntEight); + MOperator cmpImmOp = (bitSize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpImmOp, rflag, lengthOpnd, immValueEight)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blt, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &lengthOpnd; +} + +void AArch64CGFunc::GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString, RegOperand &patternString, + RegOperand &srcCountOpnd, RegOperand &patternLengthOpnd, + PrimType countPty, LabelIdx jumpLabIdx) +{ + RegOperand &srcLengthOpnd = CreateRegisterOperandOfType(countPty); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? 
MOP_xlsrrri6 : MOP_wlsrrri5; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, srcLengthOpnd, srcCountOpnd, immValueOne)); +#ifdef USE_32BIT_REF + const int64 stringBaseObjSize = 16; /* shadow(4)+monitor(4)+count(4)+hash(4) */ +#else + const int64 stringBaseObjSize = 20; /* shadow(8)+monitor(4)+count(4)+hash(4) */ +#endif /* USE_32BIT_REF */ + PrimType pty = (srcString.GetSize() == k64BitSize) ? PTY_i64 : PTY_i32; + ImmOperand &immStringBaseOffset = CreateImmOperand(pty, stringBaseObjSize); + MOperator addOp = (pty == PTY_i64) ? MOP_xaddrri12 : MOP_waddrri12; + RegOperand &srcStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, srcStringBaseOpnd, srcString, immStringBaseOffset)); + RegOperand &patternStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, patternStringBaseOpnd, patternString, immStringBaseOffset)); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i32, -1)); + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&srcStringBaseOpnd); + intrnOpnds.emplace_back(&srcLengthOpnd); + intrnOpnds.emplace_back(&patternStringBaseOpnd); + intrnOpnds.emplace_back(&patternLengthOpnd); + const uint32 tmpRegOperandNum = 6; + for (uint32 i = 0; i < tmpRegOperandNum - 1; ++i) { + RegOperand &tmpOpnd = CreateRegisterOperandOfType(PTY_i64); + intrnOpnds.emplace_back(&tmpOpnd); + } + intrnOpnds.emplace_back(&CreateRegisterOperandOfType(PTY_i32)); + const uint32 labelNum = 7; + for (uint32 i = 0; i < labelNum; ++i) { + LabelIdx labIdx = CreateLabel(); + LabelOperand &labelOpnd = GetOrCreateLabelOperand(labIdx); + intrnOpnds.emplace_back(&labelOpnd); + } + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_string_indexof, intrnOpnds)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBGoto); +} + +/* + * intrinsify String.indexOf + * generate an intrinsic instruction instead of a function call if both the source string and the specified substring + * are compressed and the length of the substring is not less than 8, i.e. 
+ * bl String.indexOf, srcString, patternString ===>>
+ *
+ * ldr srcCountOpnd, [srcString, offset]
+ * and srcCountLowestBitOpnd, srcCountOpnd, #1
+ * cmp wzr, srcCountLowestBitOpnd
+ * beq Label.call
+ * ldr patternCountOpnd, [patternString, offset]
+ * and patternCountLowestBitOpnd, patternCountOpnd, #1
+ * cmp wzr, patternCountLowestBitOpnd
+ * beq Label.call
+ * lsr patternLengthOpnd, patternCountOpnd, #1
+ * cmp patternLengthOpnd, #8
+ * blt Label.call
+ * lsr srcLengthOpnd, srcCountOpnd, #1
+ * add srcStringBaseOpnd, srcString, immStringBaseOffset
+ * add patternStringBaseOpnd, patternString, immStringBaseOffset
+ * intrinsic_string_indexof retVal, srcStringBaseOpnd, srcLengthOpnd, patternStringBaseOpnd, patternLengthOpnd,
+ *                          tmpOpnd1, tmpOpnd2, tmpOpnd3, tmpOpnd4, tmpOpnd5, tmpOpnd6,
+ *                          label1, label2, label3, label4, label5, label6, label7
+ * b Label.joint
+ * Label.call:
+ * bl String.indexOf, srcString, patternString
+ * Label.joint:
+ */
+void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym)
+{
+    MapleList<RegOperand *> &opnds = srcOpnds.GetOperands();
+    /* String.indexOf opnd size must be at least 2 */
+    DEBUG_ASSERT(opnds.size() >= 2, "ensure the operands number");
+    auto iter = opnds.begin();
+    RegOperand *srcString = *iter;
+    RegOperand *patternString = *(++iter);
+    GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangStringStr);
+    MIRType *type =
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx));
+    auto stringType = static_cast<MIRStructType *>(type);
+    CHECK_FATAL(stringType != nullptr, "Ljava_2Flang_2FString_3B type can not be null");
+    FieldID fieldID = GetMirModule().GetMIRBuilder()->GetStructFieldIDFromFieldNameParentFirst(stringType, "count");
+    MIRType *fieldType = stringType->GetFieldType(fieldID);
+    PrimType countPty = fieldType->GetPrimType();
+    int32 offset = GetBecommon().GetFieldOffset(*stringType, fieldID).first;
+    LabelIdx callBBLabIdx = CreateLabel();
+    RegOperand *srcCountOpnd = CheckStringIsCompressed(*GetCurBB(), *srcString, offset, countPty, callBBLabIdx);
+
+    BB *srcCompressedBB = CreateNewBB();
+    GetCurBB()->AppendBB(*srcCompressedBB);
+    RegOperand *patternCountOpnd =
+        CheckStringIsCompressed(*srcCompressedBB, *patternString, offset, countPty, callBBLabIdx);
+
+    BB *patternCompressedBB = CreateNewBB();
+    RegOperand *patternLengthOpnd =
+        CheckStringLengthLessThanEight(*patternCompressedBB, *patternCountOpnd, countPty, callBBLabIdx);
+
+    BB *intrinsicBB = CreateNewBB();
+    LabelIdx jointLabIdx = CreateLabel();
+    GenerateIntrnInsnForStrIndexOf(*intrinsicBB, *srcString, *patternString, *srcCountOpnd, *patternLengthOpnd,
+                                   countPty, jointLabIdx);
+
+    BB *callBB = CreateNewBB();
+    callBB->AddLabel(callBBLabIdx);
+    SetLab2BBMap(callBBLabIdx, *callBB);
+    SetCurBB(*callBB);
+    Insn &callInsn = AppendCall(funcSym, srcOpnds);
+    MIRType *retType = funcSym.GetFunction()->GetReturnType();
+    if (retType != nullptr) {
+        callInsn.SetRetSize(static_cast<uint32>(retType->GetSize()));
+    }
+    GetFunction().SetHasCall();
+
+    BB *jointBB = CreateNewBB();
+    jointBB->AddLabel(jointLabIdx);
+    SetLab2BBMap(jointLabIdx, *jointBB);
+    srcCompressedBB->AppendBB(*patternCompressedBB);
+    patternCompressedBB->AppendBB(*intrinsicBB);
+    intrinsicBB->AppendBB(*callBB);
+    callBB->AppendBB(*jointBB);
+    SetCurBB(*jointBB);
+}
+
+/* Lmbc calls have no arguments; they are all explicit iassignspoff or
+   blkassign. 
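+   (Under the lmbc flavor the argument values were already evaluated by
+   those earlier statements and recorded in the LmbcArgInfo lists --
+   operands in GetLmbcCallArgs(), types in GetLmbcCallArgTypes(), stack
+   offsets in GetLmbcCallArgOffsets() -- so this function only moves or
+   spills what was collected.)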
Info collected and to be emitted here */ +void AArch64CGFunc::LmbcSelectParmList(ListOperand *srcOpnds, bool isArgReturn) +{ + if (GetLmbcArgInfo() == nullptr) { + return; /* no arg */ + } + CHECK_FATAL(GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc, "To be called for Lmbc model only"); + MapleVector &args = GetLmbcCallArgs(); + MapleVector &types = GetLmbcCallArgTypes(); + MapleVector &offsets = GetLmbcCallArgOffsets(); + MapleVector ®s = GetLmbcCallArgNumOfRegs(); + int iCnt = 0; + int fCnt = 0; + for (size_t i = isArgReturn ? 1 : 0; i < args.size(); i++) { + RegType ty = args[i]->GetRegisterType(); + PrimType pTy = types[i]; + AArch64reg reg; + if (args[i]->IsOfIntClass() && (iCnt + regs[i]) <= static_cast(k8ByteSize)) { + reg = static_cast(R0 + iCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else if (!args[i]->IsOfIntClass() && (fCnt + regs[i]) <= static_cast(k8ByteSize)) { + reg = static_cast(V0 + fCnt++); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, GetPrimTypeSize(pTy) * kBitsPerByte, ty); + SelectCopy(*res, pTy, *args[i], pTy); + srcOpnds->PushOpnd(*res); + } else { + uint32 pSize = GetPrimTypeSize(pTy); + Operand &memOpd = CreateMemOpnd(RSP, offsets[i], pSize); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickStInsn(pSize * kBitsPerByte, pTy), *args[i], memOpd)); + } + } + /* Load x8 if 1st arg is for agg return */ + if (isArgReturn) { + AArch64reg reg = static_cast(R8); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, GetPrimTypeSize(PTY_a64) * kBitsPerByte, kRegTyInt); + SelectCopy(*res, PTY_a64, *args[0], PTY_a64); + srcOpnds->PushOpnd(*res); + } + ResetLmbcArgInfo(); /* reset */ + ResetLmbcArgsInRegs(); + ResetLmbcTotalArgs(); +} + +void AArch64CGFunc::SelectCall(CallNode &callNode) +{ + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + MIRType *retType = fn->GetReturnType(); + + if (GetCG()->GenerateVerboseCG()) { + const std::string &comment = fsym->GetName(); + GetCurBB()->AppendInsn(CreateCommentInsn(comment)); + } + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + SetLmbcCallReturnType(nullptr); + bool largeStructRet = false; + if (fn->IsFirstArgReturn()) { + MIRPtrType *ptrTy = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[0].formalTyIdx)); + MIRType *sTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTy->GetPointedTyIdx()); + largeStructRet = sTy->GetSize() > k16ByteSize; + SetLmbcCallReturnType(sTy); + } else { + MIRType *ty = fn->GetReturnType(); + SetLmbcCallReturnType(ty); + } + LmbcSelectParmList(srcOpnds, largeStructRet); + } + bool callNative = false; + if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || + (fsym->GetName() == "MCC_CallSlowNative0") || (fsym->GetName() == "MCC_CallSlowNative1") || + (fsym->GetName() == "MCC_CallSlowNative2") || (fsym->GetName() == "MCC_CallSlowNative3") || + (fsym->GetName() == "MCC_CallSlowNative4") || (fsym->GetName() == "MCC_CallSlowNative5") || + (fsym->GetName() == "MCC_CallSlowNative6") || (fsym->GetName() == "MCC_CallSlowNative7") || + (fsym->GetName() == "MCC_CallSlowNative8") || (fsym->GetName() == "MCC_CallSlowNativeExt")) { + callNative = true; + } + + 
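+    /* MCC_DecRefResetPair / MCC_ClearLocalStackRef take addrof operands that
+     * denote stack slots rather than values, so they bypass the normal
+     * parameter lowering: SelectClearStackCallParmList records each slot's
+     * frame offset in stackPosition, and those offsets are attached to the
+     * call insn further down via SetClearStackOffset. */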
std::vector stackPosition; + if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + SelectClearStackCallParmList(callNode, *srcOpnds, stackPosition); + } else { + SelectParmListWrapper(callNode, *srcOpnds, callNative); + } + if (callNative) { + GetCurBB()->AppendInsn(CreateCommentInsn("call native func")); + + BaseNode *funcArgExpr = callNode.Opnd(0); + PrimType ptype = funcArgExpr->GetPrimType(); + Operand *funcOpnd = HandleExpr(callNode, *funcArgExpr); + RegOperand &livein = + GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + SelectCopy(livein, ptype, *funcOpnd, ptype); + + RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + srcOpnds->PushOpnd(extraOpnd); + } + const std::string &funcName = fsym->GetName(); + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && + funcName == "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I") { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + IntrinsifyStringIndexOf(*srcOpnds, *st); + return; + } + Insn &callInsn = AppendCall(*fsym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo(); + for (const auto &elem : deoptBundleInfo) { + auto valueKind = elem.second.GetMapleValueKind(); + if (valueKind == MapleValue::kPregKind) { + auto *opnd = GetOpndFromPregIdx(elem.second.GetPregIdx()); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else if (valueKind == MapleValue::kConstKind) { + auto *opnd = SelectIntConst(static_cast(elem.second.GetConstValue())); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else { + CHECK_FATAL(false, "not supported currently"); + } + } + AppendStackMapInsn(callInsn); + + /* check if this call use stack slot to return */ + if (fn->IsFirstArgReturn()) { + SetStackProtectInfo(kRetureStackSlot); + } + + GetFunction().SetHasCall(); + if (GetMirModule().IsCModule()) { /* do not mark abort BB in C at present */ + if (fsym->GetName() == "__builtin_unreachable") { + GetCurBB()->ClearInsns(); + GetCurBB()->SetUnreachable(true); + } + return; + } + if ((fsym->GetName() == "MCC_ThrowException") || (fsym->GetName() == "MCC_RethrowException") || + (fsym->GetName() == "MCC_ThrowArithmeticException") || + (fsym->GetName() == "MCC_ThrowArrayIndexOutOfBoundsException") || + (fsym->GetName() == "MCC_ThrowNullPointerException") || + (fsym->GetName() == "MCC_ThrowStringIndexOutOfBoundsException") || (fsym->GetName() == "abort") || + (fsym->GetName() == "exit") || (fsym->GetName() == "MCC_Array_Boundary_Check")) { + callInsn.SetIsThrow(true); + GetCurBB()->SetKind(BB::kBBThrow); + } else if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + for (size_t i = 0; i < stackPosition.size(); ++i) { + callInsn.SetClearStackOffset(i, stackPosition[i]); + } + } +} + +void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) +{ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + SelectParmListWrapper(icallNode, *srcOpnds, false); + + Operand *fptrOpnd = &srcOpnd; + if (fptrOpnd->GetKind() != 
Operand::kOpdRegister) { + PrimType ty = icallNode.Opnd(0)->GetPrimType(); + fptrOpnd = &SelectCopy(srcOpnd, ty, ty); + } + DEBUG_ASSERT(fptrOpnd->IsRegister(), "SelectIcall: function pointer not RegOperand"); + RegOperand *regOpnd = static_cast(fptrOpnd); + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds); + + MIRType *retType = icallNode.GetCallReturnType(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + + /* check if this icall use stack slot to return */ + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if (p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) { + SetStackProtectInfo(kRetureStackSlot); + } + } + + GetCurBB()->AppendInsn(callInsn); + GetCurBB()->SetHasCall(); + DEBUG_ASSERT(GetCurBB()->GetLastInsn()->IsCall(), "lastInsn should be a call"); + GetFunction().SetHasCall(); + const auto &deoptBundleInfo = icallNode.GetDeoptBundleInfo(); + for (const auto &elem : deoptBundleInfo) { + auto valueKind = elem.second.GetMapleValueKind(); + if (valueKind == MapleValue::kPregKind) { + auto *opnd = GetOpndFromPregIdx(elem.second.GetPregIdx()); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else if (valueKind == MapleValue::kConstKind) { + auto *opnd = SelectIntConst(static_cast(elem.second.GetConstValue())); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else { + CHECK_FATAL(false, "not supported currently"); + } + } + AppendStackMapInsn(callInsn); +} + +void AArch64CGFunc::HandleCatch() +{ + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + regno_t regNO = uCatch.regNOCatch; + RegOperand &vregOpnd = GetOrCreateVirtualRegisterOperand(regNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + MOP_xmovrr, vregOpnd, GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt))); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + PickStInsn(uCatch.opndCatch->GetSize(), PTY_a64), + GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt), *uCatch.opndCatch)); + } +} + +void AArch64CGFunc::SelectMembar(StmtNode &membar) +{ + switch (membar.GetOpCode()) { + case OP_membaracquire: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishld, AArch64CG::kMd[MOP_dmb_ishld])); + break; + case OP_membarrelease: + case OP_membarstoreload: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + break; + case OP_membarstorestore: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishst, AArch64CG::kMd[MOP_dmb_ishst])); + break; + default: + DEBUG_ASSERT(false, "NYI"); + break; + } +} + +void AArch64CGFunc::SelectComment(CommentNode &comment) +{ + GetCurBB()->AppendInsn(CreateCommentInsn(comment.GetComment())); +} + +void AArch64CGFunc::SelectReturn(Operand *opnd0) +{ + bool is64x1vec = GetFunction().GetAttr(FUNCATTR_oneelem_simd) ? true : false; + MIRType *floatType = GlobalTables::GetTypeTable().GetDouble(); + MIRType *retTyp = is64x1vec ? 
floatType : GetFunction().GetReturnType(); + CCImpl &retLocator = *GetOrCreateLocator(GetCurCallConvKind()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*retTyp, retMech); + if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { + RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); + PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); + AArch64reg retReg = static_cast(retMech.GetReg0()); + if (opnd0->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd0); + if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) { + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType); + } + } else if (opnd0->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd0); + RegOperand &retOpnd = + GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp); + MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, retOpnd, *memopnd)); + } else if (opnd0->IsConstImmediate()) { + ImmOperand *immOpnd = static_cast(opnd0); + if (!is64x1vec) { + RegOperand &retOpnd = + GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), + GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0())); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0()); + } else { + PrimType rType = GetFunction().GetReturnType()->GetPrimType(); + RegOperand *reg = &CreateRegisterOperandOfType(rType); + SelectCopy(*reg, rType, *immOpnd, rType); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, GetPrimTypeBitSize(PTY_f64), + GetRegTyFromPrimTy(PTY_f64)); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xvmovdr, retOpnd, *reg); + GetCurBB()->AppendInsn(insn); + } + } else { + CHECK_FATAL(false, "nyi"); + } + } + GetExitBBsVec().emplace_back(GetCurBB()); +} + +RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType) +{ + AArch64reg reg = R0; + switch (sregIdx) { + case kSregSp: + reg = RSP; + break; + case kSregFp: + reg = RFP; + break; + case kSregGp: { + MIRSymbol *sym = GetCG()->GetGP(); + if (sym == nullptr) { + sym = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + std::string strBuf("__file__local__GP"); + sym->SetNameStrIdx(GetMirModule().GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + GetCG()->SetGP(sym); + } + RegOperand &result = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAddrof(result, CreateStImmOperand(*sym, 0, 0)); + return result; + } + case kSregThrownval: { /* uses x0 == R0 */ + DEBUG_ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + RegOperand ®Opnd = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(uCatch.opndCatch->GetSize(), PTY_a64), + regOpnd, *uCatch.opndCatch)); + return regOpnd; + } else { + return GetOrCreateVirtualRegisterOperand(uCatch.regNOCatch); + } + } + case kSregRetval0: + if (!IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType)) { + reg = V0; + } + break; + case kSregMethodhdl: + if (methodHandleVreg == regno_t(-1)) { + methodHandleVreg = NewVReg(kRegTyInt, k8BitSize); + } + return GetOrCreateVirtualRegisterOperand(methodHandleVreg); + 
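+        /* any other special pseudo-register has no AArch64 mapping here and
+           falls through to the assert below */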
default: + DEBUG_ASSERT(false, "Special pseudo registers NYI"); + break; + } + return GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); +} + +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) +{ + DEBUG_ASSERT(!asmAttr.empty(), "Get inline asm string failed in GetOrCreatePhysicalRegisterOperand"); + RegType rKind = kRegTyUndef; + uint32 rSize = 0; + /* Get Register Type and Size */ + switch (asmAttr[0]) { + case 'x': { + rKind = kRegTyInt; + rSize = k64BitSize; + break; + } + case 'w': { + rKind = kRegTyInt; + rSize = k32BitSize; + break; + } + default: { + LogInfo::MapleLogger() << "Unsupport asm string : " << asmAttr << "\n"; + CHECK_FATAL(false, "Have not support this kind of register "); + } + } + AArch64reg rNO = kRinvalid; + /* Get Register Number */ + uint32 regNumPos = 1; + char numberChar = asmAttr[regNumPos++]; + if (numberChar >= '0' && numberChar <= '9') { + uint32 val = static_cast(numberChar - '0'); + if (regNumPos < asmAttr.length()) { + char numberCharSecond = asmAttr[regNumPos++]; + DEBUG_ASSERT(regNumPos == asmAttr.length(), "Invalid asm attribute"); + if (numberCharSecond >= '0' && numberCharSecond <= '9') { + val = val * kDecimalMax + static_cast((numberCharSecond - '0')); + } + } + rNO = static_cast(static_cast(R0) + val); + if (val > (kAsmInputRegPrefixOpnd + 1)) { + LogInfo::MapleLogger() << "Unsupport asm string : " << asmAttr << "\n"; + CHECK_FATAL(false, "have not support this kind of register "); + } + } else if (numberChar == 0) { + return CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + } else { + CHECK_FATAL(false, "Unexpect input in GetOrCreatePhysicalRegisterOperand"); + } + return GetOrCreatePhysicalRegisterOperand(rNO, rSize, rKind); +} + +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType kind, uint32 flag) +{ + uint64 aarch64PhyRegIdx = regNO; + DEBUG_ASSERT(flag == 0, "Do not expect flag here"); + if (size <= k32BitSize) { + size = k32BitSize; + aarch64PhyRegIdx = aarch64PhyRegIdx << 1; + } else if (size <= k64BitSize) { + size = k64BitSize; + aarch64PhyRegIdx = (aarch64PhyRegIdx << 1) + 1; + } else { + size = (size == k128BitSize) ? 
k128BitSize : k64BitSize; + aarch64PhyRegIdx = aarch64PhyRegIdx << 2; + } + RegOperand *phyRegOpnd = nullptr; + auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx); + if (phyRegIt != phyRegOperandTable.end()) { + phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx]; + } else { + phyRegOpnd = memPool->New(regNO, size, kind, flag); + phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd); + } + return *phyRegOpnd; +} + +const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const +{ + const MapleUnorderedMap::const_iterator it = hashLabelOpndTable.find(labIdx); + if (it != hashLabelOpndTable.end()) { + return it->second; + } + return nullptr; +} + +LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) +{ + MapleUnorderedMap::iterator it = hashLabelOpndTable.find(labIdx); + if (it != hashLabelOpndTable.end()) { + return *(it->second); + } + const char *funcName = GetShortFuncName().c_str(); + LabelOperand *res = memPool->New(funcName, labIdx); + hashLabelOpndTable[labIdx] = res; + return *res; +} + +LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb) +{ + LabelIdx labelIdx = bb.GetLabIdx(); + if (labelIdx == MIRLabelTable::GetDummyLabel()) { + labelIdx = CreateLabel(); + bb.AddLabel(labelIdx); + } + return GetOrCreateLabelOperand(labelIdx); +} + +uint32 AArch64CGFunc::GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const +{ + /* Generating a larger sized mem op than alignment if allowed by aggregate starting address */ + uint32 offsetAlign1 = (offset1 == 0) ? k8ByteSize : offset1; + uint32 offsetAlign2 = (offset2 == 0) ? k8ByteSize : offset2; + uint32 alignOffset = + 1U << (std::min(__builtin_ffs(static_cast(offsetAlign1)), __builtin_ffs(static_cast(offsetAlign2))) - + 1); + if (alignOffset == k8ByteSize || alignOffset == k4ByteSize || alignOffset == k2ByteSize) { + return alignOffset; + } else if (alignOffset > k8ByteSize) { + return k8ByteSize; + } else { + return alignment; + } +} + +OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size) +{ + uint64 aarch64OfstRegIdx = offset; + aarch64OfstRegIdx = (aarch64OfstRegIdx << 1); + if (size == k64BitSize) { + ++aarch64OfstRegIdx; + } + DEBUG_ASSERT(size == k32BitSize || size == k64BitSize, "ofStOpnd size check"); + auto it = hashOfstOpndTable.find(aarch64OfstRegIdx); + if (it != hashOfstOpndTable.end()) { + return *it->second; + } + OfstOperand *res = &CreateOfstOpnd(offset, size); + hashOfstOpndTable[aarch64OfstRegIdx] = res; + return *res; +} + +void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector &rematInsns) +{ + const MIRSymbol *symbol = stImm.GetSymbol(); + DEBUG_ASSERT((symbol->GetStorageClass() != kScAuto) || (symbol->GetStorageClass() != kScFormal), ""); + Operand *srcOpnd = &result; + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + static_cast(srcOpnd), nullptr, &offset, nullptr); + rematInsns.emplace_back( + &GetInsnBuilder()->BuildInsn(memOpnd.GetSize() == k64BitSize ? 
MOP_xldr : MOP_wldr, result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xaddrri12, result, result, immOpnd)); + return; + } + } else { + rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, bool needLow12, + RegOperand *regOp, std::vector &rematInsns) +{ + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + if (symbol.GetSKind() == kStConst) { + DEBUG_ASSERT(offset == 0, "offset should be 0 for constant literals"); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + if (needLow12) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + rematInsns.emplace_back(&insn); + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef, + bool needLow12, RegOperand *regOp) +{ + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + AArch64SymbolAlloc *symLoc = + static_cast(GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + if (forLocalRef) { + auto p = GetMemlayout()->GetLocalRefLocMap().find(symbol.GetStIdx()); + CHECK_FATAL(p != GetMemlayout()->GetLocalRefLocMap().end(), "sym loc should have been defined"); + symLoc = static_cast(p->second); + } + DEBUG_ASSERT(symLoc != nullptr, "sym loc should have been defined"); + /* At this point, we don't know which registers the callee needs to save. 
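+       The final frame layout (and hence GetBaseOffset) can still change once
+       the callee-saved list is fixed by register allocation; that is why the
+       operands built below are cached in memOpndsRequiringOffsetAdjustment
+       so their offsets can be patched afterwards.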
*/ + DEBUG_ASSERT((IsFPLRAddedToCalleeSavedList() || (SizeOfCalleeSaved() == 0)), + "CalleeSaved won't be known until after Register Allocation"); + StIdx idx = symbol.GetStIdx(); + auto it = memOpndsRequiringOffsetAdjustment.find(idx); + DEBUG_ASSERT((!IsFPLRAddedToCalleeSavedList() || + ((it != memOpndsRequiringOffsetAdjustment.end()) || (storageClass == kScFormal))), + "Memory operand of this symbol should have been added to the hash table"); + int32 stOffset = GetBaseOffset(*symLoc); + if (it != memOpndsRequiringOffsetAdjustment.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else if (mirModule.IsJavaModule()) { + return *(it->second); + } else { + Operand *offOpnd = (it->second)->GetOffset(); + if (((static_cast(offOpnd))->GetOffsetValue() == (stOffset + offset)) && + (it->second->GetSize() == size)) { + return *(it->second); + } + } + } + it = memOpndsForStkPassedArguments.find(idx); + if (it != memOpndsForStkPassedArguments.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else { + return *(it->second); + } + } + + RegOperand *baseOpnd = static_cast(GetBaseReg(*symLoc)); + int32 totalOffset = stOffset + static_cast(offset); + /* needs a fresh copy of ImmOperand as we may adjust its offset at a later stage. */ + OfstOperand *offsetOpnd = nullptr; + if (CGOptions::IsBigEndian()) { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) { + offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast(totalOffset), k64BitSize); + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && + MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) { + ImmOperand *offsetOprand; + offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary); + Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, static_cast(*resImmOpnd), + nullptr, symbol, true); + } else { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd->SetVary(kUnAdjustVary); + } + MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, nullptr, offsetOpnd, &symbol); + if ((symbol.GetType()->GetKind() != kTypeClass) && !forLocalRef) { + memOpndsRequiringOffsetAdjustment[idx] = res; + } + return *res; + } + } else if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + if (symbol.GetSKind() == kStConst) { + DEBUG_ASSERT(offset == 0, "offset should be 0 for constant literals"); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + /* not guaranteed align for uninitialized symbol */ + if (needLow12 || (!symbol.IsConst() && CGOptions::IsPIC())) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = 
static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + GetCurBB()->AppendInsn(insn); + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd) +{ + auto it = hashMemOpndTable.find(tMemOpnd); + if (it != hashMemOpndTable.end()) { + return *(it->second); + } + auto *res = memPool->New(tMemOpnd); + hashMemOpndTable[tMemOpnd] = res; + return *res; +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base, + RegOperand *index, ImmOperand *offset, const MIRSymbol *st) +{ + DEBUG_ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, index, offset, st); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base, + RegOperand *index, int32 shift, bool isSigned) +{ + DEBUG_ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem) +{ + return HashMemOpnd(oldMem); +} + +/* offset: base offset from FP or SP */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size) +{ + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + /* do not need to check bit size rotate of sign immediate */ + bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair); + if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) { + Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, static_cast(resImmOpnd), + nullptr, nullptr); + } else { + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, nullptr); + } +} + +/* offset: base offset + #:lo12:Label+immediate */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym) +{ + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + DEBUG_ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), ""); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); +} + +RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, + PrimType baseType, PrimType targetType) +{ + RegOperand *index = &LoadIntoRegister(*HandleExpr(indexExpr, *(indexExpr.Opnd(0))), 
PTY_a64); + RegOperand *srcOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand *imm = &CreateImmOperand(PTY_a64, shift); + SelectShift(*srcOpnd, *index, *imm, kShiftLeft, PTY_a64); + RegOperand *result = &CreateRegisterOperandOfType(PTY_a64); + SelectAdd(*result, base, *srcOpnd, PTY_a64); + + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand &mo = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, result, nullptr, offopnd, nullptr); + RegOperand &structAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(baseType), targetType), structAddr, mo)); + return structAddr; +} + +/* + * case 1: iread a64 <* <* void>> 0 (add a64 ( + * addrof a64 $__reg_jni_func_tab$$libcore_all_bytecode, + * mul a64 ( + * cvt a64 i32 (constval i32 21), + * constval a64 8))) + * + * case 2 : iread u32 <* u8> 0 (add a64 (regread a64 %61, constval a64 3)) + * case 3 : iread u32 <* u8> 0 (add a64 (regread a64 %61, regread a64 %65)) + * case 4 : iread u32 <* u8> 0 (add a64 (cvt a64 i32(regread %n))) + */ +MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) +{ + aggParamReg = nullptr; + if (memOrd != AArch64isa::kMoNone || addrExpr.GetOpCode() != OP_add || offset != 0) { + return nullptr; + } + BaseNode *baseExpr = addrExpr.Opnd(0); + BaseNode *addendExpr = addrExpr.Opnd(1); + + if (baseExpr->GetOpCode() == OP_regread) { + /* case 2 */ + if (addendExpr->GetOpCode() == OP_constval) { + DEBUG_ASSERT(addrExpr.GetNumOpnds() == 2, "Unepect expr operand in CheckAndCreateExtendMemOpnd"); + ConstvalNode *constOfstNode = static_cast(addendExpr); + DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); + CHECK_FATAL(intOfst != nullptr, "just checking"); + /* discard large offset and negative offset */ + if (intOfst->GetExtValue() > INT32_MAX || intOfst->IsNegative()) { + return nullptr; + } + uint32 scale = static_cast(intOfst->GetExtValue()); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize); + uint32 dsize = GetPrimTypeBitSize(ptype); + MemOperand *memOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + SelectRegread(*static_cast(baseExpr)), nullptr, &ofstOpnd, nullptr); + return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? 
memOpnd : nullptr; + /* case 3 */ + } else if (addendExpr->GetOpCode() == OP_regread) { + CHECK_FATAL(addrExpr.GetNumOpnds() == 2, "Unepect expr operand in CheckAndCreateExtendMemOpnd"); + if (GetPrimTypeSize(baseExpr->GetPrimType()) != GetPrimTypeSize(addendExpr->GetPrimType())) { + return nullptr; + } + + auto *baseReg = SelectRegread(*static_cast(baseExpr)); + auto *indexReg = SelectRegread(*static_cast(addendExpr)); + MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), baseReg, + indexReg, nullptr, nullptr); + return memOpnd; + /* case 4 */ + } else if (addendExpr->GetOpCode() == OP_cvt && addendExpr->GetNumOpnds() == 1) { + int shiftAmount = 0; + BaseNode *cvtRegreadNode = addendExpr->Opnd(kInsnFirstOpnd); + if (cvtRegreadNode->GetOpCode() == OP_regread && cvtRegreadNode->IsLeaf()) { + uint32 fromSize = GetPrimTypeBitSize(cvtRegreadNode->GetPrimType()); + uint32 toSize = GetPrimTypeBitSize(addendExpr->GetPrimType()); + + if (toSize < fromSize) { + return nullptr; + } + + MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), + SelectRegread(*static_cast(baseExpr)), + SelectRegread(*static_cast(cvtRegreadNode)), + shiftAmount, toSize != fromSize); + return memOpnd; + } + } + } + if (addendExpr->GetOpCode() != OP_mul || !IsPrimitiveInteger(ptype)) { + return nullptr; + } + BaseNode *indexExpr, *scaleExpr; + indexExpr = addendExpr->Opnd(0); + scaleExpr = addendExpr->Opnd(1); + if (scaleExpr->GetOpCode() != OP_constval) { + return nullptr; + } + ConstvalNode *constValNode = static_cast(scaleExpr); + CHECK_FATAL(constValNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *mirIntConst = safe_cast(constValNode->GetConstVal()); + CHECK_FATAL(mirIntConst != nullptr, "just checking"); + int32 scale = mirIntConst->GetExtValue(); + if (scale < 0) { + return nullptr; + } + uint32 unsignedScale = static_cast(scale); + if (unsignedScale != GetPrimTypeSize(ptype) || indexExpr->GetOpCode() != OP_cvt) { + return nullptr; + } + /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */ + int32 shift = (unsignedScale == 8) ? 3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ? 
1 : 0)); + RegOperand &base = static_cast(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64)); + TypeCvtNode *typeCvtNode = static_cast(indexExpr); + PrimType fromType = typeCvtNode->FromType(); + PrimType toType = typeCvtNode->GetPrimType(); + if (isAggParamInReg) { + aggParamReg = &GenStructParamIndex(base, *indexExpr, shift, ptype, fromType); + return nullptr; + } + MemOperand *memOpnd = nullptr; + if ((fromType == PTY_i32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, true); + } else if ((fromType == PTY_u32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); + memOpnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, shift, false); + } + return memOpnd; +} + +MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, + int64 offset) +{ + Operand *addrOpnd = nullptr; + if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) && + addrExpr.Opnd(1)->GetOpCode() == OP_constval) { + addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0)); + ConstvalNode *constOfstNode = static_cast(addrExpr.Opnd(1)); + DEBUG_ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); + CHECK_FATAL(intOfst != nullptr, "just checking"); + offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue(); + } else { + addrOpnd = HandleExpr(parent, addrExpr); + } + addrOpnd = static_cast(&LoadIntoRegister(*addrOpnd, PTY_a64)); + Insn *lastInsn = GetCurBB() == nullptr ? nullptr : GetCurBB()->GetLastInsn(); + if ((addrExpr.GetOpCode() == OP_CG_array_elem_add) && (offset == 0) && lastInsn && + (lastInsn->GetMachineOpcode() == MOP_xadrpl12) && + (&lastInsn->GetOperand(kInsnFirstOpnd) == &lastInsn->GetOperand(kInsnSecondOpnd))) { + Operand &opnd = lastInsn->GetOperand(kInsnThirdOpnd); + StImmOperand &stOpnd = static_cast(opnd); + + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(stOpnd.GetOffset()), k32BitSize); + MemOperand &tmpMemOpnd = + GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, stOpnd.GetSymbol()); + GetCurBB()->RemoveInsn(*GetCurBB()->GetLastInsn()); + return tmpMemOpnd; + } else { + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k64BitSize); + return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, nullptr); + } +} + +/* + * Create a memory operand with specified data type and memory ordering, making + * use of aarch64 extend register addressing mode when possible. 
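+ * For example, case 3 above (add of two regreads) folds into a single
+ * base+index operand [xBase, xIdx], and case 4 (cvt of a regread) becomes
+ * [xBase, wIdx, SXTW] or [xBase, wIdx, UXTW] depending on the signedness of
+ * the i32/u32 -> a64 widening, instead of a separate extend-and-add sequence.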
+ */ +MemOperand &AArch64CGFunc::CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) +{ + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return *memOpnd; + } + return CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +MemOperand *AArch64CGFunc::CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) +{ + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return memOpnd; + } else if (aggParamReg != nullptr) { + return nullptr; + } + return &CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +Operand &AArch64CGFunc::GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const +{ + return *memPool->New(symbol); +} + +Operand &AArch64CGFunc::GetOrCreateRflag() +{ + if (rcc == nullptr) { + rcc = &CreateRflagOperand(); + } + return *rcc; +} + +const Operand *AArch64CGFunc::GetRflag() const +{ + return rcc; +} + +Operand &AArch64CGFunc::GetOrCreatevaryreg() +{ + if (vary == nullptr) { + regno_t vRegNO = NewVReg(kRegTyVary, k8ByteSize); + vary = &CreateVirtualRegisterOperand(vRegNO); + } + return *vary; +} + +/* the first operand in opndvec is return opnd */ +void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector &opndVec, PrimType primType, + PrimType retPrimType, bool is2ndRet) +{ + std::vector pt; + pt.push_back(retPrimType); + for (size_t i = 0; i < opndVec.size(); ++i) { + pt.push_back(primType); + } + SelectLibCallNArg(funcName, opndVec, pt, retPrimType, is2ndRet); + return; +} + +void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt, PrimType retPrimType, bool is2ndRet) +{ + std::string newName = funcName; + // Check whether we have a maple version of libcall and we want to use it instead. 
+ if (!CGOptions::IsDuplicateAsmFileEmpty() && asmMap.find(funcName) != asmMap.end()) { + newName = asmMap.at(funcName); + } + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + (void)vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + st->SetTyIdx(GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + if (GetCG()->GenerateVerboseCG()) { + const std::string &comment = "lib call : " + newName; + GetCurBB()->AppendInsn(CreateCommentInsn(comment)); + } + // only create c lib call here + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + /* setup actual parameters */ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 1; i < opndVec.size(); ++i) { + DEBUG_ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty; + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + if (stOpnd->GetKind() != Operand::kOpdRegister) { + stOpnd = &SelectCopy(*stOpnd, pt[i], pt[i]); + } + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, pt[i], *expRegOpnd, pt[i]); + srcOpnds->PushOpnd(parmRegOpnd); + } + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Insn &callInsn = AppendCall(*sym, *srcOpnds); + MIRType *callRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + if (callRetType != nullptr) { + callInsn.SetRetSize(static_cast(callRetType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType())); + } + GetFunction().SetHasCall(); + /* get return value */ + Operand *opnd0 = opndVec[0]; + CCLocInfo retMech; + parmLocator.InitReturnInfo(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech); + if (retMech.GetRegCount() <= 0) { + CHECK_FATAL(false, "should return from register"); + } + if (!opnd0->IsRegister()) { + CHECK_FATAL(false, "nyi"); + } + RegOperand *regOpnd = static_cast(opnd0); + AArch64reg regNum = static_cast(is2ndRet ? 
retMech.GetReg1() : retMech.GetReg0()); + if (regOpnd->GetRegisterNumber() != regNum) { + RegOperand &retOpnd = + GetOrCreatePhysicalRegisterOperand(regNum, regOpnd->GetSize(), GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*opnd0, retPrimType, retOpnd, retPrimType); + } +} + +Operand *AArch64CGFunc::GetBaseReg(const AArch64SymbolAlloc &symAlloc) +{ + MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind(); + DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) || + (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), + "NYI"); + + if (sgKind == kMsArgsStkPassed) { + return &GetOrCreatevaryreg(); + } + + if (fsp == nullptr) { + fsp = &GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt); + } + return fsp; +} + +int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &sa) +{ + const AArch64SymbolAlloc *symAlloc = static_cast<const AArch64SymbolAlloc *>(&sa); + /* Call frame layout of AArch64: + * refer to V2 in aarch64_memlayout.h. + * Do not change this unless you know what you are doing. + */ + const int32 sizeofFplr = 2 * kIntregBytelen; + MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind(); + AArch64MemLayout *memLayout = static_cast<AArch64MemLayout *>(this->GetMemlayout()); + if (sgKind == kMsArgsStkPassed) { /* for callees */ + int32 offset = static_cast<int32>(symAlloc->GetOffset()); + return offset; + } else if (sgKind == kMsArgsRegPassed) { + int32 baseOffset = memLayout->GetSizeOfLocals() + symAlloc->GetOffset() + memLayout->GetSizeOfRefLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsRefLocals) { + int32 baseOffset = symAlloc->GetOffset() + memLayout->GetSizeOfLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsLocals) { + int32 baseOffset = symAlloc->GetOffset(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsSpillReg) { + if (GetCG()->IsLmbc()) { + return symAlloc->GetOffset() + memLayout->SizeOfArgsToStackPass(); + } + int32 baseOffset = symAlloc->GetOffset() + memLayout->SizeOfArgsRegisterPassed() + + memLayout->GetSizeOfLocals() + memLayout->GetSizeOfRefLocals(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsArgsToStkPass) { /* this is for callers */ + return static_cast<int32>(symAlloc->GetOffset()); + } else { + CHECK_FATAL(false, "sgKind check"); + } + return 0; +} + +void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol) +{ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + AppendCall(funcSymbol, *srcOpnds); +} + +void AArch64CGFunc::DBGFixCallFrameLocationOffsets() +{ + for (DBGExprLoc *el : GetDbgCallFrameLocations()) { + if (el->GetSimpLoc()->GetDwOp() == DW_OP_fbreg) { + SymbolAlloc *symloc = static_cast<SymbolAlloc *>(el->GetSymLoc()); + int32 offset = GetBaseOffset(*symloc) - GetDbgCallFrameOffset(); + el->SetFboffset(offset); + } + } +} + +void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isDest, + Insn &insn) +{ + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + DEBUG_ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should be based on a register"); + DEBUG_ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset), + "Spill memory operand should have an immediate offset."); + + ImmOperand *immOpnd = static_cast<ImmOperand *>(&opnd1); + + MOperator mOpCode = MOP_undef; + Insn *curInsn = &insn; + /* lower 24 bits have 1s, higher bits are all 0 */ + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) {
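+        /* A worked example (illustrative only): adding #0x12345 is split into
+         *   add  xd, xn, #0x12, LSL #12   // the high 12 bits (0x12000)
+         *   add  xd, xd, #0x345           // the remaining low 12 bits
+         */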
/* lower 12 bits and higher 12 bits both have 1s */ + Operand *newOpnd0 = &opnd0; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + /* process higher 12 bits */ + ImmOperand &immOpnd2 = + CreateImmOperand(static_cast<int64>(static_cast<uint64>(immOpnd->GetValue()) >> kMaxImmVal12Bits), + immOpnd->GetSize(), immOpnd->IsSignedValue()); + mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd); + DEBUG_ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid"); + if (isDest) { + insn.GetBB()->InsertInsnAfter(insn, newInsn); + } else { + insn.GetBB()->InsertInsnBefore(insn, newInsn); + } + /* get lower 12 bits value */ + immOpnd->ModuloByPow2(static_cast(kMaxImmVal12Bits)); + newOpnd0 = &resOpnd; + curInsn = &newInsn; + } + /* process lower 12 bits value */ + mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd); + DEBUG_ASSERT(IsOperandImmValid(mOpCode, immOpnd, kInsnThirdOpnd), "immOpnd appears invalid"); + if (isDest) { + insn.GetBB()->InsertInsnAfter(*curInsn, newInsn); + } else { + insn.GetBB()->InsertInsnBefore(insn, newInsn); + } + } else { + /* load into register */ + RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = GetInsnBuilder()->BuildInsn(mOpCode, movOpnd, *immOpnd); + mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, movOpnd); + if (isDest) { + (void)insn.GetBB()->InsertInsnAfter(insn, newInsn); + (void)insn.GetBB()->InsertInsnAfter(insn, movInsn); + } else { + (void)insn.GetBB()->InsertInsnBefore(insn, movInsn); + (void)insn.GetBB()->InsertInsnBefore(insn, newInsn); + } + } +} + +MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest, + Insn &insn, AArch64reg regNum, bool &isOutOfRange) +{ + if (vrNum >= vRegTable.size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange"); + } + uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize(); + if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + if (CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) { + isOutOfRange = true; + } + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn); + } else { + isOutOfRange = false; + } + return memOpnd; +} + +void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum) +{ + MemOperand *memOpnd = nullptr; + + auto p = spillRegMemOperands.find(vrNum); + if (p != spillRegMemOperands.end()) { + memOpnd = p->second; + } + + if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) { + auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (pSecond != pRegSpillMemOperands.end()) { + memOpnd = pSecond->second; + } + } + + if (memOpnd == nullptr) { + DEBUG_ASSERT(false, "free spillreg has no mem"); + return; + } + + uint32 size = memOpnd->GetSize(); + MapleUnorderedMap::iterator iter; + if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) { + iter->second->Add(*memOpnd); + } else { + reuseSpillLocMem[size] = memPool->New(*GetFuncScopeAllocator()); + reuseSpillLocMem[size]->Add(*memOpnd); + } +}
+ +MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize) +{ + /* NOTE: must be used in RA only, not in other places. */ + if (IsVRegNOForPseudoRegister(vrNum)) { + auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + } + + auto p = spillRegMemOperands.find(vrNum); + if (p == spillRegMemOperands.end()) { + if (vrNum >= vRegTable.size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::GetOrCreatSpillMem"); + } + uint32 memBitSize = (memSize <= k32BitSize) ? k32BitSize : + (memSize <= k64BitSize) ? k64BitSize : k128BitSize; + auto it = reuseSpillLocMem.find(memBitSize); + if (it != reuseSpillLocMem.end()) { + MemOperand *memOpnd = it->second->GetOne(); + if (memOpnd != nullptr) { + (void)spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } + } + + RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); + int64 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte); + MemOperand *memOpnd = nullptr; + OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast<uint64>(offset), k64BitSize); + memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd, nullptr, offsetOpnd, nullptr); + (void)spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } else { + return p->second; + } +} + +MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i) +{ + MapleUnorderedMap<PregIdx, MemOperand *>::iterator p; + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + p = pRegSpillMemOperands.end(); + } else { + p = pRegSpillMemOperands.find(i); + } + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + int64 offset = GetPseudoRegisterSpillLocation(i); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(i); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetOrCreateFramePointerRegOperand(); + + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast<uint64>(offset), k32BitSize); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr); + if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) { + MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen); + (void)pRegSpillMemOperands.emplace(std::pair(i, &newMemOpnd)); + return &newMemOpnd; + } + (void)pRegSpillMemOperands.emplace(std::pair(i, &memOpnd)); + return &memOpnd; +} + +MIRPreg *AArch64CGFunc::GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA) const +{ + PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); + if (pri == -1) + return nullptr; + return GetFunction().GetPregTab()->PregFromPregIdx(pri); +} + +/* Get the number of the return register of the current function. */
+AArch64reg AArch64CGFunc::GetReturnRegisterNumber() +{ + CCImpl &retLocator = *GetOrCreateLocator(GetCurCallConvKind()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*(GetFunction().GetReturnType()), retMech); + if (retMech.GetRegCount() > 0) { + return static_cast<AArch64reg>(retMech.GetReg0()); + } + return kRinvalid; +} + +bool AArch64CGFunc::CanLazyBinding(const Insn &ldrInsn) const +{ + Operand &memOpnd = ldrInsn.GetOperand(1); + auto &aarchMemOpnd = static_cast<MemOperand &>(memOpnd); + if (aarchMemOpnd.GetAddrMode() != MemOperand::kAddrModeLo12Li) { + return false; + } + + const MIRSymbol *sym = aarchMemOpnd.GetSymbol(); + CHECK_FATAL(sym != nullptr, "sym can't be nullptr"); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab() || + (sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo())) { + return true; + } + + return false; +} + +/* + * add reg, reg, __PTR_C_STR_... + * ldr reg1, [reg] + * => + * ldr reg1, [reg, #:lo12:__Ptr_C_STR_...] + */ +void AArch64CGFunc::ConvertAdrpl12LdrToLdr() +{ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + /* check first insn */ + MOperator thisMop = insn->GetMachineOpcode(); + if (thisMop != MOP_xadrpl12) { + continue; + } + /* check second insn */ + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (!(((nextMop >= MOP_wldrsb) && (nextMop <= MOP_dldp)) || + ((nextMop >= MOP_wstrb) && (nextMop <= MOP_dstp)))) { + continue; + } + + /* Check if the base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast<MemOperand *>(nextInsn->GetMemOpnd()); + CHECK_FATAL(memOpnd != nullptr, "memOpnd can't be nullptr"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + continue; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + continue; + } + + auto &regOpnd = static_cast<RegOperand &>(insn->GetOperand(0)); + + /* Check if the dest operand of insn is identical with the base register of nextInsn. */ + RegOperand *baseReg = memOpnd->GetBaseRegister(); + CHECK_FATAL(baseReg != nullptr, "baseReg can't be nullptr"); + if (baseReg->GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + + StImmOperand &stImmOpnd = static_cast<StImmOperand &>(insn->GetOperand(kInsnThirdOpnd)); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd( + static_cast<uint64>(stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue()), + k32BitSize); + RegOperand &newBaseOpnd = static_cast<RegOperand &>(insn->GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), &newBaseOpnd, + nullptr, &ofstOpnd, stImmOpnd.GetSymbol()); + nextInsn->SetOperand(1, newMemOpnd); + bb->RemoveInsn(*insn); + } + } +} + +/* + * adrp reg1, __muid_func_undef_tab.. + * ldr reg2, [reg1, #:lo12:__muid_func_undef_tab..] + * => + * intrinsic_adrp_ldr reg2, __muid_func_undef_tab... + */
+void AArch64CGFunc::ConvertAdrpLdrToIntrisic() +{ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + + MOperator firstMop = insn->GetMachineOpcode(); + MOperator secondMop = nextInsn->GetMachineOpcode(); + if (!((firstMop == MOP_xadrp) && ((secondMop == MOP_wldr) || (secondMop == MOP_xldr)))) { + continue; + } + + if (CanLazyBinding(*nextInsn)) { + bb->ReplaceInsn( + *insn, GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, nextInsn->GetOperand(0), insn->GetOperand(1))); + bb->RemoveInsn(*nextInsn); + } + } + } +} + +void AArch64CGFunc::ProcessLazyBinding() +{ + ConvertAdrpl12LdrToLdr(); + ConvertAdrpLdrToIntrisic(); +} + +/* + * Generate global long call + * adrp VRx, symbol + * ldr VRx, [VRx, #:lo12:symbol] + * blr VRx + * + * Input: + * insn : insert new instruction after the 'insn' + * func : the symbol of the function to be called + * srcOpnds : list operand of the function to be called + * isCleanCall: set to true when generating a clean call insn + * Return: the 'blr' instruction + */ +Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) +{ + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(func.GetStIdx()); + symbol->SetStorageClass(kScGlobal); + RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*symbol, 0, 0); + OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); + Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); + GetCurBB()->AppendInsn(adrpInsn); + MemOperand &memOrd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPointerSize() * kBitsPerByte, + static_cast<RegOperand *>(&tmpReg), nullptr, &offsetOpnd, symbol); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(memOrd.GetSize() == k64BitSize ? MOP_xldr : MOP_wldr, tmpReg, memOrd);
GetCurBB()->AppendInsn(ldrInsn); + + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds); + GetCurBB()->AppendInsn(callInsn); + GetCurBB()->SetHasCall(); + return callInsn; +} + +/* + * Generate local long call + * adrp VRx, symbol + * add VRx, VRx, #:lo12:symbol + * blr VRx + * + * Input: + * insn : insert new instruction after the 'insn' + * func : the symbol of the function to be called + * srcOpnds : list operand of the function to be called + * isCleanCall: set to true when generating a clean call insn + * Return: the 'blr' instruction + */ +Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) +{ + RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(func, 0, 0); + Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); + GetCurBB()->AppendInsn(adrpInsn); + Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, tmpReg, tmpReg, stOpnd); + GetCurBB()->AppendInsn(addInsn); + Insn *callInsn = &GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds); + GetCurBB()->AppendInsn(*callInsn); + GetCurBB()->SetHasCall(); + return *callInsn; +} + +Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds) +{ + Insn *callInsn = nullptr; + if (CGOptions::IsLongCalls()) { + MIRFunction *mirFunc = sym.GetFunction(); + if (IsDuplicateAsmList(sym) || (mirFunc && mirFunc->GetAttr(FUNCATTR_local))) { + callInsn = &GenerateLocalLongCallAfterInsn(sym, srcOpnds); + } else { + callInsn = &GenerateGlobalLongCallAfterInsn(sym, srcOpnds); + } + } else { + Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym); + callInsn = &GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds); + GetCurBB()->AppendInsn(*callInsn); + GetCurBB()->SetHasCall(); + } + return *callInsn; +} + +bool AArch64CGFunc::IsDuplicateAsmList(const MIRSymbol &sym) const +{ + if (CGOptions::IsDuplicateAsmFileEmpty()) { + return false; + } + + const std::string &name = sym.GetName(); + if ((name == "strlen") || (name == "strncmp") || (name == "memcpy") || (name == "memmove") || (name == "strcmp") || + (name == "memcmp") || (name == "memcmpMpl")) { + return true; + } + return false; +} + +void AArch64CGFunc::SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode) +{ + if (Options::profileGen) { + DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg1 = intrnNode.Opnd(0); + DEBUG_ASSERT(arg1 != nullptr, "nullptr check"); + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + vReg1.SetRegNotBBLocal(); + static const MIRSymbol *bbProfileTab = nullptr; + + // Ref: MeProfGen::InstrumentFunc on ctrTbl naming + std::string ctrTblName = namemangler::kprefixProfCtrTbl + GetMirModule().GetFileName() + "_" + GetName(); + std::replace(ctrTblName.begin(), ctrTblName.end(), '.', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '-', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '/', '_'); + + if (!bbProfileTab || bbProfileTab->GetName() != ctrTblName) { + bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(ctrTblName); + CHECK_FATAL(bbProfileTab != nullptr, "expect counter table"); + } + + ConstvalNode *constvalNode = static_cast<ConstvalNode *>(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type");
MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst); + int64 offset = GetPrimTypeSize(PTY_u64) * mirIntConst->GetExtValue(); + + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlInfo) << "At counter table offset: " << offset << std::endl; + } + MemOperand *memOpnd = &GetOrCreateMemOpnd(*bbProfileTab, offset, k64BitSize); + if (IsImmediateOffsetOutOfRange(*memOpnd, k64BitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, k64BitSize); + } + Operand *reg = &SelectCopy(*memOpnd, PTY_u64, PTY_u64); + ImmOperand &one = CreateImmOperand(1, k64BitSize, false); + SelectAdd(*reg, *reg, one, PTY_u64); + SelectCopy(*memOpnd, PTY_u64, *reg, PTY_u64); + return; + } + + DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg1 = intrnNode.Opnd(0); + DEBUG_ASSERT(arg1 != nullptr, "nullptr check"); + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + vReg1.SetRegNotBBLocal(); + static const MIRSymbol *bbProfileTab = nullptr; + if (!bbProfileTab) { + std::string bbProfileName = namemangler::kBBProfileTabPrefixStr + GetMirModule().GetFileNameAsPostfix(); + bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(bbProfileName); + CHECK_FATAL(bbProfileTab != nullptr, "expect bb profile tab"); + } + ConstvalNode *constvalNode = static_cast<ConstvalNode *>(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst); + int64 idx = GetPrimTypeSize(PTY_u32) * mirIntConst->GetExtValue(); + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlErr) << "Id index " << idx << std::endl; + } + StImmOperand &stOpnd = CreateStImmOperand(*bbProfileTab, idx, 0); + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_counter, vReg1, stOpnd); + newInsn.SetDoNotRemove(true); + GetCurBB()->AppendInsn(newInsn); +} + +void AArch64CGFunc::SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode) +{ + DEBUG_ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg = intrnNode.Opnd(0); + Operand *stOpnd = nullptr; + bool bClinitSeperate = false; + DEBUG_ASSERT(CGOptions::IsPIC(), "must be doPIC"); + if (arg->GetOpCode() == OP_addrof) { + AddrofNode *addrof = static_cast<AddrofNode *>(arg); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx()); + DEBUG_ASSERT(symbol->GetName().find(CLASSINFO_PREFIX_STR) == 0, "must be a symbol with __classinfo__"); + + if (!symbol->IsMuidDataUndefTab()) { + std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName(); + MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr(); + symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType); + bClinitSeperate = true; + symbol->SetStorageClass(kScFstatic); + } + stOpnd = &CreateStImmOperand(*symbol, 0, 0); + } else { + arg = arg->Opnd(0); + BaseNode *arg0 = arg->Opnd(0); + BaseNode *arg1 = arg->Opnd(1); + DEBUG_ASSERT(arg0 != nullptr, "nullptr check"); + DEBUG_ASSERT(arg1 != nullptr, "nullptr check"); + DEBUG_ASSERT(arg0->GetOpCode() == OP_addrof, "expect the operand to be addrof"); + AddrofNode *addrof = static_cast<AddrofNode *>(arg0); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx()); + DEBUG_ASSERT(addrof->GetFieldID() == 0, "For debug SelectMPLClinitCheck."); + ConstvalNode *constvalNode = static_cast<ConstvalNode *>(arg1); + MIRConst *mirConst = constvalNode->GetConstVal();
DEBUG_ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(mirConst); + stOpnd = &CreateStImmOperand(*symbol, mirIntConst->GetExtValue(), 0); + } + + regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg2 = CreateVirtualRegisterOperand(vRegNO2); + vReg2.SetRegNotBBLocal(); + if (bClinitSeperate) { + /* Separate MOP_clinit into MOP_adrp_ldr + MOP_clinit_tail. */ + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, vReg2, *stOpnd); + GetCurBB()->AppendInsn(newInsn); + newInsn.SetDoNotRemove(true); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_clinit_tail, vReg2); + insn.SetDoNotRemove(true); + GetCurBB()->AppendInsn(insn); + } else { + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_clinit, vReg2, *stOpnd); + GetCurBB()->AppendInsn(newInsn); + } +} +void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) +{ + /* FPLR only pushed in regalloc() after intrin function */ + Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + + /* __stack */ + ImmOperand *offsOpnd; + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ + } else { + offsOpnd = &CreateImmOperand(0, k64BitSize, true); + } + ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false); + RegOperand &vReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(GetLoweredPtrType()))); + if (stkSize) { + SelectAdd(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType()); + SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType()); + } else { + SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); /* stack pointer */ + } + OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); /* va_list ptr */ + /* mem operand in va_list struct (lhs) */ + MemOperand *strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, + static_cast<MIRSymbol *>(nullptr)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __gr_top ; it's the same as __stack before the 1st va_arg */ + if (CGOptions::IsArm64ilp32()) { + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize(), k64BitSize); + } else { + offOpnd = &GetOrCreateOfstOpnd(k8BitSize, k64BitSize); + } + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, + static_cast<MIRSymbol *>(nullptr)); + SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(vReg.GetSize() == k64BitSize ?
MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __vr_top */ + int32 grAreaSize = static_cast(static_cast(GetMemlayout())->GetSizeOfGRSaveArea()); + if (CGOptions::IsArm64ilp32()) { + offsOpnd2 = &CreateImmOperand(static_cast(RoundUp(static_cast(grAreaSize), k8ByteSize * 2)), + k64BitSize, false); + } else { + offsOpnd2 = &CreateImmOperand( + static_cast(RoundUp(static_cast(grAreaSize), GetPointerSize() * 2)), k64BitSize, false); + } + SelectSub(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType()); /* if 1st opnd is register => sub */ + SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType()); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 2, k64BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, offOpnd, + static_cast(nullptr)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __gr_offs */ + int32 offs = 0 - grAreaSize; + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + RegOperand *tmpReg = &CreateRegisterOperandOfType(PTY_i32); /* offs value to be assigned (rhs) */ + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 3, k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, offOpnd, + static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); + + /* __vr_offs */ + offs = + static_cast(UINT32_MAX - (static_cast(GetMemlayout())->GetSizeOfVRSaveArea() - 1UL)); + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + tmpReg = &CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd((GetPointerSize() * 3 + sizeof(int32)), k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, offOpnd, + static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); +} + +void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) +{ + DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + /* 2 operands, but only 1 needed. Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + isIntrnCallForC = true; + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = LoadIntoRegister(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) + * where struct S has size 32, address of r and s are on stack but they are named. 
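+ * For orientation, a minimal sketch of the AAPCS64 va_list record that
+ * GenCVaStartIntrin above populates (field names from the ABI document;
+ * offsets assume LP64: 0, 8, 16, 24, 28):
+ *   struct va_list { void *__stack; void *__gr_top; void *__vr_top;
+ *                    int __gr_offs; int __vr_offs; };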
+ */ + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo pLoc; + uint32 stkSize = 0; + uint32 inReg = 0; + for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetFunction().GetNthParamTyIdx(i)); + CHECK_FATAL(GetFunction().GetAttr(FUNCATTR_ccall), "only c calling convention supported here"); + parmLocator.LocateNextParm(*ty, pLoc); + if (pLoc.reg0 == kRinvalid) { /* on stack */ + stkSize = static_cast<uint32>(pLoc.memOffset + pLoc.memSize); + } else { + inReg++; + } + } + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stkSize += (inReg * k8ByteSize); + } + if (CGOptions::IsArm64ilp32()) { + stkSize = static_cast<uint32>(RoundUp(stkSize, k8ByteSize)); + } else { + stkSize = static_cast<uint32>(RoundUp(stkSize, GetPointerSize())); + } + + GenCVaStartIntrin(opnd0, stkSize); + + return; +} + +// output +// add_with_overflow/ sub_with_overflow: +// w1: parm1 +// w2: parm2 +// adds/subs w0, w1, w2 +// cset w3, vs + +// mul_with_overflow: +// w1: parm1 +// w2: parm2 +// smull x0, w0, w1 +// cmp x0, w0, sxtw +// cset w4, ne +void AArch64CGFunc::SelectOverFlowCall(const IntrinsiccallNode &intrnNode) +{ + DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic(); + PrimType type = intrnNode.Opnd(0)->GetPrimType(); + PrimType type2 = intrnNode.Opnd(1)->GetPrimType(); + CHECK_FATAL(type == PTY_i32 || type == PTY_u32, "only support i32 or u32 here"); + CHECK_FATAL(type2 == PTY_i32 || type2 == PTY_u32, "only support i32 or u32 here"); + // deal with parms + RegOperand &opnd0 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(0)), + intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */ + RegOperand &opnd1 = LoadIntoRegister(*HandleExpr(intrnNode, *intrnNode.Opnd(1)), + intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */ + RegOperand &resReg = GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), GetRegTyFromPrimTy(type)); + RegOperand &resReg2 = GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u8), GetRegTyFromPrimTy(PTY_u8)); + Operand &rflag = GetOrCreateRflag(); + // arith operation with set flag + if (intrinsic == INTRN_ADD_WITH_OVERFLOW) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddsrrr, rflag, resReg, opnd0, opnd1)); + SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false); + } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubsrrr, rflag, resReg, opnd0, opnd1)); + SelectAArch64CSet(resReg2, GetCondOperand(CC_VS), false); + } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) { + // smull + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, resReg, opnd0, opnd1)); + Operand &sxtw = CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, 3); + Insn &cmpInsn = GetInsnBuilder()->BuildInsn(MOP_xwcmprre, rflag, resReg, resReg, sxtw); + GetCurBB()->AppendInsn(cmpInsn); + SelectAArch64CSet(resReg2, GetCondOperand(CC_NE), false); + } else { + CHECK_FATAL(false, "niy"); + } + // store back + auto *retVals = &intrnNode.GetReturnVec(); + auto &pair = retVals->at(0); + stIdx2OverflowResult[pair.first] = std::pair(&resReg, &resReg2); + return; +} + +/* + * intrinsiccall C___Atomic_store_N(ptr, val, memorder) + * ====> *ptr = val + * let ptr -> x0 + * let val -> x1 + * implement to asm: str/stlr x1, [x0] + * a store-release would replace str if memorder is not 0 + */ +void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode) +{
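+    /* Caller-side view, as an illustrative sketch (variable and register names
+     * hypothetical):
+     *   __atomic_store_n(&x, v, __ATOMIC_RELEASE);  // -> stlr w1, [x0]
+     *   __atomic_store_n(&x, v, __ATOMIC_RELAXED);  // -> str  w1, [x0]
+     */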
+ auto primType = intrinsiccallNode.Opnd(1)->GetPrimType(); + auto *addr = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)); + auto *value = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + auto *memOrderConst = static_cast<MIRIntConst *>(static_cast<ConstvalNode *>(memOrderOpnd)->GetConstVal()); + auto memOrder = static_cast<std::memory_order>(memOrderConst->GetExtValue()); + SelectAtomicStore(*value, *addr, primType, PickMemOrder(memOrder, false)); +} + +void AArch64CGFunc::SelectAtomicStore(Operand &srcOpnd, Operand &addrOpnd, PrimType primType, + AArch64isa::MemoryOrdering memOrder) +{ + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize); + auto mOp = PickStInsn(GetPrimTypeBitSize(primType), primType, memOrder); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, LoadIntoRegister(srcOpnd, primType), memOpnd)); +} + +void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) +{ + if (CGOptions::IsPIC()) { + SelectCTlsGlobalDesc(result, stImm); + } else { + SelectCTlsLocalDesc(result, stImm); + } + if (stImm.GetOffset() > 0) { + auto &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } +} + +void AArch64CGFunc::SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm) +{ + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, result, *tpidr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_rel, result, result, stImm)); +} + +void AArch64CGFunc::SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm) +{ + /* according to AArch64 Machine Directives */ + auto &r0opnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_u64)); + RegOperand *tlsAddr = &CreateRegisterOperandOfType(PTY_u64); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_call, r0opnd, *tlsAddr, stImm)); + /* release tls address */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_pseduo_tls_release, *tlsAddr)); + // mrs xn, tpidr_el0 + // add x0, x0, xn + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + SelectAdd(result, r0opnd, *specialFunc, PTY_u64); +} + +void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) +{ + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (GetCG()->GenerateVerboseCG()) { + std::string comment = GetIntrinsicName(intrinsic); + GetCurBB()->AppendInsn(CreateCommentInsn(comment)); + } + + /* + * At this moment, we eagerly evaluate all argument expressions. In theory, + * there could be intrinsics that extract meta-information of variables, such as + * their locations, rather than computing their values. Applications + * include building stack maps that help runtime libraries to find the values + * of local variables (See @stackmap in LLVM), in which case knowing their + * locations will suffice.
+ */ + if (intrinsic == INTRN_MPL_CLINIT_CHECK) { /* special case */ + SelectMPLClinitCheck(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_MPL_PROF_COUNTER_INC) { /* special case */ + SelectMPLProfCounterInc(intrinsiccallNode); + return; + } + if ((intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS) || (intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) || + (intrinsic == INTRN_MPL_CLEANUP_NORETESCOBJS)) { + return; + } + // js + if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW || + intrinsic == INTRN_MUL_WITH_OVERFLOW) { + SelectOverFlowCall(intrinsiccallNode); + return; + } + switch (intrinsic) { + case INTRN_C_va_start: + SelectCVaStart(intrinsiccallNode); + return; + case INTRN_C___sync_lock_release_1: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u8); + return; + case INTRN_C___sync_lock_release_2: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u16); + return; + case INTRN_C___sync_lock_release_4: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u32); + return; + case INTRN_C___sync_lock_release_8: + SelectCSyncLockRelease(intrinsiccallNode, PTY_u64); + return; + case INTRN_C___atomic_store_n: + SelectCAtomicStoreN(intrinsiccallNode); + return; + case INTRN_vector_zip_v8u8: + case INTRN_vector_zip_v8i8: + case INTRN_vector_zip_v4u16: + case INTRN_vector_zip_v4i16: + case INTRN_vector_zip_v2u32: + case INTRN_vector_zip_v2i32: + SelectVectorZip(intrinsiccallNode.Opnd(0)->GetPrimType(), + HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)), + HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1))); + return; + case INTRN_C_stack_save: + return; + case INTRN_C_stack_restore: + return; + default: + break; + } + std::vector operands; /* Temporary. Deallocated on return. */ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 0; i < intrinsiccallNode.NumOpnds(); i++) { + BaseNode *argExpr = intrinsiccallNode.Opnd(i); + Operand *opnd = HandleExpr(intrinsiccallNode, *argExpr); + operands.emplace_back(opnd); + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, argExpr->GetPrimType()); + } + RegOperand *expRegOpnd = static_cast(opnd); + srcOpnds->PushOpnd(*expRegOpnd); + } + CallReturnVector *retVals = &intrinsiccallNode.GetReturnVec(); + + switch (intrinsic) { + case INTRN_MPL_ATOMIC_EXCHANGE_PTR: { + BB *origFtBB = GetCurBB()->GetNext(); + Operand *loc = operands[kInsnFirstOpnd]; + Operand *newVal = operands[kInsnSecondOpnd]; + Operand *memOrd = operands[kInsnThirdOpnd]; + + MemOrd ord = OperandToMemOrd(*memOrd); + bool isAcquire = MemOrdIsAcquire(ord); + bool isRelease = MemOrdIsRelease(ord); + + const PrimType kValPrimType = PTY_a64; + + RegOperand &locReg = LoadIntoRegister(*loc, PTY_a64); + /* Because there is no live analysis when -O1 */ + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + locReg.SetRegNotBBLocal(); + } + MemOperand &locMem = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &locReg, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); + RegOperand &newValReg = LoadIntoRegister(*newVal, PTY_a64); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + newValReg.SetRegNotBBLocal(); + } + GetCurBB()->SetKind(BB::kBBFallthru); + + LabelIdx retryLabIdx = CreateLabeledBB(intrinsiccallNode); + + RegOperand *oldVal = SelectLoadExcl(kValPrimType, locMem, isAcquire); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + oldVal->SetRegNotBBLocal(); + } + RegOperand *succ = SelectStoreExcl(kValPrimType, locMem, newValReg, 
isRelease); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + succ->SetRegNotBBLocal(); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *succ, GetOrCreateLabelOperand(retryLabIdx))); + GetCurBB()->SetKind(BB::kBBIntrinsic); + GetCurBB()->SetNext(origFtBB); + + SaveReturnValueInLocal(*retVals, 0, kValPrimType, *oldVal, intrinsiccallNode); + break; + } + case INTRN_GET_AND_ADDI: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_ADDL: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_GET_AND_SETI: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_SETL: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_COMP_AND_SWAPI: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_COMP_AND_SWAPL: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i64); + break; + } + default: { + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the AArch64 CG.", intrinsic, + GetIntrinsicName(intrinsic)); + break; + } + } +} + +Operand *AArch64CGFunc::SelectCclz(IntrinsicopNode &intrnNode) +{ + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + MOperator mop; + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + if (GetPrimTypeSize(ptype) == k4ByteSize) { + mop = MOP_wclz; + } else { + mop = MOP_xclz; + } + RegOperand &dst = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dst, *opnd)); + return &dst; +} + +Operand *AArch64CGFunc::SelectCctz(IntrinsicopNode &intrnNode) +{ + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + MOperator clzmop; + MOperator rbitmop; + if (GetPrimTypeSize(ptype) == k4ByteSize) { + clzmop = MOP_wclz; + rbitmop = MOP_wrbit; + } else { + clzmop = MOP_xclz; + rbitmop = MOP_xrbit; + } + RegOperand &dst1 = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(rbitmop, dst1, *opnd)); + RegOperand &dst2 = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, dst2, dst1)); + return &dst2; +} + +Operand *AArch64CGFunc::SelectCpopcount(IntrinsicopNode &intrnNode) +{ + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCparity(IntrinsicopNode &intrnNode) +{ + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCclrsb(IntrinsicopNode &intrnNode) +{ + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + 
if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + bool is32Bit = (GetPrimTypeSize(ptype) == k4ByteSize); + RegOperand &res = CreateRegisterOperandOfType(ptype); + SelectMvn(res, *opnd, ptype); + SelectAArch64Cmp(*opnd, GetZeroOpnd(is32Bit ? k32BitSize : k64BitSize), true, is32Bit ? k32BitSize : k64BitSize); + SelectAArch64Select(*opnd, res, *opnd, GetCondOperand(CC_LT), true, is32Bit ? k32BitSize : k64BitSize); + MOperator clzmop = (is32Bit ? MOP_wclz : MOP_xclz); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, *opnd, *opnd)); + SelectSub(*opnd, *opnd, CreateImmOperand(1, is32Bit ? k32BitSize : k64BitSize, true), ptype); + return opnd; +} + +Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) +{ + BaseNode *argexpr0 = intrnNode.Opnd(0); + PrimType ptype0 = argexpr0->GetPrimType(); + Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + + RegOperand &ldDest0 = CreateRegisterOperandOfType(ptype0); + if (opnd0->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype0), ptype0), ldDest0, *opnd0)); + opnd0 = &ldDest0; + } else if (opnd0->IsImmediate()) { + SelectCopyImm(ldDest0, *static_cast(opnd0), ptype0); + opnd0 = &ldDest0; + } + + BaseNode *argexpr1 = intrnNode.Opnd(1); + PrimType ptype1 = argexpr1->GetPrimType(); + Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + + RegOperand &ldDest1 = CreateRegisterOperandOfType(ptype1); + if (opnd1->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype1), ptype1), ldDest1, *opnd1)); + opnd1 = &ldDest1; + } else if (opnd1->IsImmediate()) { + SelectCopyImm(ldDest1, *static_cast(opnd1), ptype1); + opnd1 = &ldDest1; + } + // mov w4, #1 + RegOperand ®0 = CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(reg0, CreateImmOperand(1, k32BitSize, true), PTY_i32); + // sxtw x4, w4 + MOperator mOp = MOP_xsxtw64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, reg0, reg0)); + // sub x3, x3, x4 + SelectSub(*opnd1, *opnd1, reg0, ptype1); + // and x2, x2, x3 + SelectBand(*opnd0, *opnd0, *opnd1, ptype1); + // mov w3, #0 + // sxtw x3, w3 + // cmp x2, x3 + SelectAArch64Cmp(*opnd0, GetZeroOpnd(k64BitSize), true, k64BitSize); + // cset w2, EQ + SelectAArch64CSet(*opnd0, GetCondOperand(CC_EQ), false); + return opnd0; +} + +void AArch64CGFunc::SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, + Opcode op) +{ + switch (op) { + case OP_add: + SelectAdd(resOpnd, opnd0, opnd1, primType); + break; + case OP_sub: + SelectSub(resOpnd, opnd0, opnd1, primType); + break; + case OP_band: + SelectBand(resOpnd, opnd0, opnd1, primType); + break; + case OP_bior: + SelectBior(resOpnd, opnd0, opnd1, primType); + break; + case OP_bxor: + SelectBxor(resOpnd, opnd0, opnd1, primType); + break; + default: + CHECK_FATAL(false, "unconcerned opcode for arithmetical and logical insns"); + break; + } +} + +Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) +{ + auto primType = intrinopNode.GetPrimType(); + /* Create BB which includes atomic built_in function */ + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + 
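+    /* An illustrative sketch (register names hypothetical) of the retry loop built
+     * below for, e.g., a 32-bit __sync_fetch_and_add(p, v):
+     *   .Latomic:                     // atomicBB
+     *     ldxr  w1, [x0]              // exclusive load of *p
+     *     add   w2, w1, w3            // SelectArithmeticAndLogical step
+     *     stlxr w4, w2, [x0]          // exclusive store; w4 = status
+     *     cbnz  w4, .Latomic          // retry if exclusivity was lost
+     *     dmb   ish                   // barrier, appended in nextBB
+     */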
atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + /* keep variables inside same BB */ + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* handle built_in args */ + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* load from pointed address */ + auto primTypeP2Size = GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast<RegOperand *>(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + /* update loaded value */ + auto *regOperated = &CreateRegisterOperandOfType(primType); + SelectArithmeticAndLogical(*regOperated, *regLoaded, *valueOpnd, primType, op); + /* store to pointed address */ + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *regOperated, memOpnd)); + /* check the exclusive access status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + atomicBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + return fetchBefore ? regLoaded : regOperated; +}
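+/* Control-flow sketch (illustrative registers) of what SelectCSyncCmpSwap below
+ * emits for __sync_bool_compare_and_swap(p, oldVal, newVal):
+ *   .Latomic:                     // atomicBB
+ *     ldxr  w1, [x0]
+ *     cmp   w1, w_old
+ *     bne   .Lnext                // mismatch: fail without storing
+ *     stlxr w4, w_new, [x0]       // stlxrBB
+ *     cbnz  w4, .Latomic          // exclusivity lost: retry
+ *   .Lnext:                       // nextBB
+ *     dmb  ish
+ *     cset w0, eq                 // bool variant only
+ */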
+Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool) +{ + PrimType primType = intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType(); + DEBUG_ASSERT(primType == intrinopNode.GetNopndAt(kInsnThirdOpnd)->GetPrimType(), "gcc built_in rule"); + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* handle built_in args */ + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *oldVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *newVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + + uint32 primTypeP2Size = GetPrimTypeP2Size(primType); + /* ldxr */ + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + Operand *regExtend = &CreateRegisterOperandOfType(primType); + PrimType targetType = (oldVal->GetSize() <= k32BitSize) ? (IsSignedInteger(primType) ? PTY_i32 : PTY_u32) + : (IsSignedInteger(primType) ? PTY_i64 : PTY_u64); + SelectCvtInt2Int(nullptr, regExtend, regLoaded, primType, targetType); + /* cmp */ + SelectAArch64Cmp(*regExtend, *oldVal, true, oldVal->GetSize()); + /* bne */ + Operand &rflag = GetOrCreateRflag(); + LabelIdx nextBBLableIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(nextBBLableIdx); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, rflag, targetOpnd)); + /* stlxr */ + BB *stlxrBB = CreateNewBB(); + stlxrBB->SetKind(BB::kBBIf); + atomicBB->AppendBB(*stlxrBB); + SetCurBB(*stlxrBB); + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto &newRegVal = LoadIntoRegister(*newVal, primType); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, newRegVal, memOpnd)); + /* cbnz ==> check the exclusive access status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + nextBB->AddLabel(nextBBLableIdx); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + SetLab2BBMap(static_cast(nextBBLableIdx), *nextBB); + stlxrBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + /* the bool version returns true if the comparison is successful and newval is written */ + if (retBool) { + auto *retOpnd = &CreateRegisterOperandOfType(PTY_u32); + SelectAArch64CSet(*retOpnd, GetCondOperand(CC_EQ), false); + return retOpnd; + } + /* the type version returns the contents of *addrOpnd before the operation */ + return regLoaded; +} + +Operand *AArch64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) +{ + return SelectAArch64CSyncFetch(intrinopNode, op, fetchBefore); +}
+Operand *AArch64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) +{ + return SelectCSyncCmpSwap(intrinopNode, true); +} + +Operand *AArch64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) +{ + return SelectCSyncCmpSwap(intrinopNode); +} + +Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) +{ + auto primType = intrinopNode.GetPrimType(); + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + + /* Create BB which includes atomic built_in function */ + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + SetCurBB(*atomicBB); + /* load from pointed address */ + auto primTypeP2Size = GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast<RegOperand *>(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + /* store to pointed address */ + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *valueOpnd, memOpnd)); + /* check the exclusive access status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + atomicBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + return regLoaded; +} + +void AArch64CGFunc::SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType) +{ + auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd)); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + auto mOp = PickStInsn(primTypeBitSize, primType, AArch64isa::kMoRelease); + auto &zero = GetZeroOpnd(primTypeBitSize); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, primTypeBitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, zero, memOpnd)); +} + +Operand *AArch64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinopNode) +{ + (void)intrinopNode; + CHECK_FATAL(false, "have not implemented SelectCSyncSynchronize yet"); + return nullptr; +} + +AArch64isa::MemoryOrdering AArch64CGFunc::PickMemOrder(std::memory_order memOrder, bool isLdr) const +{ + switch (memOrder) { + case std::memory_order_relaxed: + return AArch64isa::kMoNone; + case std::memory_order_consume: + case std::memory_order_acquire: + return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoNone; + case std::memory_order_release: + return isLdr ? AArch64isa::kMoNone : AArch64isa::kMoRelease; + case std::memory_order_acq_rel: + case std::memory_order_seq_cst: + return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoRelease;
+        default:
+            CHECK_FATAL(false, "unexpected memorder");
+            return AArch64isa::kMoNone;
+    }
+}
+
+/*
+ * regassign %1 (intrinsicop C___Atomic_Load_N(ptr, memorder))
+ * ====> %1 = *ptr
+ * let %1 -> x0
+ * let ptr -> x1
+ * implement to asm: ldr/ldar x0, [x1]
+ * a load-acquire (ldar) replaces the plain ldr if memorder is not relaxed
+ */
+Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode)
+{
+    auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0));
+    auto *memOrderOpnd = intrinsicopNode.Opnd(1);
+    auto primType = intrinsicopNode.GetPrimType();
+    auto *memOrderConst = static_cast<MIRIntConst *>(static_cast<ConstvalNode *>(memOrderOpnd)->GetConstVal());
+    auto memOrder = static_cast<std::memory_order>(memOrderConst->GetExtValue());
+    return SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true));
+}
+
+/*
+ * regassign %1 (intrinsicop C___Atomic_exchange_n(ptr, val, memorder))
+ * ====> %1 = *ptr; *ptr = val;
+ * let %1 -> x0
+ * let ptr -> x1
+ * let val -> x2
+ * implement to asm:
+ *   ldr/ldar x0, [x1]
+ *   str/stlr x2, [x1]
+ * a load-acquire (ldar) replaces ldr if acquire is needed
+ * a store-release (stlr) replaces str if release is needed
+ */
+Operand *AArch64CGFunc::SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode)
+{
+    auto primType = intrinsicopNode.GetPrimType();
+    auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0));
+    auto *valueOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(1));
+    auto *memOrderOpnd = intrinsicopNode.Opnd(kInsnThirdOpnd);
+    auto *memOrderConst = static_cast<MIRIntConst *>(static_cast<ConstvalNode *>(memOrderOpnd)->GetConstVal());
+    auto memOrder = static_cast<std::memory_order>(memOrderConst->GetExtValue());
+    auto *result = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true));
+    SelectAtomicStore(*valueOpnd, *addrOpnd, primType, PickMemOrder(memOrder, false));
+    return result;
+}
+
+Operand *AArch64CGFunc::SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder)
+{
+    auto mOp = PickLdInsn(GetPrimTypeBitSize(primType), primType, memOrder);
+    auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize);
+    auto *resultOpnd = &CreateRegisterOperandOfType(primType);
+    GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resultOpnd, memOpnd));
+    return resultOpnd;
+}
+
+Operand *AArch64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode)
+{
+    if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_extract_return_addr) {
+        DEBUG_ASSERT(intrinopNode.GetNumOpnds() == 1, "expect one parameter");
+        Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd));
+        return &LoadIntoRegister(*addrOpnd, PTY_a64);
+    } else if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_return_address) {
+        BaseNode *argexpr0 = intrinopNode.Opnd(0);
+        while (!argexpr0->IsLeaf()) {
+            argexpr0 = argexpr0->Opnd(0);
+        }
+        CHECK_FATAL(argexpr0->IsConstval(), "Invalid argument of __builtin_return_address");
+        auto &constNode = static_cast<ConstvalNode &>(*argexpr0);
+        DEBUG_ASSERT(constNode.GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst, float is not supported yet");
+        MIRIntConst *mirIntConst = safe_cast<MIRIntConst>(constNode.GetConstVal());
+        DEBUG_ASSERT(mirIntConst != nullptr, "nullptr checking");
+        int64 scale = mirIntConst->GetExtValue();
+        /*
+         * Getting the return address with a nonzero argument is not supported:
+         * inlining / tail-call optimization would destroy this behavior
+         */
+        CHECK_FATAL(scale == 0, "Do not support recursion");
+        Operand *resReg = &static_cast<Operand &>(CreateRegisterOperandOfType(PTY_i64));
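+        /* __builtin_return_address(0): on AArch64 the return address is in the
+           link register (x30, RLR) on entry; copy it into the result vreg */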
+        SelectCopy(*resReg, PTY_i64, GetOrCreatePhysicalRegisterOperand(RLR, k64BitSize, kRegTyInt), PTY_i64);
+        return resReg;
+    }
+    return nullptr;
+}
+
+Operand *AArch64CGFunc::SelectCalignup(IntrinsicopNode &intrnNode)
+{
+    return SelectAArch64align(intrnNode, true);
+}
+
+Operand *AArch64CGFunc::SelectCaligndown(IntrinsicopNode &intrnNode)
+{
+    return SelectAArch64align(intrnNode, false);
+}
+
+Operand *AArch64CGFunc::SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp)
+{
+    /* Handle the two args */
+    BaseNode *argexpr0 = intrnNode.Opnd(0);
+    PrimType ptype0 = argexpr0->GetPrimType();
+    Operand *opnd0 = HandleExpr(intrnNode, *argexpr0);
+    PrimType resultPtype = intrnNode.GetPrimType();
+    RegOperand &ldDest0 = LoadIntoRegister(*opnd0, ptype0);
+
+    BaseNode *argexpr1 = intrnNode.Opnd(1);
+    PrimType ptype1 = argexpr1->GetPrimType();
+    Operand *opnd1 = HandleExpr(intrnNode, *argexpr1);
+    RegOperand &arg1 = LoadIntoRegister(*opnd1, ptype1);
+    DEBUG_ASSERT(IsPrimitiveInteger(ptype0) && IsPrimitiveInteger(ptype1), "align integer type only");
+    Operand *ldDest1 = &static_cast<Operand &>(CreateRegisterOperandOfType(ptype0));
+    SelectCvtInt2Int(nullptr, ldDest1, &arg1, ptype1, ptype0);
+
+    Operand *resultReg = &static_cast<Operand &>(CreateRegisterOperandOfType(ptype0));
+    Operand &immReg = CreateImmOperand(1, GetPrimTypeBitSize(ptype0), true);
+    /* Do the alignment: x0 -- value to be aligned, x1 -- alignment;
+       align-up computes (x0 + x1 - 1) & ~(x1 - 1), align-down computes x0 & ~(x1 - 1) */
+    if (isUp) {
+        /* add res, x0, x1 */
+        SelectAdd(*resultReg, ldDest0, *ldDest1, ptype0);
+        /* sub res, res, 1 */
+        SelectSub(*resultReg, *resultReg, immReg, ptype0);
+    }
+    Operand *tempReg = &static_cast<Operand &>(CreateRegisterOperandOfType(ptype0));
+    /* sub temp, x1, 1 */
+    SelectSub(*tempReg, *ldDest1, immReg, ptype0);
+    /* mvn temp, temp */
+    SelectMvn(*tempReg, *tempReg, ptype0);
+    /* and res, res, temp */
+    if (isUp) {
+        SelectBand(*resultReg, *resultReg, *tempReg, ptype0);
+    } else {
+        SelectBand(*resultReg, ldDest0, *tempReg, ptype0);
+    }
+    if (resultPtype != ptype0) {
+        SelectCvtInt2Int(&intrnNode, resultReg, resultReg, ptype0, resultPtype);
+    }
+    return resultReg;
+}
+
+/*
+ * NOTE: consider moving the following things into aarch64_cg.cpp. They may
+ * serve not only intrinsics, but other MapleIR instructions as well.
+ * Do it as if we are adding a label in straight-line assembly code.
+ */
+LabelIdx AArch64CGFunc::CreateLabeledBB(StmtNode &stmt)
+{
+    LabelIdx labIdx = CreateLabel();
+    BB *newBB = StartNewBBImpl(false, stmt);
+    newBB->AddLabel(labIdx);
+    SetLab2BBMap(labIdx, *newBB);
+    SetCurBB(*newBB);
+    return labIdx;
+}
+
+/* Save the value into the local variable for the index-th return value. */
+void AArch64CGFunc::SaveReturnValueInLocal(CallReturnVector &retVals, size_t index, PrimType primType, Operand &value,
+                                           StmtNode &parentStmt)
+{
+    CallReturnPair &pair = retVals.at(index);
+    BB tempBB(static_cast<uint32>(-1), *GetFuncScopeAllocator());
+    BB *realCurBB = GetCurBB();
+    CHECK_FATAL(!pair.second.IsReg(), "NYI");
+    Operand *destOpnd = &value;
+    /* at O0, cross-BB vregs are not supported, so do an extra store/load through a temp register */
+    if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
+        MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(pair.first);
+        MIRType *sPty = symbol->GetType();
+        PrimType ty = symbol->GetType()->GetPrimType();
+        if (sPty->GetKind() == kTypeStruct || sPty->GetKind() == kTypeUnion) {
+            MIRStructType *structType = static_cast<MIRStructType *>(sPty);
+            ty = structType->GetFieldType(pair.second.GetFieldID())->GetPrimType();
+        } else if (sPty->GetKind() == kTypeClass) {
+            CHECK_FATAL(false, "unsupported type for inline asm / intrinsic");
+        }
+        RegOperand &tempReg = CreateVirtualRegisterOperand(NewVReg(GetRegTyFromPrimTy(ty), GetPrimTypeSize(ty)));
+        SelectCopy(tempReg, ty, value, ty);
+        destOpnd = &tempReg;
+    }
+    SetCurBB(tempBB);
+    SelectDassign(pair.first, pair.second.GetFieldID(), primType, *destOpnd);
+
+    CHECK_FATAL(realCurBB->GetNext() == nullptr, "current BB must not have a next BB");
+    realCurBB->SetLastStmt(parentStmt);
+    realCurBB->SetNext(StartNewBBImpl(true, parentStmt));
+    realCurBB->GetNext()->SetKind(BB::kBBFallthru);
+    realCurBB->GetNext()->SetPrev(realCurBB);
+
+    realCurBB->GetNext()->InsertAtBeginning(*GetCurBB());
+    /* restore it */
+    SetCurBB(*realCurBB->GetNext());
+}
+
+/* The following are translations of LL/SC and atomic RMW operations */
+MemOrd AArch64CGFunc::OperandToMemOrd(Operand &opnd) const
+{
+    CHECK_FATAL(opnd.IsImmediate(), "Memory order must be an int constant.");
+    auto immOpnd = static_cast<ImmOperand *>(&opnd);
+    int32 val = immOpnd->GetValue();
+    CHECK_FATAL(val >= 0, "val must be non-negative");
+    return MemOrdFromU32(static_cast<uint32>(val));
+}
+
+/*
+ * Generate an ldxr/ldaxr or stxr/stlxr instruction.
+ * byteP2Size: power-of-2 size of the operand in bytes (0: 1B, 1: 2B, 2: 4B, 3: 8B).
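+ * For example (reading the operators table below), byteP2Size = 2 with
+ * store = false and acqRel = true picks ldaxr (32-bit load-acquire exclusive),
+ * while store = true and acqRel = true picks stlxr.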
+ */ +MOperator AArch64CGFunc::PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const +{ + CHECK_FATAL(byteP2Size < kIntByteSizeDimension, "Illegal argument p2size: %d", byteP2Size); + + static MOperator operators[4][2][2] = {{{MOP_wldxrb, MOP_wldaxrb}, {MOP_wstxrb, MOP_wstlxrb}}, + {{MOP_wldxrh, MOP_wldaxrh}, {MOP_wstxrh, MOP_wstlxrh}}, + {{MOP_wldxr, MOP_wldaxr}, {MOP_wstxr, MOP_wstlxr}}, + {{MOP_xldxr, MOP_xldaxr}, {MOP_xstxr, MOP_xstlxr}}}; + + MOperator optr = operators[byteP2Size][store][acqRel]; + CHECK_FATAL(optr != MOP_undef, "Unsupported type p2size: %d", byteP2Size); + + return optr; +} + +RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire) +{ + uint32 p2size = GetPrimTypeP2Size(valPrimType); + + RegOperand &result = CreateRegisterOperandOfType(valPrimType); + MOperator mOp = PickLoadStoreExclInsn(p2size, false, acquire); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, loc)); + + return &result; +} + +RegOperand *AArch64CGFunc::SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release) +{ + uint32 p2size = GetPrimTypeP2Size(valPty); + + /* the result (success/fail) is to be stored in a 32-bit register */ + RegOperand &result = CreateRegisterOperandOfType(PTY_u32); + + MOperator mOp = PickLoadStoreExclInsn(p2size, true, release); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, newVal, loc)); + + return &result; +} + +RegType AArch64CGFunc::GetRegisterType(regno_t reg) const +{ + if (AArch64isa::IsPhysicalRegister(reg)) { + return AArch64isa::GetRegType(static_cast(reg)); + } else if (reg == kRFLAG) { + return kRegTyCc; + } else { + return CGFunc::GetRegisterType(reg); + } +} + +MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize) +{ + /* For struct formals > 16 bytes, this is the pointer to the struct copy. */ + /* Load the base pointer first. */ + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + MemOperand *baseMemOpnd = &GetOrCreateMemOpnd(symbol, 0, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *baseMemOpnd)); + /* Create the indirect load mem opnd from the base pointer. */ + return CreateMemOpnd(*vreg, offset, static_cast(dataSize)); +} + +/* For long branch, insert an unconditional branch. 
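+ * A conditional branch encodes only a 19-bit (+/-1MB) offset, so an out-of-range
+ * target is reached by flipping the condition and branching over a pad that holds
+ * an unconditional branch, whose 26-bit offset reaches +/-128MB: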
+ * From                          To
+ *   cond_br target_label          reverse_cond_br fallthru_label
+ *   fallthruBB                    unconditional br target_label
+ *                               fallthru_label:
+ *                                 fallthruBB
+ */
+void AArch64CGFunc::InsertJumpPad(Insn *insn)
+{
+    BB *bb = insn->GetBB();
+    DEBUG_ASSERT(bb, "instruction has no bb");
+    DEBUG_ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto,
+                 "instruction is in neither an if bb nor a goto bb");
+    if (bb->GetKind() == BB::kBBGoto) {
+        return;
+    }
+    DEBUG_ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors");
+
+    BB *longBrBB = CreateNewBB();
+
+    BB *fallthruBB = bb->GetNext();
+    LabelIdx fallthruLBL = fallthruBB->GetLabIdx();
+    if (fallthruLBL == 0) {
+        fallthruLBL = CreateLabel();
+        SetLab2BBMap(static_cast<int32>(fallthruLBL), *fallthruBB);
+        fallthruBB->AddLabel(fallthruLBL);
+    }
+
+    BB *targetBB;
+    if (bb->GetSuccs().front() == fallthruBB) {
+        targetBB = bb->GetSuccs().back();
+    } else {
+        targetBB = bb->GetSuccs().front();
+    }
+    LabelIdx targetLBL = targetBB->GetLabIdx();
+    if (targetLBL == 0) {
+        targetLBL = CreateLabel();
+        SetLab2BBMap(static_cast<int32>(targetLBL), *targetBB);
+        targetBB->AddLabel(targetLBL);
+    }
+
+    // Adjust the branch and the CFG
+    bb->RemoveSuccs(*targetBB);
+    bb->PushBackSuccs(*longBrBB);
+    bb->SetNext(longBrBB);
+    // the reversed cond br targets fallthruBB
+    uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*insn);
+    MOperator mOp = AArch64isa::FlipConditionOp(insn->GetMachineOpcode());
+    insn->SetMOP(AArch64CG::kMd[mOp]);
+    LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL);
+    insn->SetOperand(targetIdx, fallthruBBLBLOpnd);
+
+    longBrBB->PushBackPreds(*bb);
+    longBrBB->PushBackSuccs(*targetBB);
+    LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL);
+    longBrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetLBLOpnd));
+    longBrBB->SetPrev(bb);
+    longBrBB->SetNext(fallthruBB);
+    longBrBB->SetKind(BB::kBBGoto);
+
+    fallthruBB->SetPrev(longBrBB);
+
+    targetBB->RemovePreds(*bb);
+    targetBB->PushBackPreds(*longBrBB);
+}
+
+RegOperand *AArch64CGFunc::AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd)
+{
+    RegOperand *resCvt = &CreateRegisterOperandOfType(oType);
+    Insn *insnCvt = &GetInsnBuilder()->BuildInsn(MOP_xvmovrd, *resCvt, *opnd);
+    GetCurBB()->AppendInsn(*insnCvt);
+    return resCvt;
+}
+
+RegOperand *AArch64CGFunc::SelectOneElementVectorCopy(Operand *src, PrimType sType)
+{
+    RegOperand *res = &CreateRegisterOperandOfType(PTY_f64);
+    SelectCopy(*res, PTY_f64, *src, sType);
+    static_cast<RegOperand *>(res)->SetIF64Vec();
+    return res;
+}
+
+RegOperand *AArch64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1)
+{
+    RegOperand *res = &CreateRegisterOperandOfType(rType);                  /* result operand */
+    VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+    VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(rType);   /* vector operand 1 */
+
+    MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ?
MOP_vabsvv : MOP_vabsuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result type */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp); /* vector operand 2 */ + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddlvuu : MOP_vsaddlvuu; + } else { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddl2vvv : MOP_vsaddl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) +{ + RegOperand *res = &CreateRegisterOperandOfType(otyp1); /* restype is same as o1 */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(otyp1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddwvvu : MOP_vsaddwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddw2vvv : MOP_vsaddw2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorImmMov(PrimType rType, Operand *src, PrimType sType) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + int64 val = static_cast(src)->GetValue(); + /* copy the src imm operand to a reg if out of range */ + if ((GetVecEleSize(rType) >= k64BitSize) || (GetPrimTypeSize(sType) > k4ByteSize && val != 0) || + (val < kMinImmVal || val > kMaxImmVal)) { + Operand *reg = &CreateRegisterOperandOfType(sType); + SelectCopy(*reg, sType, *src, sType); + return SelectVectorRegMov(rType, reg, sType); + } + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + if (GetVecEleSize(rType) == k8BitSize && val < 0) { + src = &CreateImmOperand(static_cast(val), k8BitSize, true); + } else if (val < 0) { + src = &CreateImmOperand(-(val + 1), k8BitSize, true); + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vnotvi : MOP_vnotui; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorRegMov(PrimType rType, Operand *src, PrimType sType) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + MOperator mOp; + if (GetPrimTypeSize(sType) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vwdupvr : MOP_vwdupur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorFromScalar(PrimType rType, Operand *src, PrimType sType) +{ + if (!IsPrimitiveVector(rType)) { + return SelectOneElementVectorCopy(src, sType); + } else if (src->IsConstImmediate()) { + return SelectVectorImmMov(rType, src, sType); + } else { + return SelectVectorRegMov(rType, src, sType); + } +} + +RegOperand *AArch64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) +{ + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(k2ByteSize, k64BitSize, getLow ? 0 : 1); + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vduprv, AArch64CG::kMd[MOP_vduprv]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + static_cast(res)->SetIF64Vec(); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType, lane); /* vector operand */ + + MOperator mop; + if (!IsPrimitiveVector(sType)) { + mop = MOP_xmovrr; + } else if (GetPrimTypeBitSize(rType) >= k64BitSize) { + mop = MOP_vxmovrv; + } else { + mop = (GetPrimTypeBitSize(sType) > k64BitSize) ? MOP_vwmovrv : MOP_vwmovru; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* adalp o1, o2 instruction accumulates into o1, overwriting the original operand. + Hence we perform c = vadalp(a,b) as + T tmp = a; + return tmp+b; + The return value of vadalp is then assigned to c, leaving value of a intact. 
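+   e.g. for signed 16-bit lanes accumulated into 32-bit lanes this emits roughly
+     mov    vTmp, vA
+     sadalp vTmp.4s, vB.8h
+   and vTmp is returned as c (register names here are illustrative only).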
+ */ +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) +{ + VectorRegSpec *vecSpecDest; + RegOperand *res; + + if (!IsPrimitiveVector(sty1)) { + RegOperand *resF = SelectOneElementVectorCopy(src1, sty1); + res = &CreateRegisterOperandOfType(PTY_f64); + SelectCopy(*res, PTY_f64, *resF, PTY_f64); + vecSpecDest = GetMemoryPool()->New(k1ByteSize, k64BitSize); + } else { + res = &CreateRegisterOperandOfType(sty1); /* result type same as sty1 */ + SelectCopy(*res, sty1, *src1, sty1); + vecSpecDest = GetMemoryPool()->New(sty1); + } + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sty2); + + MOperator mop; + if (IsUnsignedInteger(sty1)) { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vupadalvv : MOP_vupadaluu; + } else { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vspadalvv : MOP_vspadaluu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(sty1)) { + res = AdjustOneElementVectorOperand(sty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) +{ + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* source operand */ + + if (rType == PTY_f64) { + vecSpecDest->vecLaneMax = 1; + } + + MOperator mop; + if (IsUnsignedInteger(sType)) { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vupaddvv : MOP_vupadduu; + } else { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vspaddvv : MOP_vspadduu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + /* dest pushed first, popped first */ + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSetElement(Operand *eOpnd, PrimType eType, Operand *vOpnd, PrimType vType, + int32 lane) +{ + if (!IsPrimitiveVector(vType)) { + return SelectOneElementVectorCopy(eOpnd, eType); + } + RegOperand *reg = &CreateRegisterOperandOfType(eType); /* vector element type */ + SelectCopy(*reg, eType, *eOpnd, eType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(vType, lane); /* vector operand == result */ + + MOperator mOp; + if (GetPrimTypeSize(eType) > k4ByteSize) { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? MOP_vxinsvr : MOP_vxinsur; + } else { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? 
MOP_vwinsvr : MOP_vwinsur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*vOpnd).AddOpndChain(*reg); + vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return static_cast(vOpnd); +} + +RegOperand *AArch64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(oTy); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(oTy); /* same opnd types */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdlvuu : MOP_vsabdlvuu; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdl2vvv : MOP_vsabdl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMerge(PrimType rType, Operand *o1, Operand *o2, int32 index) +{ + if (!IsPrimitiveVector(rType)) { + static_cast(o1)->SetIF64Vec(); + return static_cast(o1); /* 64x1_t, index equals 0 */ + } + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(rType); + + ImmOperand *imm = &CreateImmOperand(index, k8BitSize, true); + + MOperator mOp = (GetPrimTypeSize(rType) > k8ByteSize) ? MOP_vextvvvi : MOP_vextuuui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorReverse(PrimType rType, Operand *src, PrimType sType, uint32 size) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* vector operand */ + + MOperator mOp; + if (GetPrimTypeBitSize(rType) == k128BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64qq : (size >= k32BitSize ? MOP_vrev32qq : MOP_vrev16qq); + } else if (GetPrimTypeBitSize(rType) == k64BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64dd : (size >= k32BitSize ? MOP_vrev32dd : MOP_vrev16dd); + } else { + CHECK_FATAL(false, "should not be here"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*src); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSum(PrimType rType, Operand *o1, PrimType oType) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* uint32_t result */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); + RegOperand *iOpnd = &CreateRegisterOperandOfType(oType); /* float intermediate result */ + uint32 eSize = GetVecEleSize(oType); /* vector opd in bits */ + bool is16ByteVec = GetPrimTypeSize(oType) >= k16ByteSize; + MOperator mOp; + if (is16ByteVec) { + mOp = eSize <= k8BitSize + ? 
MOP_vbaddvrv + : (eSize <= k16BitSize ? MOP_vhaddvrv : (eSize <= k32BitSize ? MOP_vsaddvrv : MOP_vdaddvrv)); + } else { + mOp = eSize <= k8BitSize ? MOP_vbaddvru : (eSize <= k16BitSize ? MOP_vhaddvru : MOP_vsaddvru); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*iOpnd).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + + mOp = eSize > k32BitSize ? MOP_vxmovrv : MOP_vwmovrv; + VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + auto *vecSpec2 = GetMemoryPool()->New(oType); + vInsn2.AddOpndChain(*res).AddOpndChain(*iOpnd); + vecSpec2->vecLane = 0; + vInsn2.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn2); + return res; +} + +void AArch64CGFunc::PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2) +{ + /* Only 1 operand can be non vector, otherwise it's a scalar operation, wouldn't come here */ + if (IsPrimitiveVector(oty1) == IsPrimitiveVector(oty2)) { + return; + } + PrimType origTyp = !IsPrimitiveVector(oty2) ? oty2 : oty1; + Operand *opd = !IsPrimitiveVector(oty2) ? *o2 : *o1; + PrimType rType = !IsPrimitiveVector(oty2) ? oty1 : oty2; /* Type to dup into */ + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + bool immOpnd = false; + if (opd->IsConstImmediate()) { + int64 val = static_cast(opd)->GetValue(); + if (val >= kMinImmVal && val <= kMaxImmVal && GetVecEleSize(rType) < k64BitSize) { + immOpnd = true; + } else { + RegOperand *regOpd = &CreateRegisterOperandOfType(origTyp); + SelectCopyImm(*regOpd, origTyp, static_cast(*opd), origTyp); + opd = static_cast(regOpd); + } + } + + /* need dup to vector operand */ + MOperator mOp; + if (immOpnd) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; /* a const */ + } else { + if (GetPrimTypeSize(origTyp) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vwdupvr : MOP_vwdupur; /* a scalar var */ + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*opd); + vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(oty2)) { + *o2 = static_cast(res); + oty2 = rType; + } else { + *o1 = static_cast(res); + oty1 = rType; + } +} + +void AArch64CGFunc::SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType) +{ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + MOperator mOp; + VectorInsn *insn; + if (GetPrimTypeSize(rType) > GetPrimTypeSize(oType)) { + /* expand, similar to vmov_XX() intrinsics */ + mOp = IsUnsignedInteger(rType) ? 
MOP_vushllvvi : MOP_vshllvvi; + ImmOperand *imm = &CreateImmOperand(0, k8BitSize, true); + insn = &GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + insn->AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + } else if (GetPrimTypeSize(rType) < GetPrimTypeSize(oType)) { + /* extract, similar to vqmovn_XX() intrinsics */ + insn = &GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + insn->AddOpndChain(*res).AddOpndChain(*o1); + } else { + CHECK_FATAL(0, "Invalid cvt between 2 operands of the same size"); + } + insn->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(*insn); +} + +RegOperand *AArch64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) +{ + if (IsUnsignedInteger(oty1) && (opc != OP_eq && opc != OP_ne)) { + return nullptr; /* no unsigned instr for zero */ + } + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmeqvv : MOP_vzcmequu; + break; + case OP_gt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgtvv : MOP_vzcmgtuu; + break; + case OP_ge: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgevv : MOP_vzcmgeuu; + break; + case OP_lt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmltvv : MOP_vzcmltuu; + break; + case OP_le: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmlevv : MOP_vzcmleuu; + break; + default: + CHECK_FATAL(0, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +/* Neon compare intrinsics always return unsigned vector, MapleIR for comparison always return + signed. Using type of 1st operand for operation here */ +RegOperand *AArch64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) +{ + if (o2->IsConstImmediate() && static_cast(o2)->GetValue() == 0) { + RegOperand *zeroCmp = SelectVectorCompareZero(o1, oty1, o2, opc); + if (zeroCmp != nullptr) { + return zeroCmp; + } + } + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector operand 2 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmeqvvv : MOP_vcmequuu; + break; + case OP_lt: + case OP_gt: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhivvv : MOP_vcmhiuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmgtvvv : MOP_vcmgtuuu; + } + break; + case OP_le: + case OP_ge: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhsvvv : MOP_vcmhsuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? 
MOP_vcmgevvv : MOP_vcmgeuuu; + } + break; + default: + CHECK_FATAL(0, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + if (opc == OP_lt || opc == OP_le) { + vInsn.AddOpndChain(*res).AddOpndChain(*o2).AddOpndChain(*o1); + } else { + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + } + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) +{ + PrepareVectorOperands(&o1, oty1, &o2, oty2); + PrimType resultType = rType; + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + + if (!IsPrimitiveVector(rType)) { + o1 = &SelectCopy(*o1, rType, PTY_f64); + o2 = &SelectCopy(*o2, rType, PTY_f64); + resultType = PTY_f64; + } + RegOperand *res = &CreateRegisterOperandOfType(resultType); /* result operand */ + + /* signed and unsigned shl(v,v) both use sshl or ushl, they are the same */ + MOperator mOp; + if (IsPrimitiveUnsigned(rType)) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vushlvvv : MOP_vushluuu; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vshlvvv : MOP_vshluuu; + } + + if (opc != OP_shl) { + o2 = SelectVectorNeg(rType, o2); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +uint32 ValidShiftConst(PrimType rType) +{ + switch (rType) { + case PTY_v8u8: + case PTY_v8i8: + case PTY_v16u8: + case PTY_v16i8: + return k8BitSize; + case PTY_v4u16: + case PTY_v4i16: + case PTY_v8u16: + case PTY_v8i16: + return k16BitSize; + case PTY_v2u32: + case PTY_v2i32: + case PTY_v4u32: + case PTY_v4i32: + return k32BitSize; + case PTY_v2u64: + case PTY_v2i64: + return k64BitSize; + default: + CHECK_FATAL(0, "Invalid Shift operand type"); + } + return 0; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + if (!imm->IsConstImmediate()) { + CHECK_FATAL(0, "VectorUShiftImm has invalid shift const"); + } + uint32 shift = static_cast(ValidShiftConst(rType)); + bool needDup = false; + if (opc == OP_shl) { + if ((shift == k8BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k16BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k32BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k64BitSize && (sVal < 0 || static_cast(sVal) >= shift))) { + needDup = true; + } + } else { + if ((shift == k8BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k16BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k32BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k64BitSize && (sVal < 1 || static_cast(sVal) > shift))) { + needDup = true; + } + } + if (needDup) { + 
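+        /* the shift amount cannot be encoded as an immediate here, so materialize
+           it in a vector register and fall back to the register-shift form
+           (ushl/sshl) selected by SelectVectorShift below */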
/* Dup constant to vector reg */ + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest); + GetCurBB()->AppendInsn(vInsn); + res = SelectVectorShift(rType, o1, rType, res, rType, opc); + return res; + } + MOperator mOp; + if (GetPrimTypeSize(rType) > k8ByteSize) { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vushrvvi; + } else { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vshrvvi; + } + } else { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushluui : MOP_vushruui; + } else { + mOp = opc == OP_shl ? MOP_vushluui : MOP_vshruui; + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); /* 8B or 16B */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + vecSpec1->compositeOpnds = 1; /* composite operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vtbl1vvv, AArch64CG::kMd[MOP_vtbl1vvv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) +{ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* operand 1 and result */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 2 */ + VectorRegSpec *vecSpec3 = GetMemoryPool()->New(oTyp3); /* vector operand 2 */ + + MOperator mop = IsPrimitiveUnSignedVector(oTyp1) ? MOP_vumaddvvv : MOP_vsmaddvvv; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*o3); + vInsn.PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2).PushRegSpecEntry(vecSpec3); + GetCurBB()->AppendInsn(vInsn); + return static_cast(o1); +} + +RegOperand *AArch64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + bool isLow) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 1 */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vumullvvv : MOP_vsmullvvv; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? 
MOP_vumull2vvv : MOP_vsmull2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) +{ + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* source operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* source operand 2 */ + + MOperator mOp; + if (opc == OP_add) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vaddvvv : MOP_vadduuu; + } else if (opc == OP_sub) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vsubvvv : MOP_vsubuuu; + } else if (opc == OP_mul) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmulvvv : MOP_vmuluuu; + } else { + CHECK_FATAL(0, "Invalid opcode for SelectVectorBinOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + /* dest pushed first, popped first */ + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) +{ + PrepareVectorOperands(&o1, oty1, &o2, oty2); + DEBUG_ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp; + if (opc == OP_band) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vandvvv : MOP_vanduuu; + } else if (opc == OP_bior) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vorvvv : MOP_voruuu; + } else if (opc == OP_bxor) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vxorvvv : MOP_vxoruuu; + } else { + CHECK_FATAL(0, "Invalid opcode for SelectVectorBitwiseOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) +{ + (void)oty1; /* 1st opnd was loaded already, type no longer needed */ + RegOperand *res = static_cast(o1); /* o1 is also the result */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector opnd2 */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtn2uv, AArch64CG::kMd[MOP_vxtn2uv]); + vInsn.AddOpndChain(*res).AddOpndChain(*o2); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNot(PrimType rType, Operand *o1) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vnotvv : MOP_vnotuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vnegvv : MOP_vneguu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* + * Called internally for auto-vec, no intrinsics for now + */ +RegOperand *AArch64CGFunc::SelectVectorSelect(Operand &cond, PrimType rType, Operand &o0, Operand &o1) +{ + rType = GetPrimTypeSize(rType) > k8ByteSize ? PTY_v16u8 : PTY_v8u8; + RegOperand *res = &CreateRegisterOperandOfType(rType); + SelectCopy(*res, rType, cond, rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); + + uint32 mOp = GetPrimTypeBitSize(rType) > k64BitSize ? 
MOP_vbslvvv : MOP_vbsluuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(o0).AddOpndChain(o1); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + vInsn.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, + bool isLow) +{ + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + ImmOperand *imm = static_cast(o2); + MOperator mOp; + if (isLow) { + mOp = MOP_vshrnuvi; + } else { + CHECK_FATAL(0, "NYI: vshrn_high_"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + vInsn.PushRegSpecEntry(vecSpecDest); + vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, + PrimType otyp2, bool isLow, bool isWide) +{ + RegOperand *res = &CreateRegisterOperandOfType(resType); /* result reg */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(resType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (!isWide) { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusublvuu : MOP_vssublvuu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubl2vvv : MOP_vssubl2vvv; + } + } else { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubwvvu : MOP_vssubwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? 
MOP_vusubw2vvv : MOP_vssubw2vvv;
+        }
+    }
+    VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]);
+    vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2);
+    vInsn.PushRegSpecEntry(vecSpecDest);
+    vInsn.PushRegSpecEntry(vecSpec1);
+    vInsn.PushRegSpecEntry(vecSpec2);
+    GetCurBB()->AppendInsn(vInsn);
+    return res;
+}
+
+void AArch64CGFunc::SelectVectorZip(PrimType rType, Operand *o1, Operand *o2)
+{
+    RegOperand *res1 = &CreateRegisterOperandOfType(rType);                 /* result operand 1 */
+    RegOperand *res2 = &CreateRegisterOperandOfType(rType);                 /* result operand 2 */
+    VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+    VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(rType);   /* vector operand 1 */
+    VectorRegSpec *vecSpec2 = GetMemoryPool()->New<VectorRegSpec>(rType);   /* vector operand 2 */
+
+    VectorInsn &vInsn1 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip1vvv, AArch64CG::kMd[MOP_vzip1vvv]);
+    vInsn1.AddOpndChain(*res1).AddOpndChain(*o1).AddOpndChain(*o2);
+    vInsn1.PushRegSpecEntry(vecSpecDest);
+    vInsn1.PushRegSpecEntry(vecSpec1);
+    vInsn1.PushRegSpecEntry(vecSpec2);
+    GetCurBB()->AppendInsn(vInsn1);
+
+    VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip2vvv, AArch64CG::kMd[MOP_vzip2vvv]);
+    vInsn2.AddOpndChain(*res2).AddOpndChain(*o1).AddOpndChain(*o2);
+    vInsn2.PushRegSpecEntry(vecSpecDest);
+    vInsn2.PushRegSpecEntry(vecSpec1);
+    vInsn2.PushRegSpecEntry(vecSpec2);
+    GetCurBB()->AppendInsn(vInsn2);
+
+    if (GetPrimTypeSize(rType) <= k16ByteSize) {
+        Operand *preg1 = &GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg1, *res1));
+        Operand *preg2 = &GetOrCreatePhysicalRegisterOperand(V1, k64BitSize, kRegTyFloat);
+        GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg2, *res2));
+    }
+}
+
+RegOperand *AArch64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow)
+{
+    RegOperand *res = &CreateRegisterOperandOfType(rType);                  /* result operand */
+    VectorRegSpec *vecSpecDest = GetMemoryPool()->New<VectorRegSpec>(rType);
+    VectorRegSpec *vecSpec1 = GetMemoryPool()->New<VectorRegSpec>(otyp);    /* vector operand */
+
+    MOperator mOp;
+    if (isLow) {
+        mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtlvu : MOP_vsxtlvu;
+    } else {
+        mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtl2vv : MOP_vsxtl2vv;
+    }
+    VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]);
+    vInsn.AddOpndChain(*res).AddOpndChain(*o1);
+    vInsn.PushRegSpecEntry(vecSpecDest);
+    vInsn.PushRegSpecEntry(vecSpec1);
+    GetCurBB()->AppendInsn(vInsn);
+    return res;
+}
+
+/*
+ * Check the distance between the first insn of the BB with label targLabIdx
+ * and the insn with id targId. Return false if the distance is greater than
+ * kShortBRDistance.
+ */
+bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId) const
+{
+    for (auto *tBB : bb.GetSuccs()) {
+        if (tBB->GetLabIdx() != targLabIdx) {
+            continue;
+        }
+        Insn *tInsn = tBB->GetFirstInsn();
+        while (tInsn == nullptr || !tInsn->IsMachineInstruction()) {
+            if (tInsn == nullptr) {
+                tBB = tBB->GetNext();
+                if (tBB == nullptr) { /* tailcallopt may make the target block empty */
+                    return true;
+                }
+                tInsn = tBB->GetFirstInsn();
+            } else {
+                tInsn = tInsn->GetNext();
+            }
+        }
+        uint32 tmp = (tInsn->GetId() > targId) ? (tInsn->GetId() - targId) : (targId - tInsn->GetId());
+        return (tmp < kShortBRDistance);
+    }
+    CHECK_FATAL(false, "CFG error");
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8ea617c68ae21875c0b63ffc4606296ceb0a8396
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_color_ra.cpp
@@ -0,0 +1,5141 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_color_ra.h"
+#include <iostream>
+#include <fstream>
+#include "aarch64_cg.h"
+#include "mir_lower.h"
+#include "securec.h"
+/*
+ * Based on concepts from Chow and Hennessy.
+ * Phases are as follows:
+ *   Prepass to collect local BB information.
+ *     Compute local register allocation demands for global RA.
+ *   Compute live ranges.
+ *     Each live range (LR) is represented by a bit vector of size #BBs;
+ *     for each cross-BB vreg, a bit is set in the vector.
+ *   Build the interference graph with basic-block granularity.
+ *     When the intersection of two LRs is non-empty, they interfere.
+ *   Separate unconstrained and constrained LRs.
+ *     unconstrained - an LR with fewer interference edges than available
+ *       colors; such LRs can always be colored.
+ *     constrained - not unconstrained.
+ *   Split LRs based on priority cost:
+ *     repeatedly add BBs from the original LR to the new LR until it becomes
+ *     constrained, then update all LRs the new LR interferes with.
+ *   Color the new LR.
+ *     Each LR has a forbidden list of registers that cannot be assigned.
+ *     Coalesce moves using the preferred color first.
+ *   Mark the remaining uncolorable LRs after splitting as spills.
+ *   Run local register allocation.
+ *   Emit and insert spills.
+ */
+namespace maplebe {
+#define JAVALANG (cgFunc->GetMirModule().IsJavaModule())
+#define CLANG (cgFunc->GetMirModule().IsCModule())
+
+/*
+ * for physical regOpnd phyOpnd,
+ * R0->GetRegisterNumber() == 1
+ * V0->GetRegisterNumber() == 33
+ */
+constexpr uint32 kLoopWeight = 20;
+constexpr uint32 kAdjustWeight = 2;
+constexpr uint32 kInsnStep = 2;
+constexpr uint32 kMaxSplitCount = 3;
+constexpr uint32 kRematWeight = 3;
+constexpr uint32 kPriorityDefThreashold = 1;
+constexpr uint32 kPriorityUseThreashold = 5;
+constexpr uint32 kPriorityBBThreashold = 1000;
+constexpr float kPriorityRatioThreashold = 0.9;
+
+#define GCRA_DUMP CG_DEBUG_FUNC(*cgFunc)
+
+void LiveUnit::PrintLiveUnit() const
+{
+    LogInfo::MapleLogger() << "[" << begin << "," << end << "]"
+                           << "";
+    if (!hasCall) {
+        /* Too many calls, so only print when there is no call.
*/ + LogInfo::MapleLogger() << " nc"; + } + if (needReload) { + LogInfo::MapleLogger() << " rlod"; + } + if (needRestore) { + LogInfo::MapleLogger() << " rstr"; + } +} + +bool LiveRange::IsRematerializable(AArch64CGFunc &cgFunc, uint8 rematLev) const +{ + if (rematLev == rematOff) + return false; + + switch (op) { + case OP_undef: + return false; + case OP_constval: { + const MIRConst *mirConst = rematInfo.mirConst; + if (mirConst->GetKind() != kConstInt) { + return false; + } + const MIRIntConst *intConst = static_cast(rematInfo.mirConst); + int64 val = intConst->GetExtValue(); + if (val >= -kMax16UnsignedImm && val <= kMax16UnsignedImm) { + return true; + } + auto uval = static_cast(val); + if (IsMoveWidableImmediate(uval, GetSpillSize())) { + return true; + } + return IsBitmaskImmediate(uval, GetSpillSize()); + } + case OP_addrof: { + if (rematLev < rematAddr) { + return false; + } + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + /* cost too much to remat */ + if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && + ((fieldID != 0) || + (cgFunc.GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + return false; + } + if (!addrUpper && CGOptions::IsPIC() && + ((symbol->GetStorageClass() == kScGlobal) || (symbol->GetStorageClass() == kScExtern))) { + /* check if in loop */ + bool useInLoop = false; + bool defOutLoop = false; + for (auto luIt : luMap) { + BB *bb = cgFunc.GetBBFromID(luIt.first); + LiveUnit *curLu = luIt.second; + if (bb->GetLoop() != nullptr && curLu->GetUseNum() != 0) { + useInLoop = true; + } + if (bb->GetLoop() == nullptr && curLu->GetDefNum() != 0) { + defOutLoop = true; + } + } + return !(useInLoop && defOutLoop); + } + return true; + } + case OP_dread: { + if (rematLev < rematDreadLocal) { + return false; + } + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + MIRStorageClass storageClass = symbol->GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + /* cost too much to remat. */ + return false; + } + PrimType symType = symbol->GetType()->GetPrimType(); + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(fieldID)->GetPrimType(); + offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + /* check stImm.GetOffset() is in addri12 */ + StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*symbol, offset, 0); + uint32 dataSize = GetPrimTypeBitSize(symType); + ImmOperand &immOpnd = cgFunc.CreateImmOperand(stOpnd.GetOffset(), dataSize, false); + if (!immOpnd.IsInBitSize(kMaxImmVal12Bits, 0)) { + return false; + } + if (rematLev < rematDreadGlobal && !symbol->IsLocal()) { + return false; + } + return true; + } + default: + return false; + } +} + +std::vector LiveRange::Rematerialize(AArch64CGFunc *cgFunc, RegOperand ®Op) +{ + std::vector insns; + switch (op) { + case OP_constval: + switch (rematInfo.mirConst->GetKind()) { + case kConstInt: { + MIRIntConst *intConst = + const_cast(static_cast(rematInfo.mirConst)); + + Operand *immOp = cgFunc->SelectIntConst(*intConst); + MOperator movOp = (GetSpillSize() == k32BitSize) ? 
MOP_wmovri32 : MOP_xmovri64; + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(movOp, regOp, *immOp)); + break; + } + default: + DEBUG_ASSERT(false, "Unsupported constant for rematerialization"); + } + break; + case OP_dread: { + const MIRSymbol *symbol = rematInfo.sym; + PrimType symType = symbol->GetType()->GetPrimType(); + RegOperand *regOp64 = &cgFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(regOp.GetRegisterNumber()), k64BitSize, regOp.GetRegisterType()); + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(fieldID)->GetPrimType(); + offset = cgFunc->GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + + uint32 dataSize = GetPrimTypeBitSize(symType); + MemOperand *spillMemOp = + &cgFunc->GetOrCreateMemOpndAfterRa(*symbol, offset, dataSize, false, regOp64, insns); + MOperator mOp = cgFunc->PickLdInsn(spillMemOp->GetSize(), symType); + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOp, *spillMemOp)); + break; + } + case OP_addrof: { + const MIRSymbol *symbol = rematInfo.sym; + int32 offset = 0; + if (fieldID != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + DEBUG_ASSERT(structType != nullptr, "Rematerialize: non-zero fieldID for non-structure"); + offset = cgFunc->GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + StImmOperand &stImm = cgFunc->CreateStImmOperand(*symbol, offset, 0); + if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { + AArch64SymbolAlloc *symLoc = + static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); + ImmOperand *offsetOp = nullptr; + offsetOp = &cgFunc->CreateImmOperand(cgFunc->GetBaseOffset(*symLoc) + offset, k64BitSize, false); + + Insn *insn = + &cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, *cgFunc->GetBaseReg(*symLoc), *offsetOp); + if (CGOptions::kVerboseCG) { + std::string comm = "local/formal var: "; + comm.append(symbol->GetName()); + insn->SetComment(comm); + } + insns.push_back(insn); + } else { + Insn *insn = &cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrp, regOp, stImm); + insns.push_back(insn); + if (!addrUpper && CGOptions::IsPIC() && + ((symbol->GetStorageClass() == kScGlobal) || (symbol->GetStorageClass() == kScExtern))) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offsetOp = cgFunc->CreateOfstOpnd(*symbol, offset, 0); + MemOperand &memOpnd = + cgFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + static_cast(®Op), nullptr, &offsetOp, nullptr); + MOperator ldOp = (memOpnd.GetSize() == k64BitSize) ? 
MOP_xldr : MOP_wldr; + insn = &cgFunc->GetInsnBuilder()->BuildInsn(ldOp, regOp, memOpnd); + insns.push_back(insn); + if (offset > 0) { + OfstOperand &ofstOpnd = + cgFunc->GetOrCreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, regOp, ofstOpnd)); + } + } else if (!addrUpper) { + insns.push_back(&cgFunc->GetInsnBuilder()->BuildInsn(MOP_xadrpl12, regOp, regOp, stImm)); + } + } + break; + } + default: + DEBUG_ASSERT(false, "Unexpected op in live range"); + } + + return insns; +} + +template +void GraphColorRegAllocator::ForEachBBArrElem(const uint64 *vec, Func functor) const +{ + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +template +void GraphColorRegAllocator::ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const +{ + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + if (functor(iBBArrElem * kU64 + bBBArrElem)) { + return; + } + } + } + } +} + +template +void GraphColorRegAllocator::ForEachRegArrElem(const uint64 *vec, Func functor) const +{ + for (uint32 iBBArrElem = 0; iBBArrElem < regBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +void GraphColorRegAllocator::PrintLiveUnitMap(const LiveRange &lr) const +{ + LogInfo::MapleLogger() << "\n\tlu:"; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (!IsBitArrElemSet(lr.GetBBMember(), i)) { + continue; + } + auto lu = lr.GetLuMap().find(i); + if (lu != lr.GetLuMap().end() && (lu->second->GetDefNum() || lu->second->GetUseNum())) { + LogInfo::MapleLogger() << "(" << i << " "; + lu->second->PrintLiveUnit(); + LogInfo::MapleLogger() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRangeConflicts(const LiveRange &lr) const +{ + LogInfo::MapleLogger() << "\n\tinterfere(" << lr.GetNumBBConflicts() << "): "; + for (uint32 i = 0; i < regBuckets; ++i) { + uint64 chunk = lr.GetBBConflictElem(i); + for (uint64 bit = 0; bit < kU64; ++bit) { + if (chunk & (1ULL << bit)) { + regno_t newNO = i * kU64 + bit; + LogInfo::MapleLogger() << newNO << ","; + } + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveBBBit(const LiveRange &lr) const +{ + LogInfo::MapleLogger() << "live_bb(" << lr.GetNumBBMembers() << "): "; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (IsBitArrElemSet(lr.GetBBMember(), i)) { + LogInfo::MapleLogger() << i << " "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRange(const LiveRange &lr, const std::string &str) const +{ + LogInfo::MapleLogger() << str << "\n"; + + LogInfo::MapleLogger() << "R" << lr.GetRegNO(); + if (lr.GetRegType() == kRegTyInt) { + LogInfo::MapleLogger() << "(I)"; + } else if (lr.GetRegType() == kRegTyFloat) { + LogInfo::MapleLogger() << "(F)"; + } else { + LogInfo::MapleLogger() << "(U)"; + } + if (lr.GetSpillSize() == k32) { + LogInfo::MapleLogger() << "S32"; + } else if (lr.GetSpillSize() == k64) { + LogInfo::MapleLogger() << "S64"; + } else { + LogInfo::MapleLogger() << "S0(nodef)"; + } + 
LogInfo::MapleLogger() << "\tnumCall " << lr.GetNumCall(); + LogInfo::MapleLogger() << "\tpriority " << lr.GetPriority(); + LogInfo::MapleLogger() << "\tforbidden: "; + for (regno_t preg = kInvalidRegNO; preg < kMaxRegNum; preg++) { + if (lr.GetForbidden(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + LogInfo::MapleLogger() << "\tpregveto: "; + for (regno_t preg = kInvalidRegNO; preg < kMaxRegNum; preg++) { + if (lr.GetPregveto(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + if (lr.IsSpilled()) { + LogInfo::MapleLogger() << " spilled"; + } + if (lr.GetSplitLr()) { + LogInfo::MapleLogger() << " split"; + } + LogInfo::MapleLogger() << "\top: " << kOpcodeInfo.GetName(lr.GetOp()); + LogInfo::MapleLogger() << "\n"; + PrintLiveBBBit(lr); + PrintLiveRangeConflicts(lr); + PrintLiveUnitMap(lr); + if (lr.GetSplitLr()) { + PrintLiveRange(*lr.GetSplitLr(), "===>Split LR"); + } +} + +void GraphColorRegAllocator::PrintLiveRanges() const +{ + LogInfo::MapleLogger() << "PrintLiveRanges: size = " << lrMap.size() << "\n"; + for (auto it : lrMap) { + PrintLiveRange(*it.second, ""); + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLocalRAInfo(const std::string &str) const +{ + LogInfo::MapleLogger() << str << "\n"; + for (uint32 id = 0; id < cgFunc->NumBBs(); ++id) { + LocalRaInfo *lraInfo = localRegVec[id]; + if (lraInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "bb " << id << " def "; + for (const auto &defCntPair : lraInfo->GetDefCnt()) { + LogInfo::MapleLogger() << "[" << defCntPair.first << ":" << defCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "use "; + for (const auto &useCntPair : lraInfo->GetUseCnt()) { + LogInfo::MapleLogger() << "[" << useCntPair.first << ":" << useCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::PrintBBAssignInfo() const +{ + for (size_t id = 0; id < bfs->sortedBBs.size(); ++id) { + uint32 bbID = bfs->sortedBBs[id]->GetId(); + BBAssignInfo *bbInfo = bbRegInfo[bbID]; + if (bbInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "BBinfo(" << id << ")"; + LogInfo::MapleLogger() << " lra-needed int " << bbInfo->GetIntLocalRegsNeeded(); + LogInfo::MapleLogger() << " fp " << bbInfo->GetFpLocalRegsNeeded(); + LogInfo::MapleLogger() << " greg-used "; + for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) { + if (bbInfo->GetGlobalsAssigned(regNO)) { + LogInfo::MapleLogger() << regNO << ","; + } + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const +{ +#ifdef RANDOM_PRIORITY + unsigned long seed = 0; + size_t size = sizeof(seed); + std::ifstream randomNum("/dev/random", std::ios::in | std::ios::binary); + if (randomNum) { + randomNum.read(reinterpret_cast(&seed), size); + if (randomNum) { + lr.SetPriority(1 / (seed + 1)); + } + randomNum.close(); + } else { + std::cerr << "Failed to open /dev/urandom" << '\n'; + } + return; +#endif /* RANDOM_PRIORITY */ + float pri = 0.0; + uint32 bbNum = 0; + uint32 numDefs = 0; + uint32 numUses = 0; + auto *a64CGFunc = static_cast(cgFunc); + CG *cg = a64CGFunc->GetCG(); + + if (cg->GetRematLevel() >= rematConst && lr.IsRematerializable(*a64CGFunc, rematConst)) { + lr.SetRematLevel(rematConst); + } else if (cg->GetRematLevel() >= rematAddr && lr.IsRematerializable(*a64CGFunc, rematAddr)) { + lr.SetRematLevel(rematAddr); + } else if (cg->GetRematLevel() >= rematDreadLocal && lr.IsRematerializable(*a64CGFunc, 
rematDreadLocal)) { + lr.SetRematLevel(rematDreadLocal); + } else if (cg->GetRematLevel() >= rematDreadGlobal && lr.IsRematerializable(*a64CGFunc, rematDreadGlobal)) { + lr.SetRematLevel(rematDreadGlobal); + } + + auto calculatePriorityFunc = [&lr, &bbNum, &numDefs, &numUses, &pri, this](uint32 bbID) { + auto lu = lr.FindInLuMap(bbID); + DEBUG_ASSERT(lu != lr.EndOfLuMap(), "can not find live unit"); + BB *bb = bbVec[bbID]; + if (bb->GetFirstInsn() != nullptr && !bb->IsSoloGoto()) { + ++bbNum; + numDefs += lu->second->GetDefNum(); + numUses += lu->second->GetUseNum(); + uint32 useCnt = lu->second->GetDefNum() + lu->second->GetUseNum(); + uint32 mult; +#ifdef USE_BB_FREQUENCY + mult = bb->GetFrequency(); +#else /* USE_BB_FREQUENCY */ + if (bb->GetLoop() != nullptr) { + uint32 loopFactor; + if (lr.GetNumCall() > 0 && lr.GetRematLevel() == rematOff) { + loopFactor = bb->GetLoop()->GetLoopLevel() * kAdjustWeight; + } else { + loopFactor = bb->GetLoop()->GetLoopLevel() / kAdjustWeight; + } + mult = static_cast(pow(kLoopWeight, loopFactor)); + } else { + mult = 1; + } +#endif /* USE_BB_FREQUENCY */ + pri += useCnt * mult; + } + }; + ForEachBBArrElem(lr.GetBBMember(), calculatePriorityFunc); + + if (lr.GetRematLevel() == rematAddr || lr.GetRematLevel() == rematConst) { + if (numDefs <= 1 && numUses <= 1) { + pri = -0xFFFF; + } else { + pri /= kRematWeight; + } + } else if (lr.GetRematLevel() == rematDreadLocal) { + pri /= 4; + } else if (lr.GetRematLevel() == rematDreadGlobal) { + pri /= 2; + } + + lr.SetPriority(pri); + lr.SetNumDefs(numDefs); + lr.SetNumUses(numUses); + if (lr.GetPriority() > 0 && numDefs <= kPriorityDefThreashold && numUses <= kPriorityUseThreashold && + cgFunc->NumBBs() > kPriorityBBThreashold && + (static_cast(lr.GetNumBBMembers()) / cgFunc->NumBBs()) > kPriorityRatioThreashold) { + /* for large functions, delay allocating long LR with few defs and uses */ + lr.SetPriority(0.0); + } +} + +void GraphColorRegAllocator::PrintBBs() const +{ + for (auto *bb : bfs->sortedBBs) { + LogInfo::MapleLogger() << "\n< === > "; + LogInfo::MapleLogger() << bb->GetId(); + LogInfo::MapleLogger() << " succs:"; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + LogInfo::MapleLogger() << " eh_succs:"; + for (auto *succBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + } + LogInfo::MapleLogger() << "\n"; +} + +uint32 GraphColorRegAllocator::MaxIntPhysRegNum() const +{ + return (R28 - R0); +} + +uint32 GraphColorRegAllocator::MaxFloatPhysRegNum() const +{ + return (V31 - V0); +} + +bool GraphColorRegAllocator::IsReservedReg(AArch64reg regNO) const +{ + if (!doMultiPass || cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return (regNO == R16) || (regNO == R17); + } else { + return (regNO == R16); + } +} + +void GraphColorRegAllocator::InitFreeRegPool() +{ + /* + * ==== int regs ==== + * FP 29, LR 30, SP 31, 0 to 7 parameters + + * MapleCG defines 32 as ZR (zero register) + * use 8 if callee does not return large struct ? No + * 16 and 17 are intra-procedure call temp, can be caller saved + * 18 is platform reg, still use it + */ + uint32 intNum = 0; + uint32 fpNum = 0; + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + if (!AArch64Abi::IsAvailableReg(static_cast(regNO))) { + continue; + } + + /* + * Because of the try-catch scenario in JAVALANG, + * we should use specialized spill register to prevent register changes when exceptions occur. 
+ */ + if (JAVALANG && AArch64Abi::IsSpillRegInRA(static_cast(regNO), needExtraSpillReg)) { + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + /* Preset int spill registers */ + (void)intSpillRegSet.insert(regNO - R0); + } else { + /* Preset float spill registers */ + (void)fpSpillRegSet.insert(regNO - V0); + } + continue; + } + +#ifdef RESERVED_REGS + /* r16,r17 are used besides ra. */ + if (IsReservedReg(static_cast(regNO))) { + continue; + } +#endif /* RESERVED_REGS */ + + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (IsYieldPointReg(static_cast(regNO))) { + continue; + } + if (regNO == R29) { + if (!cgFunc->UseFP()) { + (void)intCalleeRegSet.insert(regNO - R0); + ++intNum; + } + continue; + } + if (AArch64Abi::IsCalleeSavedReg(static_cast(regNO))) { + (void)intCalleeRegSet.insert(regNO - R0); + } else { + (void)intCallerRegSet.insert(regNO - R0); + } + ++intNum; + } else { + if (AArch64Abi::IsCalleeSavedReg(static_cast(regNO))) { + (void)fpCalleeRegSet.insert(regNO - V0); + } else { + (void)fpCallerRegSet.insert(regNO - V0); + } + ++fpNum; + } + } + intRegNum = intNum; + fpRegNum = fpNum; +} + +void GraphColorRegAllocator::InitCCReg() +{ + Operand &opnd = cgFunc->GetOrCreateRflag(); + auto &tmpRegOp = static_cast(opnd); + ccReg = tmpRegOp.GetRegisterNumber(); +} + +bool GraphColorRegAllocator::IsYieldPointReg(regno_t regNO) const +{ + if (cgFunc->GetCG()->GenYieldPoint()) { + return (regNO == RYP); + } + return false; +} + +bool GraphColorRegAllocator::IsUnconcernedReg(regno_t regNO) const +{ + /* RFP = 32, RLR = 31, RSP = 33, RZR = 34 */ + if ((regNO >= RLR && regNO <= RZR) || regNO == RFP || regNO == ccReg) { + return true; + } + + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (IsYieldPointReg(static_cast(regNO))) { + return true; + } + + return false; +} + +bool GraphColorRegAllocator::IsUnconcernedReg(const RegOperand ®Opnd) const +{ + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { + return true; + } + return IsUnconcernedReg(regNO); +} + +/* + * Based on live analysis, the live-in and live-out set determines + * the bit to be set in the LR vector, which is of size #BBs. + * If a vreg is in the live-in and live-out set, it is live in the BB. + * + * Also keep track if a LR crosses a call. If a LR crosses a call, it + * interferes with all caller saved registers. Add all caller registers + * to the LR's forbidden list. + * + * Return created LiveRange object + * + * maybe need extra info: + * Add info for setjmp. + * Add info for defBB, useBB, index in BB for def and use + * Add info for startingBB and endingBB + */ +LiveRange *GraphColorRegAllocator::NewLiveRange() +{ + LiveRange *lr = memPool->New(alloc); + + if (bbBuckets == 0) { + bbBuckets = (cgFunc->NumBBs() / kU64) + 1; + } + lr->SetBBBuckets(bbBuckets); + lr->InitBBMember(*memPool, bbBuckets); + if (regBuckets == 0) { + regBuckets = (cgFunc->GetMaxRegNum() / kU64) + 1; + } + lr->SetRegBuckets(regBuckets); + lr->InitBBConflict(*memPool, regBuckets); + lr->InitPregveto(); + lr->InitForbidden(); + return lr; +} + +/* Create local info for LR. return true if reg is not local. 
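 *
 * NewLiveRange() above sizes both bit arrays in buckets of 64 bits (kU64):
 * one bit per BB id for membership, one bit per vreg for conflicts. A rough
 * standalone sketch of that scheme, assuming kU64 is 64 and using
 * illustrative names rather than Maple's API:
 *
 *   #include <cstdint>
 *   #include <vector>
 *
 *   struct BBBitSet {
 *       explicit BBBitSet(uint32_t numBBs) : buckets((numBBs / 64) + 1, 0) {}
 *       void Set(uint32_t id) { buckets[id / 64] |= (1ULL << (id % 64)); }
 *       bool Test(uint32_t id) const { return (buckets[id / 64] & (1ULL << (id % 64))) != 0; }
 *       template <typename Func>
 *       void ForEach(Func f) const {  // same shape as ForEachBBArrElem
 *           for (uint32_t i = 0; i < buckets.size(); ++i) {
 *               for (uint32_t b = 0; b < 64; ++b) {
 *                   if ((buckets[i] & (1ULL << b)) != 0) {
 *                       f(i * 64 + b);
 *                   }
 *               }
 *           }
 *       }
 *       std::vector<uint64_t> buckets;
 *   };
 *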
*/ +bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef) +{ + if (FindIn(bb.GetLiveInRegNO(), regNO) || FindIn(bb.GetLiveOutRegNO(), regNO)) { + return true; + } + /* + * register not in globals for the bb, so it is local. + * Compute local RA info. + */ + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + if (isDef) { + /* movk is handled by different id for use/def in the same insn. */ + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } else { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + /* lr info is useful for lra, so continue lr info */ + return false; +} + +LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, + uint32 currId) +{ + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + lr = NewLiveRange(); + lr->SetID(currId); + + LiveUnit *lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + if (isDef) { + /* means no use after def for reg, chances for ebo opt */ + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + } + } else { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb.GetId()); + if (lu == nullptr) { + lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + } + if (lu->GetBegin() > currId) { + lu->SetBegin(currId); + } + } + + if (CLANG) { + auto *a64CGFunc = static_cast(cgFunc); + MIRPreg *preg = a64CGFunc->GetPseudoRegFromVirtualRegNO(regNO, CGOptions::DoCGSSA()); + if (preg) { + switch (preg->GetOp()) { + case OP_constval: + lr->SetRematerializable(preg->rematInfo.mirConst); + break; + case OP_addrof: + case OP_dread: + lr->SetRematerializable(preg->GetOp(), preg->rematInfo.sym, preg->fieldID, preg->addrUpper); + break; + case OP_undef: + break; + default: + DEBUG_ASSERT(false, "Unexpected op in Preg"); + } + } + } + + return lr; +} + +void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount) +{ + bool isNonLocal = CreateLiveRangeHandleLocal(regNO, bb, isDef); + + if (!isDef) { + --currId; + } + + LiveRange *lr = CreateLiveRangeAllocateAndUpdate(regNO, bb, isDef, currId); + lr->SetRegNO(regNO); + lr->SetIsNonLocal(isNonLocal); + if (isDef) { + (void)vregLive.erase(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumDefs() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumDefs(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } else { + (void)vregLive.insert(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumUses() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumUses(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + + /* only handle it in live_in and def point? 
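 *
 * CreateLiveRangeAllocateAndUpdate() above keeps one LiveUnit per BB.
 * Because ComputeLiveRanges walks instructions in reverse, the first
 * reference seen in a BB seeds both ends of the unit, and later (smaller)
 * ids only ever move begin downward. A minimal sketch of that update,
 * with illustrative types rather than Maple's:
 *
 *   #include <cstdint>
 *   #include <map>
 *
 *   struct MiniLiveUnit {
 *       uint32_t begin, end, defNum, useNum;
 *   };
 *
 *   void RecordRef(std::map<uint32_t, MiniLiveUnit> &luMap, uint32_t bbId, uint32_t insnId, bool isDef)
 *   {
 *       auto it = luMap.find(bbId);
 *       if (it == luMap.end()) {
 *           it = luMap.emplace(bbId, MiniLiveUnit{insnId, insnId, 0, 0}).first;
 *       } else if (it->second.begin > insnId) {
 *           it->second.begin = insnId;  // extend toward earlier instructions
 *       }
 *       isDef ? ++it->second.defNum : ++it->second.useNum;
 *   }
 *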
*/ + uint32 bbID = bb.GetId(); + lr->SetMemberBitArrElem(bbID); + + lrMap[regNO] = lr; +} + +bool GraphColorRegAllocator::SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, regno_t regNO, + bool isDef) +{ + if (!regOpnd.IsPhysicalRegister()) { + return false; + } + LocalRaInfo *lraInfo = localRegVec[insn.GetBB()->GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[insn.GetBB()->GetId()] = lraInfo; + } + + if (isDef) { + if (FindNotIn(pregLive, regNO)) { + for (const auto &vRegNO : vregLive) { + if (IsUnconcernedReg(vRegNO)) { + continue; + } + lrMap[vRegNO]->InsertElemToPregveto(regNO); + } + } + pregLive.erase(regNO); + if (lraInfo != nullptr) { + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } + } else { + (void)pregLive.insert(regNO); + for (const auto &vregNO : vregLive) { + if (IsUnconcernedReg(vregNO)) { + continue; + } + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(regNO); + } + + if (lraInfo != nullptr) { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + } + return true; +} + +/* + * add pregs to forbidden list of lr. If preg is in + * the live list, then it is forbidden for other vreg on the list. + */ +void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses) +{ + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regOpnd)) { + if (GetLiveRange(regNO) != nullptr) { + DEBUG_ASSERT(false, "Unconcerned reg"); + lrMap.erase(regNO); + } + return; + } + if (SetupLiveRangeByOpHandlePhysicalReg(regOpnd, insn, regNO, isDef)) { + return; + } + + CreateLiveRange(regNO, *insn.GetBB(), isDef, insn.GetId(), true); + + LiveRange *lr = GetLiveRange(regNO); + DEBUG_ASSERT(lr != nullptr, "lr should not be nullptr"); + if (isDef) { + lr->SetSpillSize((regOpnd.GetSize() <= k32) ? k32 : k64); + } + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (isDef) { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncDefNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsDef); + } else { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncUseNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsUse); + ++numUses; + } +#ifdef MOVE_COALESCE + if (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + if (opnd1.GetRegisterNumber() < kAllRegNum && !IsUnconcernedReg(opnd1)) { + lr->InsertElemToPrefs(opnd1.GetRegisterNumber() - R0); + } + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (opnd0.GetRegisterNumber() < kAllRegNum) { + lr->InsertElemToPrefs(opnd0.GetRegisterNumber() - R0); + } + } +#endif /* MOVE_COALESCE */ + if (!insn.IsSpecialIntrinsic() && insn.GetBothDefUseOpnd() != kInsnMaxOpnd) { + lr->SetDefUse(); + } +} + +/* handle live range for bb->live_out */ +void GraphColorRegAllocator::SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint) +{ + if (IsUnconcernedReg(liveOut)) { + return; + } + if (liveOut >= kAllRegNum) { + (void)vregLive.insert(liveOut); + CreateLiveRange(liveOut, bb, false, currPoint, false); + return; + } + + (void)pregLive.insert(liveOut); + for (const auto &vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(liveOut); + } + + /* See if phys reg is livein also. Then assume it span the entire bb. 
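 *
 * The pregveto updates above implement one simple rule: while a physical
 * register is live, no virtual register whose range overlaps it may be
 * colored with it. A minimal sketch of that bookkeeping (illustrative
 * containers, not Maple's):
 *
 *   #include <cstdint>
 *   #include <set>
 *   #include <unordered_map>
 *
 *   using Reg = uint32_t;
 *
 *   void VetoForLiveVregs(const std::set<Reg> &vregLive, Reg physReg,
 *                         std::unordered_map<Reg, std::set<Reg>> &pregveto)
 *   {
 *       for (Reg v : vregLive) {
 *           pregveto[v].insert(physReg);  // v must never be assigned physReg
 *       }
 *   }
 *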
*/ + if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { + return; + } + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + /* Make it a large enough so no locals can be allocated. */ + lraInfo->SetUseCntElem(liveOut, kMaxUint16); +} + +void GraphColorRegAllocator::ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) const +{ + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regNO)) { + return; + } + if (regOpnd.IsPhysicalRegister()) { + (void)pregs.insert(regNO); + } else { + (void)vregs.insert(regNO); + } +} + +void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) +{ + uint32 opndNum = insn.GetOperandSize(); + if (opndNum <= 1) { + return; + } + const InsnDesc *md = insn.GetDesc(); + std::unordered_set pregs; + std::unordered_set vregs; + + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (!onlyDef) { + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + ClassifyOperand(pregs, vregs, *op); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + ClassifyOperand(pregs, vregs, *base); + } + if (offset != nullptr) { + ClassifyOperand(pregs, vregs, *offset); + } + } else if (opnd.IsRegister()) { + ClassifyOperand(pregs, vregs, opnd); + } + } else { + if (md->GetOpndDes(i)->IsRegDef()) { + ClassifyOperand(pregs, vregs, opnd); + } + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr && !memOpnd.IsIntactIndexed()) { + ClassifyOperand(pregs, vregs, *base); + } + } + } + } + + if (vregs.empty()) { + return; + } + /* Set BBConflict and Pregveto */ + for (regno_t vregNO : vregs) { + for (regno_t conflictVregNO : vregs) { + if (conflictVregNO != vregNO) { + lrMap[vregNO]->SetConflictBitArrElem(conflictVregNO); + } + } + for (regno_t conflictPregNO : pregs) { + lrMap[vregNO]->InsertElemToPregveto(conflictPregNO); + } + } +} + +void GraphColorRegAllocator::UpdateOpndConflict(const Insn &insn, bool multiDef) +{ + /* if IsSpecialIntrinsic or IsAtomicStore, set conflicts for all opnds */ + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { + SetOpndConflict(insn, false); + return; + } + if (multiDef) { + SetOpndConflict(insn, true); + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef) +{ + uint32 numDefs = 0; + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*static_cast(opnd), insn, true, numUses); + ++numDefs; + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + DEBUG_ASSERT(numUses == 0, "should only 
be def opnd"); + if (numDefs > 1) { + multiDef = true; + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachUseOperand(Insn &insn) +{ + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && i == kAsmInputListOpnd) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*static_cast(opnd), insn, false, numUses); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveRangeByOp(*op, insn, false, numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveRangeByOp(*base, insn, false, numUses); + } + if (offset != nullptr) { + SetupLiveRangeByOp(*offset, insn, false, numUses); + } + } else { + SetupLiveRangeByOp(opnd, insn, false, numUses); + } + } + if (numUses >= AArch64Abi::kNormalUseOperandNum || insn.GetMachineOpcode() == MOP_lazy_ldr) { + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn) +{ + if (!insn.IsCall()) { + return; + } + /* def the return value */ + pregLive.erase(R0); + pregLive.erase(V0); + + /* active the parametes */ + Operand &opnd1 = insn.GetOperand(1); + if (opnd1.IsList()) { + auto &srcOpnds = static_cast(opnd1); + for (auto regOpnd : srcOpnds.GetOperands()) { + DEBUG_ASSERT(!regOpnd->IsVirtualRegister(), "not be a virtual register"); + auto physicalReg = static_cast(regOpnd->GetRegisterNumber()); + (void)pregLive.insert(physicalReg); + } + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint) +{ + for (auto lin : bb.GetLiveInRegNO()) { + if (lin < kAllRegNum) { + continue; + } + LiveRange *lr = GetLiveRange(lin); + if (lr == nullptr) { + continue; + } + auto lu = lr->FindInLuMap(bb.GetId()); + DEBUG_ASSERT(lu != lr->EndOfLuMap(), "container empty check"); + if (bb.GetFirstInsn()) { + lu->second->SetBegin(bb.GetFirstInsn()->GetId()); + } else { + /* since bb is empty, then use pointer as is */ + lu->second->SetBegin(currPoint); + } + lu->second->SetBegin(lu->second->GetBegin() - 1); + } +} + +bool GraphColorRegAllocator::UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const +{ + insn.SetId(currPoint); + if (insn.IsImmaterialInsn() || !insn.IsMachineInstruction()) { + --currPoint; + return true; + } + return false; +} + +void GraphColorRegAllocator::UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn) +{ + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCallerSaveReg(static_cast(preg))) { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + 
lr->InsertElemToCallDef(preg); + } + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->IncNumCall(); + lr->AddRef(bbId, currPoint, kIsCall); + + auto lu = lr->FindInLuMap(bbId); + if (lu != lr->EndOfLuMap()) { + lu->second->SetHasCall(true); + } + } +} + +void GraphColorRegAllocator::SetLrMustAssign(const RegOperand *regOpnd) +{ + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr) { + lr->SetMustAssigned(); + lr->SetIsNonLocal(true); + } +} + +void GraphColorRegAllocator::SetupMustAssignedLiveRanges(const Insn &insn) +{ + if (!insn.IsSpecialIntrinsic()) { + return; + } + if (insn.GetMachineOpcode() == MOP_asm) { + for (auto regOpnd : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + for (auto regOpnd : static_cast(insn.GetOperand(kAsmInputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + return; + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn.GetOperand(i); + if (!opnd->IsRegister()) { + continue; + } + auto regOpnd = static_cast(opnd); + SetLrMustAssign(regOpnd); + } +} + +/* + * For each succ bb->GetSuccs(), if bb->liveout - succ->livein is not empty, the vreg(s) is + * dead on this path (but alive on the other path as there is some use of it on the + * other path). This might be useful for optimization of reload placement later for + * splits (lr split into lr1 & lr2 and lr2 will need to reload.) + * Not for now though. + */ +void GraphColorRegAllocator::ComputeLiveRanges() +{ + bbVec.clear(); + bbVec.resize(cgFunc->NumBBs()); + + auto currPoint = static_cast(cgFunc->GetTotalNumberOfInstructions() + bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + bbVec[bb->GetId()] = bb; + bb->SetLevel(bbIdx - 1); + + pregLive.clear(); + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveRangeByRegNO(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *bb->GetLastInsn()); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { +#ifdef MOVE_COALESCE + if ((insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr) && + (!AArch64isa::IsPhysicalRegister(static_cast(insn->GetOperand(0)).GetRegisterNumber())) && + (static_cast(insn->GetOperand(0)).GetRegisterNumber() == + static_cast(insn->GetOperand(1)).GetRegisterNumber())) { + bb->RemoveInsn(*insn); + continue; + } +#endif + if (UpdateInsnCntAndSkipUseless(*insn, currPoint)) { + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *ninsn); + } + continue; + } + + bool multiDef = false; + ComputeLiveRangesForEachDefOperand(*insn, multiDef); + ComputeLiveRangesForEachUseOperand(*insn); + + UpdateOpndConflict(*insn, multiDef); + SetupMustAssignedLiveRanges(*insn); + + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint - kInsnStep, *ninsn); + } + 
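            // A worked example of the id arithmetic used here: currPoint starts
            // at (numInsns + numBBs) and is shifted left by 2 (guarded by the
            // CHECK_FATAL against INT_MAX >> 2 above), so every candidate point
            // owns four consecutive ids. A def is recorded at the insn's own id,
            // a use at id - 1 (CreateLiveRange decrements currId for uses), and
            // each machine insn consumes two ids via "currPoint -= 2" below:
            // an insn with id 40 records defs at 40 and uses at 39, and the next
            // older insn gets id 38.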
+ ComputeLiveRangesUpdateIfInsnIsCall(*insn); + /* distinguish use/def */ + currPoint -= 2; + } + ComputeLiveRangesUpdateLiveUnitInsnRange(*bb, currPoint); + /* move one more step for each BB */ + --currPoint; + } + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "After ComputeLiveRanges\n"; + PrintLiveRanges(); +#ifdef USE_LRA + if (doLRA) { + PrintLocalRAInfo("After ComputeLiveRanges"); + } +#endif /* USE_LRA */ + } +} + +/* Create a common stack space for spilling with need_spill */ +MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, SpillMemCheck check) +{ + if (spillIdx >= spillMemOpnds.size()) { + return nullptr; + } + + if (operandSpilled[spillIdx]) { + /* For this insn, spill slot already used, need to find next available slot. */ + uint32 i; + for (i = spillIdx + 1; i < kSpillMemOpndNum; ++i) { + if (!operandSpilled[i]) { + break; + } + } + CHECK_FATAL(i < kSpillMemOpndNum, "no more available spill mem slot"); + spillIdx = i; + } + if (check == kSpillMemPost) { + operandSpilled[spillIdx] = true; + } + + if (spillMemOpnds[spillIdx] == nullptr) { + regno_t reg = cgFunc->NewVReg(kRegTyInt, sizeof(int64)); + auto *a64CGFunc = static_cast(cgFunc); + spillMemOpnds[spillIdx] = a64CGFunc->GetOrCreatSpillMem(reg); + } + return spillMemOpnds[spillIdx]; +} + +bool GraphColorRegAllocator::IsLocalReg(regno_t regNO) const +{ + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + LogInfo::MapleLogger() << "unexpected regNO" << regNO; + return true; + } + return IsLocalReg(*lr); +} + +bool GraphColorRegAllocator::IsLocalReg(const LiveRange &lr) const +{ + return !lr.GetSplitLr() && (lr.GetNumBBMembers() == 1) && !lr.IsNonLocal(); +} + +bool GraphColorRegAllocator::CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const +{ + regno_t lr1RegNO = lr1.GetRegNO(); + regno_t lr2RegNO = lr2.GetRegNO(); + for (uint32 x = 0; x < kU64; ++x) { + if ((val & (1ULL << x)) != 0) { + uint32 lastBitSet = i * kU64 + x; + /* + * begin and end should be in the bb info (LU) + * Need to rethink this if. + * Under some circumstance, lr->begin can occur after lr->end. 
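 *
 * For reference, when begin <= end does hold, the test below reduces to
 * the standard closed-interval overlap check; a minimal sketch with
 * illustrative names:
 *
 *   #include <cstdint>
 *
 *   // [b1, e1] and [b2, e2] overlap unless one ends strictly before the
 *   // other begins.
 *   bool LiveUnitsOverlap(uint32_t b1, uint32_t e1, uint32_t b2, uint32_t e2)
 *   {
 *       return !(e1 < b2 || e2 < b1);
 *   }
 *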
+ */ + auto lu1 = lr1.FindInLuMap(lastBitSet); + auto lu2 = lr2.FindInLuMap(lastBitSet); + if (lu1 != lr1.EndOfLuMap() && lu2 != lr2.EndOfLuMap() && + !((lu1->second->GetBegin() < lu2->second->GetBegin() && + lu1->second->GetEnd() < lu2->second->GetBegin()) || + (lu2->second->GetBegin() < lu1->second->GetEnd() && + lu2->second->GetEnd() < lu1->second->GetBegin()))) { + lr1.SetConflictBitArrElem(lr2RegNO); + lr2.SetConflictBitArrElem(lr1RegNO); + return true; + } + } + } + return false; +} + +void GraphColorRegAllocator::CheckInterference(LiveRange &lr1, LiveRange &lr2) const +{ + uint64 bitArr[bbBuckets]; + for (uint32 i = 0; i < bbBuckets; ++i) { + bitArr[i] = lr1.GetBBMember()[i] & lr2.GetBBMember()[i]; + } + + for (uint32 i = 0; i < bbBuckets; ++i) { + uint64 val = bitArr[i]; + if (val == 0) { + continue; + } + if (CheckOverlap(val, i, lr1, lr2)) { + break; + } + } +} + +void GraphColorRegAllocator::BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, + std::vector &fpLrVec) +{ + for (auto it : lrMap) { + LiveRange *lr = it.second; + if (lr->GetRegNO() == 0) { + continue; + } +#ifdef USE_LRA + if (doLRA && IsLocalReg(*lr)) { + continue; + } +#endif /* USE_LRA */ + if (lr->GetRegType() == kRegTyInt) { + intLrVec.emplace_back(lr); + } else if (lr->GetRegType() == kRegTyFloat) { + fpLrVec.emplace_back(lr); + } else { + DEBUG_ASSERT(false, "Illegal regType in BuildInterferenceGraph"); + LogInfo::MapleLogger() << "error: Illegal regType in BuildInterferenceGraph\n"; + } + } +} + +/* + * Based on intersection of LRs. When two LRs interfere, add to each other's + * interference list. + */ +void GraphColorRegAllocator::BuildInterferenceGraph() +{ + std::vector intLrVec; + std::vector fpLrVec; + BuildInterferenceGraphSeparateIntFp(intLrVec, fpLrVec); + + /* + * Once number of BB becomes larger for big functions, the checking for interferences + * takes significant long time. 
Taking advantage of a unique bucket is one strategy to avoid such
+ * unnecessary computation.
+ */
+    auto lrSize = intLrVec.size();
+    std::vector<int32> uniqueBucketIdx(lrSize);
+    for (uint32 i = 0; i < lrSize; i++) {
+        uint32 count = 0;
+        uint32 uniqueIdx = 0;
+        LiveRange *lr = intLrVec[i];
+        for (uint32 j = 0; j < bbBuckets; ++j) {
+            if (lr->GetBBMember()[j]) {
+                count++;
+                uniqueIdx = j;
+            }
+        }
+        if (count == 1) {
+            uniqueBucketIdx[i] = static_cast<int32>(uniqueIdx);
+        } else {
+            /* LR spans multiple buckets */
+            DEBUG_ASSERT(count >= 1, "A live range can not be empty");
+            uniqueBucketIdx[i] = -1;
+        }
+    }
+
+    for (auto it1 = intLrVec.begin(); it1 != intLrVec.end(); ++it1) {
+        LiveRange *lr1 = *it1;
+        CalculatePriority(*lr1);
+        int32 lr1UniqueBucketIdx = uniqueBucketIdx[static_cast<size_t>(std::distance(intLrVec.begin(), it1))];
+        for (auto it2 = it1 + 1; it2 != intLrVec.end(); ++it2) {
+            LiveRange *lr2 = *it2;
+            if (lr1->GetRegNO() < lr2->GetRegNO()) {
+                int32 lr2UniqueBucketIdx = uniqueBucketIdx[static_cast<size_t>(std::distance(intLrVec.begin(), it2))];
+                if (lr1UniqueBucketIdx == -1 && lr2UniqueBucketIdx == -1) {
+                    CheckInterference(*lr1, *lr2);
+                } else if (((lr1UniqueBucketIdx >= 0) &&
+                            lr1->GetBBMember()[lr1UniqueBucketIdx] & lr2->GetBBMember()[lr1UniqueBucketIdx]) ||
+                           ((lr2UniqueBucketIdx >= 0) &&
+                            lr1->GetBBMember()[lr2UniqueBucketIdx] & lr2->GetBBMember()[lr2UniqueBucketIdx])) {
+                    CheckInterference(*lr1, *lr2);
+                }
+            }
+        }
+    }
+
+    // Might need the same unique-bucket pruning as for intLrVec
+    for (auto it1 = fpLrVec.begin(); it1 != fpLrVec.end(); ++it1) {
+        LiveRange *lr1 = *it1;
+        CalculatePriority(*lr1);
+        for (auto it2 = it1 + 1; it2 != fpLrVec.end(); ++it2) {
+            LiveRange *lr2 = *it2;
+            if (lr1->GetRegNO() < lr2->GetRegNO()) {
+                CheckInterference(*lr1, *lr2);
+            }
+        }
+    }
+
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << "After BuildInterferenceGraph\n";
+        PrintLiveRanges();
+    }
+}
+
+void GraphColorRegAllocator::SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO)
+{
+    DEBUG_ASSERT(bbID < bbRegInfo.size(), "index out of range in GraphColorRegAllocator::SetBBInfoGlobalAssigned");
+    BBAssignInfo *bbInfo = bbRegInfo[bbID];
+    if (bbInfo == nullptr) {
+        bbInfo = memPool->New<BBAssignInfo>(alloc);
+        bbRegInfo[bbID] = bbInfo;
+        bbInfo->InitGlobalAssigned();
+    }
+    bbInfo->InsertElemToGlobalsAssigned(regNO);
+}
+
+bool GraphColorRegAllocator::HaveAvailableColor(const LiveRange &lr, uint32 num) const
+{
+    return ((lr.GetRegType() == kRegTyInt && num < intRegNum) || (lr.GetRegType() == kRegTyFloat && num < fpRegNum));
+}
+
+/*
+ * If the number of LRs on the interference list is less than #colors, then
+ * this LR can be trivially assigned a register. Otherwise it is constrained.
+ * Separate the LRs based on whether they are constrained or not.
+ *
+ * The unconstrained LRs are colored last.
+ *
+ * Compute a sorted list of constrained LRs based on priority cost.
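 *
 * The "less than #colors" test is the classic Kempe criterion; as a
 * minimal sketch (illustrative names, with k standing for intRegNum or
 * fpRegNum as appropriate):
 *
 *   #include <cstddef>
 *
 *   // Fewer occupied/vetoed colors than k guarantees a free color remains.
 *   bool IsTriviallyColorable(size_t numConflicts, size_t numVetoed, size_t k)
 *   {
 *       return numConflicts + numVetoed < k;
 *   }
 *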
+ */ +void GraphColorRegAllocator::Separate() +{ + for (auto it : lrMap) { + LiveRange *lr = it.second; +#ifdef USE_LRA + if (doLRA && IsLocalReg(*lr)) { + continue; + } +#endif /* USE_LRA */ +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && ((lr->GetNumDefs() <= 1) && (lr->GetNumUses() <= 1) && (lr->GetNumCall() > 0)) && + (lr->GetFrequency() <= (cgFunc->GetFirstBB()->GetFrequency() << 1))) { + if (lr->GetRegType() == kRegTyInt) { + intDelayed.emplace_back(lr); + } else { + fpDelayed.emplace_back(lr); + } + continue; + } +#endif /* OPTIMIZE_FOR_PROLOG */ + if (lr->GetRematLevel() != rematOff) { + unconstrained.emplace_back(lr); + } else if (HaveAvailableColor(*lr, lr->GetNumBBConflicts() + static_cast(lr->GetPregvetoSize()) + + static_cast(lr->GetForbiddenSize()))) { + if (lr->GetPrefs().size()) { + unconstrainedPref.emplace_back(lr); + } else { + unconstrained.emplace_back(lr); + } + } else if (lr->IsMustAssigned()) { + mustAssigned.emplace_back(lr); + } else { + if (lr->GetPrefs().size() && lr->GetNumCall() == 0) { + unconstrainedPref.emplace_back(lr); + } else { + constrained.emplace_back(lr); + } + } + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "Unconstrained : "; + for (auto lr : unconstrainedPref) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + for (auto lr : unconstrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "Constrained : "; + for (auto lr : constrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "mustAssigned : "; + for (auto lr : mustAssigned) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + } +} + +MapleVector::iterator GraphColorRegAllocator::GetHighPriorityLr(MapleVector &lrSet) const +{ + auto it = lrSet.begin(); + auto highestIt = it; + LiveRange *startLr = *it; + float maxPrio = startLr->GetPriority(); + ++it; + for (; it != lrSet.end(); ++it) { + LiveRange *lr = *it; + if (lr->GetPriority() > maxPrio) { + maxPrio = lr->GetPriority(); + highestIt = it; + } + } + return highestIt; +} + +void GraphColorRegAllocator::UpdateForbiddenForNeighbors(const LiveRange &lr) const +{ + auto updateForbidden = [&lr, this](regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + DEBUG_ASSERT(newLr != nullptr, "newLr should not be nullptr"); + if (!newLr->GetPregveto(lr.GetAssignedRegNO())) { + newLr->InsertElemToForbidden(lr.GetAssignedRegNO()); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateForbidden); +} + +void GraphColorRegAllocator::UpdatePregvetoForNeighbors(const LiveRange &lr) const +{ + auto updatePregveto = [&lr, this](regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + DEBUG_ASSERT(newLr != nullptr, "newLr should not be nullptr"); + newLr->InsertElemToPregveto(lr.GetAssignedRegNO()); + newLr->EraseElemFromForbidden(lr.GetAssignedRegNO()); + }; + ForEachRegArrElem(lr.GetBBConflict(), updatePregveto); +} + +/* + * For cases with only one def/use and crosses a call. + * It might be more beneficial to spill vs save/restore in prolog/epilog. + * But if the callee register is already used, then it is ok to reuse it again. + * Or in certain cases, just use the callee. 
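 *
 * A back-of-the-envelope version of that trade-off (illustrative weights,
 * not the allocator's actual cost model):
 *
 *   #include <cstdint>
 *
 *   uint32_t CalleeSaveCost() { return 2; }  // one str/ldr pair in prolog/epilog
 *   uint32_t SpillCost(uint32_t numDefs, uint32_t numUses, uint32_t execFreq)
 *   {
 *       return (numDefs + numUses) * execFreq;  // one memory op per ref
 *   }
 *
 * A callee-saved register pays off when CalleeSaveCost() is below the
 * spill cost, and reusing an already-saved callee register is free, which
 * is why the code below first checks calleeUsed.
 *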
+ */ +bool GraphColorRegAllocator::ShouldUseCallee(LiveRange &lr, const MapleSet &calleeUsed, + const MapleVector &delayed) const +{ + if (FindIn(calleeUsed, lr.GetAssignedRegNO())) { + return true; + } + if (AArch64Abi::IsCalleeSavedReg(static_cast(lr.GetAssignedRegNO())) && + (calleeUsed.size() % kDivide2) != 0) { + return true; + } + if (delayed.size() > 1 && calleeUsed.empty()) { + /* If there are more than 1 vreg that can benefit from callee, use callee */ + return true; + } + lr.SetAssignedRegNO(0); + return false; +} + +void GraphColorRegAllocator::AddCalleeUsed(regno_t regNO, RegType regType) +{ + DEBUG_ASSERT(AArch64isa::IsPhysicalRegister(regNO), "regNO should be physical register"); + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(regNO)); + if (isCalleeReg) { + if (regType == kRegTyInt) { + (void)intCalleeUsed.insert(regNO); + } else { + (void)fpCalleeUsed.insert(regNO); + } + } +} + +regno_t GraphColorRegAllocator::FindColorForLr(const LiveRange &lr) const +{ + regno_t reg = 0; + regno_t base; + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + const MapleSet *nextRegSet = nullptr; + if (regType == kRegTyInt) { + if (lr.GetNumCall() != 0) { + currRegSet = &intCalleeRegSet; + nextRegSet = &intCallerRegSet; + } else { + currRegSet = &intCallerRegSet; + nextRegSet = &intCalleeRegSet; + } + base = R0; + } else { + if (lr.GetNumCall() != 0) { + currRegSet = &fpCalleeRegSet; + nextRegSet = &fpCallerRegSet; + } else { + currRegSet = &fpCallerRegSet; + nextRegSet = &fpCalleeRegSet; + } + base = V0; + } + +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto &it : lr.GetPrefs()) { + reg = it + base; + if ((FindIn(*currRegSet, reg) || FindIn(*nextRegSet, reg)) && !lr.GetForbidden(reg) && + !lr.GetPregveto(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto &it : *currRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + /* Failed to allocate in first choice. Try 2nd choice. */ + for (const auto &it : *nextRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + DEBUG_ASSERT(false, "Failed to find a register"); + return 0; +} + +regno_t GraphColorRegAllocator::TryToAssignCallerSave(const LiveRange &lr) const +{ + regno_t base; + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + if (regType == kRegTyInt) { + currRegSet = &intCallerRegSet; + base = R0; + } else { + currRegSet = &fpCallerRegSet; + base = V0; + } + + regno_t reg = 0; +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto &it : lr.GetPrefs()) { + reg = it + base; + if ((FindIn(*currRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto &it : *currRegSet) { + reg = it + base; + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + return 0; +} + +/* + * If forbidden list has more registers than max of all BB's local reg + * requirement, then LR can be colored. + * Update LR's color if success, return true, else return false. + */ +bool GraphColorRegAllocator::AssignColorToLr(LiveRange &lr, bool isDelayed) +{ + if (lr.GetAssignedRegNO() > 0) { + /* Already assigned. 
*/ + return true; + } + if (!HaveAvailableColor(lr, lr.GetForbiddenSize() + lr.GetPregvetoSize())) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned fail to R" << lr.GetRegNO() << "\n"; + } + return false; + } + regno_t callerSaveReg = 0; + regno_t reg = FindColorForLr(lr); + if (lr.GetNumCall() != 0 && !lr.GetCrossCall()) { + callerSaveReg = TryToAssignCallerSave(lr); + bool prefCaller = AArch64Abi::IsCalleeSavedReg(static_cast(reg)) && + intCalleeUsed.find(reg) == intCalleeUsed.end() && + fpCalleeUsed.find(reg) == fpCalleeUsed.end(); + if (callerSaveReg != 0 && (prefCaller || !AArch64Abi::IsCalleeSavedReg(static_cast(reg)))) { + reg = callerSaveReg; + lr.SetNumCall(0); + } + } + lr.SetAssignedRegNO(reg); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned " << lr.GetAssignedRegNO() << " to R" << lr.GetRegNO() << "\n"; + } + if (lr.GetAssignedRegNO() == 0) { + return false; + } +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && isDelayed) { + if ((lr.GetRegType() == kRegTyInt && !ShouldUseCallee(lr, intCalleeUsed, intDelayed)) || + (lr.GetRegType() == kRegTyFloat && !ShouldUseCallee(lr, fpCalleeUsed, fpDelayed))) { + return false; + } + } +#endif /* OPTIMIZE_FOR_PROLOG */ + + AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType()); + + UpdateForbiddenForNeighbors(lr); + ForEachBBArrElem(lr.GetBBMember(), + [&lr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, lr.GetAssignedRegNO()); }); + return true; +} + +void GraphColorRegAllocator::PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, + std::set &candidateInLoop, + std::set &defInLoop) +{ + if (bb.GetInternalFlag1()) { + /* already visited */ + return; + } + + bb.SetInternalFlag1(true); + auto lu = lr.FindInLuMap(bb.GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + if (bb.GetLoop() != nullptr && FindIn(candidateInLoop, bb.GetLoop())) { + /* + * Upward search has found a loop. Regardless of def/use + * The loop members must be included in the new LR. + */ + remove = false; + } else { + /* No ref in this bb. mark as potential remove. */ + bb.SetInternalFlag2(true); + return; + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + } + } + + if (bb.GetLoop() != nullptr) { + /* With a def in loop, cannot prune that loop */ + if (defNum > 0) { + (void)defInLoop.insert(bb.GetLoop()); + } + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb.GetLoop()); + } + for (auto pred : bb.GetPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } + for (auto pred : bb.GetEhPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::FindBBSharedInSplit(LiveRange &lr, + const std::set &candidateInLoop, + std::set &defInLoop) +{ + /* A loop might be split into two. Need to see over the entire LR if there is a def in the loop. 
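 *
 * The underlying rule, as a minimal sketch (illustrative types): a loop
 * containing a def of the vreg pins the loop's blocks inside the live
 * range, because the value travels around the back edge.
 *
 *   #include <set>
 *
 *   bool CanPruneLoopBlocks(const std::set<int> &loopsWithDef, int loopId)
 *   {
 *       return loopsWithDef.count(loopId) == 0;  // prunable only if no def inside
 *   }
 *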
*/ + auto FindBBSharedFunc = [&lr, &candidateInLoop, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + auto lu = lr.FindInLuMap(bb->GetId()); + if (lu != lr.EndOfLuMap() && lu->second->GetDefNum() > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + } + }; + ForEachBBArrElem(lr.GetBBMember(), FindBBSharedFunc); +} + +/* + * Backward traversal of the top part of the split LR. + * Prune the part of the LR that has no downward exposing references. + * Take into account of loops and loop carried dependencies. + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ +void GraphColorRegAllocator::ComputeBBForNewSplit(LiveRange &newLr, LiveRange &origLr) +{ + /* + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. */ + std::set defInLoop; + std::set smember; + ForEachBBArrElem(newLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + for (auto bbIt = smember.rbegin(); bbIt != smember.rend(); ++bbIt) { + BB *bb = *bbIt; + if (bb->GetInternalFlag1() != 0) { + continue; + } + PruneLrForSplit(newLr, *bb, true, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(origLr, candidateInLoop, defInLoop); + auto pruneTopLr = [this, &newLr, &candidateInLoop, &defInLoop](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + return; + } + if (bb->GetLoop() != nullptr || FindNotIn(defInLoop, bb->GetLoop())) { + /* defInLoop should be a subset of candidateInLoop. remove. */ + newLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(newLr.GetBBMember(), pruneTopLr); /* prune the top LR. 
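 *
 * The shape of that pruning, as a self-contained sketch (illustrative CFG
 * types, not Maple's): walk predecessors backward, dropping blocks until
 * the first block with a real def/use is seen; from then on every block
 * reached must stay in the live range.
 *
 *   #include <vector>
 *
 *   struct MiniBB {
 *       bool hasRef = false;      // any def or use of the vreg in this block
 *       std::vector<int> preds;
 *   };
 *
 *   void PruneUp(const std::vector<MiniBB> &cfg, int bbId, std::vector<bool> &keep,
 *                std::vector<bool> &visited, bool remove)
 *   {
 *       if (visited[bbId]) {
 *           return;
 *       }
 *       visited[bbId] = true;
 *       if (remove && !cfg[bbId].hasRef) {
 *           keep[bbId] = false;   // no downward-exposed ref yet: removable
 *       } else {
 *           remove = false;       // found a ref: this block and its preds stay
 *           keep[bbId] = true;
 *       }
 *       for (int p : cfg[bbId].preds) {
 *           PruneUp(cfg, p, keep, visited, remove);
 *       }
 *   }
 *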
*/ +} + +bool GraphColorRegAllocator::UseIsUncovered(const BB &bb, const BB &startBB, std::vector &visitedBB) +{ + CHECK_FATAL(bb.GetId() < visitedBB.size(), "index out of range"); + visitedBB[bb.GetId()] = true; + for (auto pred : bb.GetPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + for (auto pred : bb.GetEhPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + return false; +} + +void GraphColorRegAllocator::FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, bool &remove, + std::set &candidateInLoop, + std::set &defInLoop) +{ + BB *bb = bbInfo.GetCandidateBB(); + const BB *startBB = bbInfo.GetStartBB(); + if (bb->GetInternalFlag1() != 0) { + /* already visited */ + return; + } + for (auto pred : bb->GetPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + for (auto pred : bb->GetEhPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + + bb->SetInternalFlag1(true); + auto lu = lr.FindInLuMap(bb->GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + std::vector visitedBB(cgFunc->GetAllBBs().size(), false); + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + /* No ref in this bb. mark as potential remove. */ + bb->SetInternalFlag2(true); + if (bb->GetLoop() != nullptr) { + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb->GetLoop()); + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + /* A potential point for a upward exposing use. (might be a def). */ + lu->second->SetNeedReload(true); + } + } else if ((defNum > 0 || useNum > 0) && UseIsUncovered(*bb, *startBB, visitedBB)) { + lu->second->SetNeedReload(true); + } + + /* With a def in loop, cannot prune that loop */ + if (bb->GetLoop() != nullptr && defNum > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + + for (auto succ : bb->GetSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } + for (auto succ : bb->GetEhSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::ClearLrBBFlags(const std::set &member) const +{ + for (auto bb : member) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(0); + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + } +} + +/* + * Downward traversal of the bottom part of the split LR. + * Prune the part of the LR that has no upward exposing references. + * Take into account of loops and loop carried dependencies. + */ +void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &origLr) +{ + /* The candidate bb to be removed, if in a loop, store that info. */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. 
*/ + std::set defInLoop; + SplitBBInfo bbInfo; + bool remove = true; + + std::set smember; + ForEachBBArrElem(origLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + ClearLrBBFlags(smember); + for (auto bb : smember) { + if (bb->GetInternalFlag1() != 0) { + continue; + } + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(true); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(true); + } + bbInfo.SetCandidateBB(*bb); + bbInfo.SetStartBB(*bb); + FindUseForSplit(origLr, bbInfo, remove, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(newLr, candidateInLoop, defInLoop); + auto pruneLrFunc = [&origLr, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindNotIn(defInLoop, bb->GetLoop())) { + origLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(origLr.GetBBMember(), pruneLrFunc); +} + +/* + * There is at least one available color for this BB from the neighbors + * minus the ones reserved for local allocation. + * bbAdded : The new BB to be added into the split LR if color is available. + * conflictRegs : Reprent the LR before adding the bbAdded. These are the + * forbidden regs before adding the new BBs. + * Side effect : Adding the new forbidden regs from bbAdded into + * conflictRegs if the LR can still be colored. + */ +bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdded, + std::unordered_set &conflictRegs) +{ + RegType type = lr.GetRegType(); + + std::unordered_set newConflict; + auto updateConflictFunc = [&bbAdded, &conflictRegs, &newConflict, &lr, this](regno_t regNO) { + /* check the real conflict in current bb */ + LiveRange *conflictLr = lrMap[regNO]; + /* + * If the bb to be added to the new LR has an actual + * conflict with another LR, and if that LR has already + * assigned a color that is not in the conflictRegs, + * then add it as a newConflict. + */ + if (IsBitArrElemSet(conflictLr->GetBBMember(), bbAdded.GetId())) { + regno_t confReg = conflictLr->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } else if (conflictLr->GetSplitLr() != nullptr && + IsBitArrElemSet(conflictLr->GetSplitLr()->GetBBMember(), bbAdded.GetId())) { + /* + * The after split LR is split into pieces, and this ensures + * the after split color is taken into consideration. + */ + regno_t confReg = conflictLr->GetSplitLr()->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateConflictFunc); + + size_t numRegs = newConflict.size() + lr.GetPregvetoSize() + conflictRegs.size(); + + bool canColor = false; + if (type == kRegTyInt) { + if (numRegs < intRegNum) { + canColor = true; + } + } else if (numRegs < fpRegNum) { + canColor = true; + } + + if (canColor) { + for (auto regNO : newConflict) { + (void)conflictRegs.insert(regNO); + } + } + + /* Update all the registers conflicting when adding thew new bb. */ + return canColor; +} + +/* Support function for LR split. Move one BB from LR1 to LR2. 
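 *
 * The growth test in LrCanBeColored above, reduced to its core (types are
 * illustrative; k stands for the number of allocatable colors): a block
 * may join the split LR only if the colors its neighbors already hold,
 * plus the vetoes, still leave a color free; on success those colors are
 * folded into the running conflict set.
 *
 *   #include <cstddef>
 *   #include <cstdint>
 *   #include <unordered_set>
 *
 *   bool TryAddBlock(const std::unordered_set<uint32_t> &colorsInBlock,
 *                    std::unordered_set<uint32_t> &conflictColors,
 *                    size_t numVetoed, size_t k)
 *   {
 *       std::unordered_set<uint32_t> fresh;
 *       for (uint32_t c : colorsInBlock) {
 *           if (conflictColors.count(c) == 0) {
 *               fresh.insert(c);
 *           }
 *       }
 *       if (conflictColors.size() + fresh.size() + numVetoed >= k) {
 *           return false;         // this block would exhaust all colors
 *       }
 *       conflictColors.insert(fresh.begin(), fresh.end());
 *       return true;
 *   }
 *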
 */
+void GraphColorRegAllocator::MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const
+{
+    /* initialize backward traversal flag for the bb pruning phase */
+    bb.SetInternalFlag1(false);
+    /* initialize bb removal marker */
+    bb.SetInternalFlag2(false);
+    /* Insert BB into new LR */
+    uint32 bbID = bb.GetId();
+    newLr.SetMemberBitArrElem(bbID);
+
+    /* Move LU from old LR to new LR */
+    auto luIt = oldLr.FindInLuMap(bb.GetId());
+    if (luIt != oldLr.EndOfLuMap()) {
+        newLr.SetElemToLuMap(luIt->first, *(luIt->second));
+        oldLr.EraseLuMap(luIt);
+    }
+
+    /* Remove BB from old LR */
+    oldLr.UnsetMemberBitArrElem(bbID);
+}
+
+/* Is any loop in the set nested inside the given loop? */
+bool GraphColorRegAllocator::ContainsLoop(const CGFuncLoops &loop,
+                                          const std::set<CGFuncLoops *> &loops) const
+{
+    for (const CGFuncLoops *lp : loops) {
+        while (lp != nullptr) {
+            if (lp == &loop) {
+                return true;
+            }
+            lp = lp->GetOuterLoop();
+        }
+    }
+    return false;
+}
+
+void GraphColorRegAllocator::GetAllLrMemberLoops(LiveRange &lr, std::set<CGFuncLoops *> &loops)
+{
+    auto GetLrMemberFunc = [&loops, this](uint32 bbID) {
+        BB *bb = bbVec[bbID];
+        CGFuncLoops *loop = bb->GetLoop();
+        if (loop != nullptr) {
+            (void)loops.insert(loop);
+        }
+    };
+    ForEachBBArrElem(lr.GetBBMember(), GetLrMemberFunc);
+}
+
+bool GraphColorRegAllocator::SplitLrShouldSplit(LiveRange &lr)
+{
+    if (lr.GetSplitLr() != nullptr || lr.GetNumBBMembers() == 1) {
+        return false;
+    }
+    /* Need to split within the same hierarchy */
+    uint32 loopID = 0xFFFFFFFF; /* loopID starts at the maximum value and is assigned in the lambda below */
+    bool needSplit = true;
+    auto setNeedSplit = [&needSplit, &loopID, this](uint32 bbID) -> bool {
+        BB *bb = bbVec[bbID];
+        if (loopID == 0xFFFFFFFF) {
+            if (bb->GetLoop() != nullptr) {
+                loopID = static_cast<uint32>(bb->GetLoop()->GetHeader()->GetId());
+            } else {
+                loopID = 0;
+            }
+        } else if ((bb->GetLoop() != nullptr && bb->GetLoop()->GetHeader()->GetId() != loopID) ||
+                   (bb->GetLoop() == nullptr && loopID != 0)) {
+            needSplit = false;
+            return true;
+        }
+        return false;
+    };
+    ForEachBBArrElemWithInterrupt(lr.GetBBMember(), setNeedSplit);
+    return needSplit;
+}
+
+/*
+ * When a BB in the LR has no def or use in it, then potentially
+ * there is no conflict within these BBs for the new LR, since
+ * the new LR will need to spill the defs which terminates the
+ * new LR unless there is a use later which extends the new LR.
+ * There is no need to compute the conflicting register set unless
+ * there is a def or use.
+ * It is assumed that the new LR is extended to the def or use.
+ * Initially newLr is empty; BBs are then added as long as the
+ * result can still be colored.
+ * Return true if there is a split.
+ */
+bool GraphColorRegAllocator::SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr,
+                                                    std::unordered_set<regno_t> &conflictRegs)
+{
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << "start split lr for vreg " << lr.GetRegNO() << "\n";
+    }
+    std::set<BB *> smember;
+    ForEachBBArrElem(lr.GetBBMember(), [&smember, this](uint32 bbID) { (void)smember.insert(bbVec[bbID]); });
+    for (auto bb : smember) {
+        if (!LrCanBeColored(lr, *bb, conflictRegs)) {
+            break;
+        }
+        MoveLrBBInfo(lr, newLr, *bb);
+    }
+
+    /* return true if the split is successful */
+    return newLr.GetNumBBMembers() != 0;
+}
+
+void GraphColorRegAllocator::SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr,
+                                                const std::set<CGFuncLoops *> &origLoops,
+                                                const std::set<CGFuncLoops *> &newLoops)
+{
+    /*
+     * bb in loops might need a reload due to loop carried dependency.
+     * Compute this before pruning the LRs.
+     * if there is no re-definition, then reload is not necessary.
+     * Part of the new LR region after the last reference is
+     * no longer in the LR. Remove those bb.
+     */
+    ComputeBBForNewSplit(newLr, lr);
+
+    /* With new LR, recompute conflict. */
+    auto recomputeConflict = [&lr, &newLr, this](uint32 bbID) {
+        auto lrFunc = [&newLr, &bbID, this](regno_t regNO) {
+            LiveRange *confLrVec = lrMap[regNO];
+            if (IsBitArrElemSet(confLrVec->GetBBMember(), bbID) ||
+                (confLrVec->GetSplitLr() != nullptr &&
+                 IsBitArrElemSet(confLrVec->GetSplitLr()->GetBBMember(), bbID))) {
+                /*
+                 * The new LR getting the interference does not mean the
+                 * old LR can remove the interference.
+                 * The old LR's interference will be handled at the end of the split.
+                 */
+                newLr.SetConflictBitArrElem(regNO);
+            }
+        };
+        ForEachRegArrElem(lr.GetBBConflict(), lrFunc);
+    };
+    ForEachBBArrElem(newLr.GetBBMember(), recomputeConflict);
+
+    /* Update bb/loop info the same way as for the new LR. */
+    ComputeBBForOldSplit(newLr, lr);
+    /* Update the conflict interference for the original LR later. */
+    for (auto loop : newLoops) {
+        if (!ContainsLoop(*loop, origLoops)) {
+            continue;
+        }
+        for (auto bb : loop->GetLoopMembers()) {
+            if (!IsBitArrElemSet(newLr.GetBBMember(), bb->GetId())) {
+                continue;
+            }
+            LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bb->GetId());
+            if (lu->GetUseNum() != 0) {
+                lu->SetNeedReload(true);
+            }
+        }
+    }
+}
+
+void GraphColorRegAllocator::SplitLrFixNewLrCallsAndRlod(LiveRange &newLr,
+                                                         const std::set<CGFuncLoops*> &origLoops)
+{
+    /* If a 2nd-split loop is before the bb of the 1st split, a reload may be needed. */
+    newLr.SetNumCall(0);
+    auto fixCallsAndRlod = [&newLr, &origLoops, this](uint32 bbID) {
+        BB *bb = bbVec[bbID];
+        for (auto loop : origLoops) {
+            if (loop->GetHeader()->GetLevel() >= bb->GetLevel()) {
+                continue;
+            }
+            LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID);
+            if (lu->GetUseNum() != 0) {
+                lu->SetNeedReload(true);
+            }
+        }
+        LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID);
+        if (lu->HasCall()) {
+            newLr.IncNumCall();
+        }
+    };
+    ForEachBBArrElem(newLr.GetBBMember(), fixCallsAndRlod);
+}
+
+void GraphColorRegAllocator::SplitLrFixOrigLrCalls(LiveRange &lr) const
+{
+    lr.SetNumCall(0);
+    auto fixOrigCalls = [&lr](uint32 bbID) {
+        LiveUnit *lu = lr.GetLiveUnitFromLuMap(bbID);
+        if (lu->HasCall()) {
+            lr.IncNumCall();
+        }
+    };
+    ForEachBBArrElem(lr.GetBBMember(), fixOrigCalls);
+}
+
+void GraphColorRegAllocator::SplitLrUpdateInterference(LiveRange &lr)
+{
+    /*
+     * newLr is now a separate LR from the original lr.
+     * Update the interference info.
+     * Also recompute the forbidden info.
+     */
+    lr.ClearForbidden();
+    auto updateInterference = [&lr, this](regno_t regNO) {
+        LiveRange *confLrVec = lrMap[regNO];
+        if (IsBBsetOverlap(lr.GetBBMember(), confLrVec->GetBBMember(), bbBuckets)) {
+            /* interfere */
+            if (confLrVec->GetAssignedRegNO() && !lr.GetPregveto(confLrVec->GetAssignedRegNO())) {
+                lr.InsertElemToForbidden(confLrVec->GetAssignedRegNO());
+            }
+        } else {
+            /* no interference */
+            lr.UnsetConflictBitArrElem(regNO);
+        }
+    };
+    ForEachRegArrElem(lr.GetBBConflict(), updateInterference);
+}
+
+void GraphColorRegAllocator::SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr,
+                                                  std::unordered_set<regno_t> &conflictRegs) const
+{
+    for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) {
+        if (origLr.GetPregveto(regNO)) {
+            newLr.InsertElemToPregveto(regNO);
+        }
+    }
+    for (auto regNO : conflictRegs) {
+        if (!newLr.GetPregveto(regNO)) {
+            newLr.InsertElemToForbidden(regNO);
+        }
+    }
+}
+
+void GraphColorRegAllocator::SplitLrErrorCheckAndDebug(const LiveRange &origLr) const
+{
+    if (origLr.GetNumBBMembers() == 0) {
+        DEBUG_ASSERT(origLr.GetNumBBConflicts() == 0, "Error: member and conflict not match");
+    }
+}
+
+/*
+ * Pick a starting BB, then expand to maximize the new LR.
+ * Return the new LR.
+ */
+void GraphColorRegAllocator::SplitLr(LiveRange &lr)
+{
+    if (!SplitLrShouldSplit(lr)) {
+        return;
+    }
+    LiveRange *newLr = NewLiveRange();
+    /*
+     * For the new LR, whenever a BB with either a def or
+     * use is added, add the registers that the neighbor
+     * is using to the conflict register set, indicating that these
+     * registers cannot be used for the new LR's color.
+     */
+    std::unordered_set<regno_t> conflictRegs;
+    if (!SplitLrFindCandidateLr(lr, *newLr, conflictRegs)) {
+        return;
+    }
+#ifdef REUSE_SPILLMEM
+    /* Copy the original conflict vector for spill reuse optimization */
+    lr.SetOldConflict(memPool->NewArray<uint64>(regBuckets));
+    for (uint32 i = 0; i < regBuckets; ++i) {
+        lr.SetBBConflictElem(static_cast<int32>(i), lr.GetBBConflictElem(static_cast<int32>(i)));
+    }
+#endif /* REUSE_SPILLMEM */
+
+    std::set<CGFuncLoops*> newLoops;
+    std::set<CGFuncLoops*> origLoops;
+    GetAllLrMemberLoops(*newLr, newLoops);
+    GetAllLrMemberLoops(lr, origLoops);
+    SplitLrHandleLoops(lr, *newLr, origLoops, newLoops);
+    SplitLrFixNewLrCallsAndRlod(*newLr, origLoops);
+    SplitLrFixOrigLrCalls(lr);
+
+    SplitLrUpdateRegInfo(lr, *newLr, conflictRegs);
+
+    CalculatePriority(lr);
+    /* At this point, newLr should be unconstrained. */
+    lr.SetSplitLr(*newLr);
+
+    newLr->SetRegNO(lr.GetRegNO());
+    newLr->SetRegType(lr.GetRegType());
+    newLr->SetID(lr.GetID());
+    newLr->CopyRematerialization(lr);
+    CalculatePriority(*newLr);
+    SplitLrUpdateInterference(lr);
+    newLr->SetAssignedRegNO(FindColorForLr(*newLr));
+
+    AddCalleeUsed(newLr->GetAssignedRegNO(), newLr->GetRegType());
+
+    /* For the new LR, update assignment for local RA */
+    ForEachBBArrElem(newLr->GetBBMember(),
+                     [&newLr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, newLr->GetAssignedRegNO()); });
+
+    UpdatePregvetoForNeighbors(*newLr);
+
+    SplitLrErrorCheckAndDebug(lr);
+}
+
+void GraphColorRegAllocator::ColorForOptPrologEpilog()
+{
+#ifdef OPTIMIZE_FOR_PROLOG
+    if (!doOptProlog) {
+        return;
+    }
+    for (auto lr : intDelayed) {
+        if (!AssignColorToLr(*lr, true)) {
+            lr->SetSpilled(true);
+        }
+    }
+    for (auto lr : fpDelayed) {
+        if (!AssignColorToLr(*lr, true)) {
+            lr->SetSpilled(true);
+        }
+    }
+#endif
+}
+
+/*
+ * From the sorted list of constrained LRs, pick the most profitable LR.
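+ * ("Most profitable" is the highest-priority LR, i.e. the one returned by
+ * GetHighPriorityLr in the loop below.)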
+ * Split the LR into LRnew1 and LRnew2, where LRnew1 has the maximum number of
+ * BBs and is colorable.
+ * The starting BB for the traversal must have a color available.
+ *
+ * Assign a color, update the neighbors' forbidden lists.
+ *
+ * Update the conflict graph by changing the interference lists.
+ * When another LR conflicts with both LRnew1 and LRnew2, its
+ * #neighbors increases. If it was unconstrained, check whether
+ * it is still unconstrained. Move it to constrained if necessary.
+ *
+ * Color the unconstrained LRs.
+ */
+void GraphColorRegAllocator::SplitAndColorForEachLr(MapleVector<LiveRange*> &targetLrVec)
+{
+    while (!targetLrVec.empty()) {
+        auto highestIt = GetHighPriorityLr(targetLrVec);
+        LiveRange *lr = *highestIt;
+        /* check whether the unconstrained LRs in lr's conflict set have turned constrained */
+        if (highestIt != targetLrVec.end()) {
+            targetLrVec.erase(highestIt);
+        } else {
+            DEBUG_ASSERT(false, "Error: not in targetLrVec");
+        }
+        if (AssignColorToLr(*lr)) {
+            continue;
+        }
+#ifdef USE_SPLIT
+        SplitLr(*lr);
+#endif /* USE_SPLIT */
+        /*
+         * When an LR is spilled, it potentially has no conflicts as
+         * each def/use is spilled/reloaded.
+         */
+#ifdef COLOR_SPLIT
+        if (!AssignColorToLr(*lr)) {
+#endif /* COLOR_SPLIT */
+            lr->SetSpilled(true);
+            hasSpill = true;
+#ifdef COLOR_SPLIT
+        }
+#endif /* COLOR_SPLIT */
+    }
+}
+
+void GraphColorRegAllocator::SplitAndColor()
+{
+    /* handle mustAssigned */
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << " starting mustAssigned : \n";
+    }
+    SplitAndColorForEachLr(mustAssigned);
+
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << " starting unconstrainedPref : \n";
+    }
+    /* assign colors to the unconstrained preferred LRs */
+    SplitAndColorForEachLr(unconstrainedPref);
+
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << " starting constrained : \n";
+    }
+    /* handle constrained */
+    SplitAndColorForEachLr(constrained);
+
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << " starting unconstrained : \n";
+    }
+    /* assign colors to the unconstrained LRs */
+    SplitAndColorForEachLr(unconstrained);
+
+#ifdef OPTIMIZE_FOR_PROLOG
+    if (doOptProlog) {
+        ColorForOptPrologEpilog();
+    }
+#endif /* OPTIMIZE_FOR_PROLOG */
+}
+
+void GraphColorRegAllocator::HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt)
+{
+    /* vreg, get a reg for it if not assigned already. */
+    if (!localRa.IsInRegAssigned(regNO, isInt) && !localRa.isInRegSpilled(regNO, isInt)) {
+        /* find an available phys reg */
+        bool found = false;
+        LiveRange *lr = lrMap[regNO];
+        regno_t maxIntReg = R0 + MaxIntPhysRegNum();
+        regno_t maxFpReg = V0 + MaxFloatPhysRegNum();
+        regno_t startReg = isInt ? R0 : V0;
+        regno_t endReg = isInt ? maxIntReg : maxFpReg;
+        for (uint32 preg = startReg; preg <= endReg; ++preg) {
+            if (!localRa.IsPregAvailable(preg, isInt)) {
+                continue;
+            }
+            if (lr->GetNumCall() != 0 && !AArch64Abi::IsCalleeSavedReg(static_cast<AArch64reg>(preg))) {
+                continue;
+            }
+            if (lr->GetPregveto(preg)) {
+                continue;
+            }
+            regno_t assignedReg = preg;
+            localRa.ClearPregs(assignedReg, isInt);
+            localRa.SetPregUsed(assignedReg, isInt);
+            localRa.SetRegAssigned(regNO, isInt);
+            localRa.SetRegAssignmentMap(isInt, regNO, assignedReg);
+            lr->SetAssignedRegNO(assignedReg);
+            found = true;
+            break;
+        }
+        if (!found) {
+            localRa.SetRegSpilled(regNO, isInt);
+            lr->SetSpilled(true);
+        }
+    }
+}
+
+void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, bool isDef,
+                                                       bool isInt) const
+{
+    auto usedIt = localRa.GetUseInfo().find(regNO);
+    if (usedIt != localRa.GetUseInfo().end() && !isDef) {
+        /* reg use, decrement count */
+        DEBUG_ASSERT(usedIt->second > 0, "Incorrect local ra info");
+        localRa.SetUseInfoElem(regNO, usedIt->second - 1);
+        if (!AArch64isa::IsPhysicalRegister(static_cast<AArch64reg>(regNO)) && localRa.IsInRegAssigned(regNO, isInt)) {
+            localRa.IncUseInfoElem(localRa.GetRegAssignmentItem(isInt, regNO));
+        }
+        if (GCRA_DUMP) {
+            LogInfo::MapleLogger() << "\t\treg " << regNO << " update #use to " << localRa.GetUseInfoElem(regNO)
+                                   << "\n";
+        }
+    }
+
+    auto defIt = localRa.GetDefInfo().find(regNO);
+    if (defIt != localRa.GetDefInfo().end() && isDef) {
+        /* reg def, decrement count */
+        DEBUG_ASSERT(defIt->second > 0, "Incorrect local ra info");
+        localRa.SetDefInfoElem(regNO, defIt->second - 1);
+        if (!AArch64isa::IsPhysicalRegister(static_cast<AArch64reg>(regNO)) && localRa.IsInRegAssigned(regNO, isInt)) {
+            localRa.IncDefInfoElem(localRa.GetRegAssignmentItem(isInt, regNO));
+        }
+        if (GCRA_DUMP) {
+            LogInfo::MapleLogger() << "\t\treg " << regNO << " update #def to " << localRa.GetDefInfoElem(regNO)
+                                   << "\n";
+        }
+    }
+}
+
+void GraphColorRegAllocator::UpdateLocalRegConflict(regno_t regNO, LocalRegAllocator &localRa, bool isInt)
+{
+    LiveRange *lr = lrMap[regNO];
+    if (lr->GetNumBBConflicts() == 0) {
+        return;
+    }
+    if (!localRa.IsInRegAssigned(regNO, isInt)) {
+        return;
+    }
+    regno_t preg = localRa.GetRegAssignmentItem(isInt, regNO);
+    ForEachRegArrElem(lr->GetBBConflict(), [&preg, this](regno_t regNO) { lrMap[regNO]->InsertElemToPregveto(preg); });
+}
+
+void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const
+{
+    LogInfo::MapleLogger() << "HandleLocalReg " << regNO << "\n";
+    LogInfo::MapleLogger() << "\tregUsed:";
+    uint64 regUsed = localRa.GetPregUsed(isInt);
+    regno_t base = isInt ? R0 : V0;
+    regno_t end = isInt ? (RLR - R0) : (V31 - V0);
+
+    for (uint32 i = 0; i <= end; ++i) {
+        if ((regUsed & (1ULL << i)) != 0) {
+            LogInfo::MapleLogger() << " " << (i + base);
+        }
+    }
+    LogInfo::MapleLogger() << "\n";
+    LogInfo::MapleLogger() << "\tregs:";
+    uint64 regs = localRa.GetPregs(isInt);
+    for (uint32 regnoInLoop = 0; regnoInLoop <= end; ++regnoInLoop) {
+        if ((regs & (1ULL << regnoInLoop)) != 0) {
+            LogInfo::MapleLogger() << " " << (regnoInLoop + base);
+        }
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+void GraphColorRegAllocator::HandleLocalReg(Operand &op, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo,
+                                            bool isDef, bool isInt)
+{
+    if (!op.IsRegister()) {
+        return;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(op);
+    regno_t regNO = regOpnd.GetRegisterNumber();
+
+    if (IsUnconcernedReg(regOpnd)) {
+        return;
+    }
+
+    /* Is this a local register? */
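+    /* (A "local" register here is a vreg whose live range is confined to a
+     * single BB; such vregs are handled by local RA rather than by the
+     * global graph coloring.) */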
+    if (regNO >= kAllRegNum && !IsLocalReg(regNO)) {
+        return;
+    }
+
+    if (GCRA_DUMP) {
+        HandleLocalRaDebug(regNO, localRa, isInt);
+    }
+
+    if (regOpnd.IsPhysicalRegister()) {
+        /* a conflict with a preg is recorded in lr->pregveto and BBAssignInfo->globalsAssigned */
+        UpdateLocalRegDefUseCount(regNO, localRa, isDef, isInt);
+        /* See if it is needed by global RA */
+        if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0) {
+            if (bbInfo && !bbInfo->GetGlobalsAssigned(regNO)) {
+                /* This phys reg is now available for assignment for a vreg */
+                localRa.SetPregs(regNO, isInt);
+                if (GCRA_DUMP) {
+                    LogInfo::MapleLogger() << "\t\tlast ref, phys-reg " << regNO << " now available\n";
+                }
+            }
+        }
+    } else {
+        HandleLocalRegAssignment(regNO, localRa, isInt);
+        UpdateLocalRegDefUseCount(regNO, localRa, isDef, isInt);
+        UpdateLocalRegConflict(regNO, localRa, isInt);
+        if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0 &&
+            localRa.IsInRegAssigned(regNO, isInt)) {
+            /* last ref of the vreg, release the assignment */
+            localRa.SetPregs(localRa.GetRegAssignmentItem(isInt, regNO), isInt);
+            if (GCRA_DUMP) {
+                LogInfo::MapleLogger() << "\t\tlast ref, release reg " << localRa.GetRegAssignmentItem(isInt, regNO)
+                                       << " for " << regNO << "\n";
+            }
+        }
+    }
+}
+
+void GraphColorRegAllocator::LocalRaRegSetEraseReg(LocalRegAllocator &localRa, regno_t regNO) const
+{
+    bool isInt = AArch64isa::IsGPRegister(static_cast<AArch64reg>(regNO));
+    if (localRa.IsPregAvailable(regNO, isInt)) {
+        localRa.ClearPregs(regNO, isInt);
+    }
+}
+
+bool GraphColorRegAllocator::LocalRaInitRegSet(LocalRegAllocator &localRa, uint32 bbID)
+{
+    bool needLocalRa = false;
+    /* Note physical regs start from R0, V0. */
+    localRa.InitPregs(MaxIntPhysRegNum(), MaxFloatPhysRegNum(), cgFunc->GetCG()->GenYieldPoint(), intSpillRegSet,
+                      fpSpillRegSet);
+
+    localRa.ClearUseInfo();
+    localRa.ClearDefInfo();
+    LocalRaInfo *lraInfo = localRegVec[bbID];
+    DEBUG_ASSERT(lraInfo != nullptr, "lraInfo must not be nullptr");
+    for (const auto &useCntPair : lraInfo->GetUseCnt()) {
+        regno_t regNO = useCntPair.first;
+        if (regNO >= kAllRegNum) {
+            needLocalRa = true;
+        }
+        localRa.SetUseInfoElem(useCntPair.first, useCntPair.second);
+    }
+    for (const auto &defCntPair : lraInfo->GetDefCnt()) {
+        regno_t regNO = defCntPair.first;
+        if (regNO >= kAllRegNum) {
+            needLocalRa = true;
+        }
+        localRa.SetDefInfoElem(defCntPair.first, defCntPair.second);
+    }
+    return needLocalRa;
+}
+
+void GraphColorRegAllocator::LocalRaInitAllocatableRegs(LocalRegAllocator &localRa, uint32 bbID)
+{
+    BBAssignInfo *bbInfo = bbRegInfo[bbID];
+    if (bbInfo != nullptr) {
+        for (regno_t regNO = kInvalidRegNO; regNO < kMaxRegNum; ++regNO) {
+            if (bbInfo->GetGlobalsAssigned(regNO)) {
+                LocalRaRegSetEraseReg(localRa, regNO);
+            }
+        }
+    }
+}
+
+void GraphColorRegAllocator::LocalRaForEachDefOperand(const Insn &insn, LocalRegAllocator &localRa,
+                                                      const BBAssignInfo *bbInfo)
+{
+    const InsnDesc *md = insn.GetDesc();
+    uint32 opndNum = insn.GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        /* handle def opnd */
+        if (!md->GetOpndDes(i)->IsRegDef()) {
+            continue;
+        }
+        auto &regOpnd = static_cast<RegOperand&>(opnd);
+        bool isInt = (regOpnd.GetRegisterType() == kRegTyInt);
+        HandleLocalReg(opnd, localRa, bbInfo, true, isInt);
+    }
+}
+
+void GraphColorRegAllocator::LocalRaForEachUseOperand(const Insn &insn, LocalRegAllocator &localRa,
+                                                      const BBAssignInfo *bbInfo)
+{
+    const InsnDesc *md = insn.GetDesc();
+    uint32 opndNum = insn.GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        if (opnd.IsList()) {
+            continue;
+        } else if (opnd.IsMemoryAccessOperand()) {
+            auto &memOpnd = static_cast<MemOperand&>(opnd);
+            Operand *base = memOpnd.GetBaseRegister();
+            Operand *offset = memOpnd.GetIndexRegister();
+            if (base != nullptr) {
+                HandleLocalReg(*base, localRa, bbInfo, false, true);
+            }
+            if (!memOpnd.IsIntactIndexed()) {
+                HandleLocalReg(*base, localRa, bbInfo, true, true);
+            }
+            if (offset != nullptr) {
+                HandleLocalReg(*offset, localRa, bbInfo, false, true);
+            }
+        } else if (md->GetOpndDes(i)->IsRegUse()) {
+            auto &regOpnd = static_cast<RegOperand&>(opnd);
+            bool isInt = (regOpnd.GetRegisterType() == kRegTyInt);
+            HandleLocalReg(opnd, localRa, bbInfo, false, isInt);
+        }
+    }
+}
+
+void GraphColorRegAllocator::LocalRaPrepareBB(BB &bb, LocalRegAllocator &localRa)
+{
+    BBAssignInfo *bbInfo = bbRegInfo[bb.GetId()];
+    FOR_BB_INSNS(insn, &bb) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+
+        /*
+         * Use reverse operand order, assuming use first then def for allocation.
+         * Need to free the use resource so it can be reused for the def.
+         */
+        LocalRaForEachUseOperand(*insn, localRa, bbInfo);
+        LocalRaForEachDefOperand(*insn, localRa, bbInfo);
+    }
+}
+
+void GraphColorRegAllocator::LocalRaFinalAssignment(const LocalRegAllocator &localRa, BBAssignInfo &bbInfo)
+{
+    for (const auto &intRegAssignmentMapPair : localRa.GetIntRegAssignmentMap()) {
+        regno_t regNO = intRegAssignmentMapPair.second;
+        if (GCRA_DUMP) {
+            LogInfo::MapleLogger() << "[" << intRegAssignmentMapPair.first << "," << regNO << "],";
+        }
+        /* Might need to get rid of this copy. */
+        bbInfo.SetRegMapElem(intRegAssignmentMapPair.first, regNO);
+        AddCalleeUsed(regNO, kRegTyInt);
+    }
+    for (const auto &fpRegAssignmentMapPair : localRa.GetFpRegAssignmentMap()) {
+        regno_t regNO = fpRegAssignmentMapPair.second;
+        if (GCRA_DUMP) {
+            LogInfo::MapleLogger() << "[" << fpRegAssignmentMapPair.first << "," << regNO << "],";
+        }
+        /* Might need to get rid of this copy. */
+        bbInfo.SetRegMapElem(fpRegAssignmentMapPair.first, regNO);
+        AddCalleeUsed(regNO, kRegTyFloat);
+    }
+}
+
+void GraphColorRegAllocator::LocalRaDebug(const BB &bb, const LocalRegAllocator &localRa) const
+{
+    LogInfo::MapleLogger() << "bb " << bb.GetId() << " local ra INT need " << localRa.GetNumIntPregUsed() << " regs\n";
+    LogInfo::MapleLogger() << "bb " << bb.GetId() << " local ra FP need " << localRa.GetNumFpPregUsed() << " regs\n";
+    LogInfo::MapleLogger() << "\tpotential assignments:";
+    for (auto it : localRa.GetIntRegAssignmentMap()) {
+        LogInfo::MapleLogger() << "[" << it.first << "," << it.second << "],";
+    }
+    for (auto it : localRa.GetFpRegAssignmentMap()) {
+        LogInfo::MapleLogger() << "[" << it.first << "," << it.second << "],";
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+/*
+ * When doAllocate is false, it is the prepass:
+ * traverse each BB and keep track of the number of registers required
+ * for the local registers in the BB. Communicate this to global RA.
+ *
+ * When doAllocate is true:
+ * allocate local registers for each BB based on the registers left unused
+ * by global RA. Spill if no register is available.
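+ *
+ * A typical driver would therefore invoke this twice (a sketch of the call
+ * pattern, not the verbatim call sites):
+ *   LocalRegisterAllocator(false);  // prepass: count per-BB local reg demand
+ *   // ... global coloring runs, leaving some pregs unused in each BB ...
+ *   LocalRegisterAllocator(true);   // allocate locals from the leftover pregs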
+ */
+void GraphColorRegAllocator::LocalRegisterAllocator(bool doAllocate)
+{
+    if (GCRA_DUMP) {
+        if (doAllocate) {
+            LogInfo::MapleLogger() << "LRA allocation start\n";
+            PrintBBAssignInfo();
+        } else {
+            LogInfo::MapleLogger() << "LRA preprocessing start\n";
+        }
+    }
+    LocalRegAllocator *localRa = memPool->New<LocalRegAllocator>(*cgFunc, alloc);
+    for (auto *bb : bfs->sortedBBs) {
+        uint32 bbID = bb->GetId();
+
+        LocalRaInfo *lraInfo = localRegVec[bb->GetId()];
+        if (lraInfo == nullptr) {
+            /* No locals to allocate */
+            continue;
+        }
+
+        localRa->ClearLocalRaInfo();
+        bool needLocalRa = LocalRaInitRegSet(*localRa, bbID);
+        if (!needLocalRa) {
+            /* Only physical regs in the bb, no local ra needed. */
+            continue;
+        }
+
+        if (doAllocate) {
+            LocalRaInitAllocatableRegs(*localRa, bbID);
+        }
+
+        LocalRaPrepareBB(*bb, *localRa);
+
+        BBAssignInfo *bbInfo = bbRegInfo[bb->GetId()];
+        if (bbInfo == nullptr) {
+            bbInfo = memPool->New<BBAssignInfo>(alloc);
+            bbRegInfo[bbID] = bbInfo;
+            bbInfo->InitGlobalAssigned();
+        }
+        bbInfo->SetIntLocalRegsNeeded(localRa->GetNumIntPregUsed());
+        bbInfo->SetFpLocalRegsNeeded(localRa->GetNumFpPregUsed());
+
+        if (doAllocate) {
+            if (GCRA_DUMP) {
+                LogInfo::MapleLogger() << "\tbb(" << bb->GetId() << ")final local ra assignments:";
+            }
+            LocalRaFinalAssignment(*localRa, *bbInfo);
+            if (GCRA_DUMP) {
+                LogInfo::MapleLogger() << "\n";
+            }
+        } else if (GCRA_DUMP) {
+            LocalRaDebug(*bb, *localRa);
+        }
+    }
+}
+
+MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict,
+                                                          const std::set<MemOperand*> &usedMemOpnd, uint32 size,
+                                                          RegType regType)
+{
+    std::set<LiveRange*> sconflict;
+    regno_t regNO;
+    for (uint32 i = 0; i < regBuckets; ++i) {
+        for (uint32 b = 0; b < kU64; ++b) {
+            if ((conflict[i] & (1ULL << b)) != 0) {
+                continue;
+            }
+            regNO = i * kU64 + b;
+            if (regNO >= numVregs) {
+                break;
+            }
+            if (GetLiveRange(regNO) != nullptr) {
+                (void)sconflict.insert(lrMap[regNO]);
+            }
+        }
+    }
+
+    for (auto *noConflictLr : sconflict) {
+        if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || noConflictLr->GetSpillSize() != size) {
+            continue;
+        }
+        if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) {
+            return noConflictLr->GetSpillMem();
+        }
+    }
+    return nullptr;
+}
+
+MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const uint64 *conflict, const std::set<MemOperand*> &usedMemOpnd,
+                                                      uint32 size, RegType regType)
+{
+    regno_t regNO;
+    for (uint32 i = 0; i < regBuckets; ++i) {
+        for (uint32 b = 0; b < kU64; ++b) {
+            if ((conflict[i] & (1ULL << b)) != 0) {
+                continue;
+            }
+            regNO = i * kU64 + b;
+            if (regNO >= numVregs) {
+                break;
+            }
+            LiveRange *noConflictLr = GetLiveRange(regNO);
+            if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType ||
+                noConflictLr->GetSpillSize() != size) {
+                continue;
+            }
+            if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) {
+                return noConflictLr->GetSpillMem();
+            }
+        }
+    }
+    return nullptr;
+}
+
+/* See if any of the non-conflicting LRs is spilled and reuse its memOpnd. */
+MemOperand *GraphColorRegAllocator::GetReuseMem(uint32 vregNO, uint32 size, RegType regType)
+{
+    if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) {
+        return nullptr;
+    }
+    if (IsLocalReg(vregNO)) {
+        return nullptr;
+    }
+
+    LiveRange *lr = lrMap[vregNO];
+    const uint64 *conflict;
+    if (lr->GetSplitLr() != nullptr) {
+        /*
+         * For a split LR, the vreg liveness is optimized, but for the spill location
+         * the stack location needs to be maintained for the entire LR.
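+         * (Reusing another LR's slot here could hand the two split halves
+         * different stack locations, so return nullptr and let the caller
+         * fall back to a dedicated spill slot.)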
+         */
+        return nullptr;
+    } else {
+        conflict = lr->GetBBConflict();
+    }
+
+    std::set<MemOperand*> usedMemOpnd;
+    auto updateMemOpnd = [&usedMemOpnd, this](regno_t regNO) {
+        if (regNO >= numVregs) {
+            return;
+        }
+        LiveRange *lrInner = GetLiveRange(regNO);
+        if (lrInner && lrInner->GetSpillMem() != nullptr) {
+            (void)usedMemOpnd.insert(lrInner->GetSpillMem());
+        }
+    };
+    ForEachRegArrElem(conflict, updateMemOpnd);
+    uint32 regSize = (size <= k32) ? k32 : k64;
+    /*
+     * This is to order the search so the memOpnd given out is consistent.
+     * When vreg numbers do not change going through the VtableImpl.mpl file,
+     * this can be simplified.
+     */
+#ifdef CONSISTENT_MEMOPND
+    return GetConsistentReuseMem(conflict, usedMemOpnd, regSize, regType);
+#else /* CONSISTENT_MEMOPND */
+    return GetCommonReuseMem(conflict, usedMemOpnd, regSize, regType);
+#endif /* CONSISTENT_MEMOPND */
+}
+
+MemOperand *GraphColorRegAllocator::GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, AArch64reg regNO,
+                                                bool &isOutOfRange) const
+{
+    auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    MemOperand *memOpnd = a64CGFunc->GetOrCreatSpillMem(vregNO);
+    return (a64CGFunc->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vregNO, isDest, insn, regNO, isOutOfRange));
+}
+
+void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd,
+                                                     uint32 spillIdx, bool needSpill)
+{
+    if (!needSpill) {
+        return;
+    }
+    auto &regOpnd = static_cast<const RegOperand&>(opnd);
+    uint32 regNO = regOpnd.GetRegisterNumber();
+    LiveRange *lr = lrMap[regNO];
+
+    auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+
+    MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPre);
+    DEBUG_ASSERT(spillMem != nullptr, "spillMem nullptr check");
+
+    uint32 regSize = regOpnd.GetSize();
+    PrimType stype;
+    RegType regType = regOpnd.GetRegisterType();
+    if (regType == kRegTyInt) {
+        stype = (regSize <= k32) ? PTY_i32 : PTY_i64;
+    } else {
+        stype = (regSize <= k32) ? PTY_f32 : PTY_f64;
+    }
+
+    if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) {
+        regno_t pregNO = R16;
+        spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, static_cast<AArch64reg>(pregNO), false,
+                                                             &insn);
+    }
+    Insn &stInsn =
+        cgFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(spillMem->GetSize(), stype), phyOpnd, *spillMem);
+    std::string comment = " SPILL for spill vreg: " + std::to_string(regNO) + " op:" + kOpcodeInfo.GetName(lr->GetOp());
+    stInsn.SetComment(comment);
+    insn.GetBB()->InsertInsnBefore(insn, stInsn);
+}
+
+void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd,
+                                                      uint32 spillIdx, bool needSpill)
+{
+    if (!needSpill) {
+        return;
+    }
+
+    auto &regOpnd = static_cast<const RegOperand&>(opnd);
+    uint32 regNO = regOpnd.GetRegisterNumber();
+    LiveRange *lr = lrMap[regNO];
+    auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    bool isLastInsn = false;
+    if (insn.GetBB()->GetKind() == BB::kBBIf && insn.GetBB()->IsLastInsn(&insn)) {
+        isLastInsn = true;
+    }
+
+    if (lr->GetRematLevel() != rematOff) {
+        std::string comment = " REMATERIALIZE for spill vreg: " + std::to_string(regNO);
+        if (isLastInsn) {
+            for (auto tgtBB : insn.GetBB()->GetSuccs()) {
+                std::vector<Insn*> rematInsns = lr->Rematerialize(a64CGFunc, phyOpnd);
+                for (auto &&remat : rematInsns) {
+                    remat->SetComment(comment);
+                    tgtBB->InsertInsnBegin(*remat);
+                }
+            }
+        } else {
+            std::vector<Insn*> rematInsns = lr->Rematerialize(a64CGFunc, phyOpnd);
+            for (auto &&remat : rematInsns) {
+                remat->SetComment(comment);
+                insn.GetBB()->InsertInsnAfter(insn, *remat);
+            }
+        }
+        return;
+    }
+
+    MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPost);
+    DEBUG_ASSERT(spillMem != nullptr, "spillMem nullptr check");
+
+    uint32 regSize = regOpnd.GetSize();
+    PrimType stype;
+    RegType regType = regOpnd.GetRegisterType();
+    if (regType == kRegTyInt) {
+        stype = (regSize <= k32) ? PTY_i32 : PTY_i64;
+    } else {
+        stype = (regSize <= k32) ? PTY_f32 : PTY_f64;
+    }
+
+    bool isOutOfRange = false;
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (a64CGFunc->IsImmediateOffsetOutOfRange(*spillMem, k64)) {
+        regno_t pregNO = R16;
+        spillMem = &a64CGFunc->SplitOffsetWithAddInstruction(*spillMem, k64, static_cast<AArch64reg>(pregNO), true,
+                                                             &insn);
+        isOutOfRange = true;
+    }
+    std::string comment =
+        " RELOAD for spill vreg: " + std::to_string(regNO) + " op:" + kOpcodeInfo.GetName(lr->GetOp());
+    if (isLastInsn) {
+        for (auto tgtBB : insn.GetBB()->GetSuccs()) {
+            MOperator mOp = a64CGFunc->PickLdInsn(spillMem->GetSize(), stype);
+            Insn *newLd = &cgFunc->GetInsnBuilder()->BuildInsn(mOp, phyOpnd, *spillMem);
+            newLd->SetComment(comment);
+            tgtBB->InsertInsnBegin(*newLd);
+        }
+    } else {
+        MOperator mOp = a64CGFunc->PickLdInsn(spillMem->GetSize(), stype);
+        Insn &ldrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, phyOpnd, *spillMem);
+        ldrInsn.SetComment(comment);
+        if (isOutOfRange) {
+            if (nextInsn == nullptr) {
+                insn.GetBB()->AppendInsn(ldrInsn);
+            } else {
+                insn.GetBB()->InsertInsnBefore(*nextInsn, ldrInsn);
+            }
+        } else {
+            insn.GetBB()->InsertInsnAfter(insn, ldrInsn);
+        }
+    }
+}
+
+MemOperand *GraphColorRegAllocator::GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn,
+                                                       bool isDef)
+{
+    (void)regSize;
+    MemOperand *memOpnd = nullptr;
+    if (lr.GetSpillMem() != nullptr) {
+        /* the saved memOpnd cannot be out-of-range */
+        memOpnd = lr.GetSpillMem();
+    } else {
+#ifdef REUSE_SPILLMEM
+        memOpnd = GetReuseMem(lr.GetRegNO(), regSize, lr.GetRegType());
+        if (memOpnd != nullptr) {
+            lr.SetSpillMem(*memOpnd);
+            lr.SetSpillSize((regSize <= k32) ? k32 : k64);
+        } else {
+#endif /* REUSE_SPILLMEM */
+            regno_t baseRegNO;
+            if (!isDef && lr.GetRegType() == kRegTyInt) {
+                /* the src will use its spill reg as the base register when the offset is out of range:
+                 *   add x16, x29, #max-offset  // out-of-range
+                 *   ldr x16, [x16, #offset]    // reload
+                 *   mov xd, x16
+                 */
+                baseRegNO = lr.GetSpillReg();
+                if (baseRegNO > RLAST_INT_REG) {
+                    baseRegNO = R16;
+                }
+            } else {
+                /* the dest will use R16 as the base register when the offset is out of range:
+                 *   mov x16, xs
+                 *   add x17, x29, #max-offset  // out-of-range
+                 *   str x16, [x17, #offset]    // spill
+                 */
+                baseRegNO = R16;
+            }
+            DEBUG_ASSERT(baseRegNO != kRinvalid, "invalid base register number");
+            memOpnd = GetSpillMem(lr.GetRegNO(), isDef, insn, static_cast<AArch64reg>(baseRegNO), isOutOfRange);
+            /* the dest's spill reg can only be R15 or R16 */
+            if (isOutOfRange && isDef) {
+                DEBUG_ASSERT(lr.GetSpillReg() != R16, "can not find valid memopnd's base register");
+            }
+#ifdef REUSE_SPILLMEM
+            if (!isOutOfRange) {
+                lr.SetSpillMem(*memOpnd);
+                lr.SetSpillSize((regSize <= k32) ? k32 : k64);
+            }
+        }
+#endif /* REUSE_SPILLMEM */
+    }
+    return memOpnd;
+}
+
+/*
+ * Create a spill insn for the operand.
+ * When needSpill is true, the spill register itself must be spilled first,
+ * then used for the current spill, then reloaded again.
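+ * Illustrative shape of the emitted code (register names and offsets are
+ * placeholders, not the exact output):
+ *   str x10, [sp, #preSlot]    // SpillOperandForSpillPre: save the spill reg
+ *   ldr x10, [x29, #vregSlot]  // reload the spilled vreg into it
+ *   <the insn that uses x10>
+ *   ldr x10, [sp, #preSlot]    // SpillOperandForSpillPost: restore the spill reg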
+ */
+Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool isDef, RegOperand &phyOpnd,
+                                           bool forCall)
+{
+    auto &regOpnd = static_cast<const RegOperand&>(opnd);
+    uint32 regNO = regOpnd.GetRegisterNumber();
+    uint32 pregNO = phyOpnd.GetRegisterNumber();
+    bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast<AArch64reg>(pregNO));
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n";
+    }
+    LiveRange *lr = lrMap[regNO];
+    bool isForCallerSave = lr->GetSplitLr() == nullptr && lr->GetNumCall() && !isCalleeReg;
+    uint32 regSize = regOpnd.GetSize();
+    bool isOutOfRange = false;
+    PrimType stype;
+    RegType regType = regOpnd.GetRegisterType();
+    if (regType == kRegTyInt) {
+        stype = (regSize <= k32) ? PTY_i32 : PTY_i64;
+    } else {
+        stype = (regSize <= k32) ? PTY_f32 : PTY_f64;
+    }
+    auto *a64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+
+    Insn *spillDefInsn = nullptr;
+    if (isDef) {
+        if (lr->GetRematLevel() == rematOff) {
+            lr->SetSpillReg(pregNO);
+            Insn *nextInsn = insn.GetNextMachineInsn();
+            MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, !forCall);
+            spillDefInsn =
+                &cgFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(regSize, stype), phyOpnd, *memOpnd);
+            spillDefInsn->SetIsSpill();
+            std::string comment = " SPILL vreg: " + std::to_string(regNO) + " op:" + kOpcodeInfo.GetName(lr->GetOp());
+            if (isForCallerSave) {
+                comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId());
+            }
+            spillDefInsn->SetComment(comment);
+            if (forCall) {
+                insn.GetBB()->InsertInsnBefore(insn, *spillDefInsn);
+            } else if (isOutOfRange) {
+                if (nextInsn == nullptr) {
+                    insn.GetBB()->AppendInsn(*spillDefInsn);
+                } else {
+                    insn.GetBB()->InsertInsnBefore(*nextInsn, *spillDefInsn);
+                }
+            } else if (insn.GetNext() && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail) {
+                insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *spillDefInsn);
+            } else {
+                insn.GetBB()->InsertInsnAfter(insn, *spillDefInsn);
+            }
+        }
+
+        if ((insn.GetMachineOpcode() != MOP_xmovkri16) && (insn.GetMachineOpcode() != MOP_wmovkri16)) {
+            return spillDefInsn;
+        }
+    }
+    if (insn.GetMachineOpcode() == MOP_clinit_tail) {
+        return nullptr;
+    }
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    lr->SetSpillReg(pregNO);
+
+    std::vector<Insn*> spillUseInsns;
+    std::string comment;
+    if (lr->GetRematLevel() != rematOff) {
+        spillUseInsns = lr->Rematerialize(a64CGFunc, phyOpnd);
+        comment = " REMATERIALIZE vreg: " + std::to_string(regNO);
+    } else {
+        MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, forCall);
+        Insn &spillUseInsn =
+            cgFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickLdInsn(regSize, stype), phyOpnd, *memOpnd);
+        spillUseInsn.SetIsReload();
+        spillUseInsns.push_back(&spillUseInsn);
+        comment = " RELOAD vreg: " + std::to_string(regNO) + " op:" + kOpcodeInfo.GetName(lr->GetOp());
+    }
+    if (isForCallerSave) {
+        comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId());
+    }
+    for (auto &&spillUseInsn : spillUseInsns) {
+        spillUseInsn->SetComment(comment);
+        if (forCall) {
+            if (nextInsn == nullptr) {
+                insn.GetBB()->AppendInsn(*spillUseInsn);
+            } else {
+                insn.GetBB()->InsertInsnBefore(*nextInsn, *spillUseInsn);
+            }
+        } else {
+            insn.GetBB()->InsertInsnBefore(insn, *spillUseInsn);
+        }
+    }
+    if (spillDefInsn != nullptr) {
+        return spillDefInsn;
+    }
+    return &insn;
+}
+
+/* Try to find an available reg for the spill. */
+bool GraphColorRegAllocator::SetAvailableSpillReg(std::unordered_set<regno_t> &cannotUseReg, LiveRange &lr,
+                                                  uint64 &usedRegMask)
+{
+    bool isInt = (lr.GetRegType() == kRegTyInt);
+    regno_t base = isInt ? R0 : V0;
+    uint32 pregInterval = isInt ? 0 : (V0 - R30);
+    MapleSet<uint32> &callerRegSet = isInt ? intCallerRegSet : fpCallerRegSet;
+    MapleSet<uint32> &calleeRegSet = isInt ? intCalleeRegSet : fpCalleeRegSet;
+
+    for (const auto &it : callerRegSet) {
+        regno_t spillReg = it + base;
+        if (cannotUseReg.find(spillReg) == cannotUseReg.end() &&
+            (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
+            lr.SetAssignedRegNO(spillReg);
+            usedRegMask |= 1ULL << (spillReg - pregInterval);
+            return true;
+        }
+    }
+    for (const auto &it : calleeRegSet) {
+        regno_t spillReg = it + base;
+        if (cannotUseReg.find(spillReg) == cannotUseReg.end() &&
+            (usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
+            lr.SetAssignedRegNO(spillReg);
+            usedRegMask |= 1ULL << (spillReg - pregInterval);
+            return true;
+        }
+    }
+    return false;
+}
+
+void GraphColorRegAllocator::CollectCannotUseReg(std::unordered_set<regno_t> &cannotUseReg, const LiveRange &lr,
+                                                 Insn &insn)
+{
+    /* Find the bb in the conflict LR that actually conflicts with the current bb. */
+    for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) {
+        if (lr.GetPregveto(regNO)) {
+            (void)cannotUseReg.insert(regNO);
+        }
+    }
+    auto updateCannotUse = [&insn, &cannotUseReg, this](regno_t regNO) {
+        LiveRange *conflictLr = lrMap[regNO];
+        /*
+         * conflictLr->GetAssignedRegNO() might be zero;
+         * a caller save will be inserted, so the assigned reg can actually be released
+         */
+        if ((conflictLr->GetAssignedRegNO() > 0) && IsBitArrElemSet(conflictLr->GetBBMember(), insn.GetBB()->GetId())) {
+            if (!AArch64Abi::IsCalleeSavedReg(static_cast<AArch64reg>(conflictLr->GetAssignedRegNO())) &&
+                conflictLr->GetNumCall() && !conflictLr->GetProcessed()) {
+                return;
+            }
+            (void)cannotUseReg.insert(conflictLr->GetAssignedRegNO());
+        }
+    };
+    ForEachRegArrElem(lr.GetBBConflict(), updateCannotUse);
+#ifdef USE_LRA
+    if (!doLRA) {
+        return;
+    }
+    BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()];
+    if (bbInfo != nullptr) {
+        for (const auto &regMapPair : bbInfo->GetRegMap()) {
+            (void)cannotUseReg.insert(regMapPair.second);
+        }
+    }
+#endif /* USE_LRA */
+}
+
+regno_t GraphColorRegAllocator::PickRegForSpill(uint64 &usedRegMask, RegType regType, uint32 spillIdx,
+                                                bool &needSpillLr)
+{
+    regno_t base;
+    regno_t spillReg;
+    uint32 pregInterval;
+    bool isIntReg = (regType == kRegTyInt);
+    if (isIntReg) {
+        base = R0;
+        pregInterval = 0;
+    } else {
+        base = V0;
+        pregInterval = V0 - R30;
+    }
+
+    if (JAVALANG) {
+        /* Use a predetermined spill register */
+        MapleSet<uint32> &spillRegSet = isIntReg ? intSpillRegSet : fpSpillRegSet;
+        DEBUG_ASSERT(spillIdx < spillRegSet.size(), "spillIdx is larger than spillRegSet.size()");
+        auto regNumIt = spillRegSet.begin();
+        for (; spillIdx > 0; --spillIdx) {
+            ++regNumIt;
+        }
+        spillReg = *regNumIt + base;
+        return spillReg;
+    }
+
+    /* Temporarily find an unused reg to spill */
+    uint32 maxPhysRegNum = isIntReg ? MaxIntPhysRegNum() : MaxFloatPhysRegNum();
+    for (spillReg = (maxPhysRegNum + base); spillReg > base; --spillReg) {
+        if (spillReg >= k64BitSize) {
+            spillReg = k64BitSize - 1;
+        }
+        if ((usedRegMask & (1ULL << (spillReg - pregInterval))) == 0) {
+            usedRegMask |= (1ULL << (spillReg - pregInterval));
+            needSpillLr = true;
+            return spillReg;
+        }
+    }
+
+    DEBUG_ASSERT(false, "can not find spillReg");
+    return 0;
+}
+
+/* return true if an extra spill is needed */
+bool GraphColorRegAllocator::SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, uint64 &usedRegMask, bool isDef)
+{
+    std::unordered_set<regno_t> cannotUseReg;
+    /* SPILL COALESCE */
+    if (!isDef && (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr)) {
+        auto &ropnd = static_cast<RegOperand&>(insn.GetOperand(0));
+        if (ropnd.IsPhysicalRegister()) {
+            lr.SetAssignedRegNO(ropnd.GetRegisterNumber());
+            return false;
+        }
+    }
+
+    CollectCannotUseReg(cannotUseReg, lr, insn);
+
+    if (SetAvailableSpillReg(cannotUseReg, lr, usedRegMask)) {
+        return false;
+    }
+
+    bool needSpillLr = false;
+    if (!lr.GetAssignedRegNO()) {
+        /*
+         * All regs are assigned and none are free.
+         * Pick a reg to spill and reuse for this spill.
+         * Need to make sure the reg picked is not assigned to this insn,
+         * else there will be a conflict.
+         */
+        RegType regType = lr.GetRegType();
+        regno_t spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr);
+        if (insn.GetMachineOpcode() == MOP_lazy_ldr && spillReg == R17) {
+            CHECK_FATAL(false, "register IP1(R17) may be changed when lazy_ldr");
+        }
+        lr.SetAssignedRegNO(spillReg);
+    }
+    return needSpillLr;
+}
+
+RegOperand *GraphColorRegAllocator::GetReplaceOpndForLRA(Insn &insn, const Operand &opnd, uint32 &spillIdx,
+                                                         uint64 &usedRegMask, bool isDef)
+{
+    auto &regOpnd = static_cast<const RegOperand&>(opnd);
+    uint32 vregNO = regOpnd.GetRegisterNumber();
+    RegType regType = regOpnd.GetRegisterType();
+    BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()];
+    if (bbInfo == nullptr) {
+        return nullptr;
+    }
+    auto regIt = bbInfo->GetRegMap().find(vregNO);
+    if (regIt != bbInfo->GetRegMap().end()) {
+        RegOperand &phyOpnd = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+            static_cast<AArch64reg>(regIt->second), regOpnd.GetSize(), regType);
+        return &phyOpnd;
+    }
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << "spill vreg " << vregNO << "\n";
+    }
+    regno_t spillReg;
+    bool needSpillLr = false;
+    if (insn.IsBranch() || insn.IsCall() || (insn.GetMachineOpcode() == MOP_clinit_tail) ||
+        (insn.GetNext() && isDef && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail)) {
+        spillReg = R16;
+    } else {
+        /*
+         * use a reg excluded from livein/liveout/bbInfo->regMap.
+         * Need to make sure the reg picked is not assigned to this insn,
+         * else there will be a conflict.
+         */
+        spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr);
+        if (insn.GetMachineOpcode() == MOP_lazy_ldr && spillReg == R17) {
+            CHECK_FATAL(false, "register IP1(R17) may be changed when lazy_ldr");
+        }
+        AddCalleeUsed(spillReg, regType);
+        if (GCRA_DUMP) {
+            LogInfo::MapleLogger() << "\tassigning lra spill reg " << spillReg << "\n";
+        }
+    }
+    RegOperand &phyOpnd = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+        static_cast<AArch64reg>(spillReg), regOpnd.GetSize(), regType);
+    SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr);
+    Insn *spill = SpillOperand(insn, regOpnd, isDef, phyOpnd);
+    if (spill != nullptr) {
+        SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr);
+    }
+    ++spillIdx;
+    return &phyOpnd;
+}
+
+/* get a spill reg and check whether an extra spill is needed */
+bool GraphColorRegAllocator::GetSpillReg(Insn &insn, LiveRange &lr, const uint32 &spillIdx, uint64 &usedRegMask,
+                                         bool isDef)
+{
+    bool needSpillLr = false;
+    /*
+     * Find a spill reg for the BB among the interfering LRs.
+     * Without LRA, this info is very inaccurate. It will falsely interfere
+     * with all locals even where the spill is not actually interfering.
+     * For now, every instance of the spill requires a brand new reg assignment.
+     */
+    if (GCRA_DUMP) {
+        LogInfo::MapleLogger() << "LR-regNO " << lr.GetRegNO() << " spilled, finding a spill reg\n";
+    }
+    if (insn.IsBranch() || insn.IsCall() || (insn.GetMachineOpcode() == MOP_clinit_tail) ||
+        (insn.GetNext() && isDef && insn.GetNext()->GetMachineOpcode() == MOP_clinit_tail)) {
+        /*
+         * When a cond branch reg is spilled, it cannot
+         * restore the value after the branch since it can be the target of another br.
+         * To do it properly would require creating an intermediate bb for the reload.
+         * Use x16; it is taken out of the available set since it is used as a global in the system.
+         */
+        lr.SetAssignedRegNO(R16);
+    } else {
+        lr.SetAssignedRegNO(0);
+        needSpillLr = SetRegForSpill(lr, insn, spillIdx, usedRegMask, isDef);
+        AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType());
+    }
+    return needSpillLr;
+}
+
+// find the prev use/def after the prev call
+bool GraphColorRegAllocator::EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef, std::vector<bool> &visitedMap)
+{
+    if (!visitedMap[pred.GetId()] && lr.FindInLuMap(pred.GetId()) != lr.EndOfLuMap()) {
+        LiveUnit *lu = lr.GetLiveUnitFromLuMap(pred.GetId());
+        if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) {
+            auto refs = lr.GetRefs(pred.GetId());
+            auto it = refs.rbegin();
+            bool findPrevRef = (it->second & kIsCall) == 0;
+            return findPrevRef;
+        }
+        if (lu->HasCall()) {
+            return false;
+        }
+    }
+    visitedMap[pred.GetId()] = true;
+    bool found = true;
+    for (auto predBB : pred.GetPreds()) {
+        if (!visitedMap[predBB->GetId()]) {
+            found &= EncountPrevRef(*predBB, lr, isDef, visitedMap);
+        }
+    }
+    return found;
+}
+
+bool GraphColorRegAllocator::FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool isDef)
+{
+    bool hasFind = true;
+    std::vector<bool> visitedMap(bbVec.size() + 1, false);
+    for (auto pred : insn.GetBB()->GetPreds()) {
+        hasFind &= EncountPrevRef(*pred, lr, isDef, visitedMap);
+        if (!hasFind) {
+            return false;
+        }
+    }
+    return !insn.GetBB()->GetPreds().empty();
+}
+
+// find next def before next call ?
and no next use +bool GraphColorRegAllocator::EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector &visitedMap) +{ + if (lr.FindInLuMap(succ.GetId()) != lr.EndOfLuMap()) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(succ.GetId()); + bool findNextDef = false; + if (lu->GetDefNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(succ.GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if ((it->second & kIsDef) != 0) { + findNextDef = true; + break; + } + if ((it->second & kIsCall) != 0) { + break; + } + if ((it->second & kIsUse) != 0) { + continue; + } + } + return findNextDef; + } + if (lu->HasCall()) { + return false; + } + } + visitedMap[succ.GetId()] = true; + bool found = true; + for (auto succBB : succ.GetSuccs()) { + if (!visitedMap[succBB->GetId()]) { + found &= EncountNextRef(*succBB, lr, isDef, visitedMap); + if (!found) { + return false; + } + } + } + return found; +} + +bool GraphColorRegAllocator::FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef) +{ + bool haveFind = true; + std::vector visitedMap(bbVec.size() + 1, false); + for (auto succ : insn.GetBB()->GetSuccs()) { + haveFind &= EncountNextRef(*succ, lr, isDef, visitedMap); + if (!haveFind) { + return false; + } + } + return insn.GetBB()->GetSuccs().size() > 0; +} + +bool GraphColorRegAllocator::HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const +{ + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findPrevRef = false; + if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.rbegin(); it != refs.rend(); ++it) { + if (it->first >= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if (((it->second & kIsUse) != 0) || ((it->second & kIsDef) != 0)) { + findPrevRef = true; + contSearch = false; + break; + } + } + } + return findPrevRef; +} + +bool GraphColorRegAllocator::HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const +{ + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findNextDef = false; + if (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if (it->first <= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if ((it->second & kIsDef) != 0) { + findNextDef = true; + contSearch = false; + } + } + } + return findNextDef; +} + +bool GraphColorRegAllocator::NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef) +{ + if (doLRA) { + return true; + } + if (lr.HasDefUse()) { + return true; + } + + bool contSearch = true; + bool needed = true; + if (isDef) { + needed = !HaveNextDefInCurBB(insn, lr, contSearch); + } else { + needed = !HavePrevRefInCurBB(insn, lr, contSearch); + } + if (!contSearch) { + return needed; + } + + if (isDef) { + needed = true; + } else { + needed = !FoundPrevBeforeCall(insn, lr, isDef); + } + return needed; +} + +RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, + uint64 &usedRegMask, bool isDef) +{ + if (!opnd.IsRegister()) { + return nullptr; + } + auto ®Opnd = static_cast(opnd); + + uint32 vregNO = regOpnd.GetRegisterNumber(); + if (vregNO == RFP) { + seenFP = true; + } + RegType regType = regOpnd.GetRegisterType(); + if (vregNO < kAllRegNum) { + return nullptr; + } + if (IsUnconcernedReg(regOpnd)) { + return nullptr; + } + +#ifdef 
USE_LRA + if (doLRA && IsLocalReg(vregNO)) { + return GetReplaceOpndForLRA(insn, opnd, spillIdx, usedRegMask, isDef); + } +#endif /* USE_LRA */ + + DEBUG_ASSERT(vregNO < numVregs, "index out of range of MapleVector in GraphColorRegAllocator::GetReplaceOpnd"); + LiveRange *lr = lrMap[vregNO]; + + bool isSplitPart = false; + bool needSpillLr = false; + if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + isSplitPart = true; + } + + if (lr->IsSpilled() && !isSplitPart) { + needSpillLr = GetSpillReg(insn, *lr, spillIdx, usedRegMask, isDef); + } + + regno_t regNO; + if (isSplitPart) { + regNO = lr->GetSplitLr()->GetAssignedRegNO(); + } else { + regNO = lr->GetAssignedRegNO(); + } + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(regNO)); + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(regNO), opnd.GetSize(), regType); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "replace R" << vregNO << " with R" << (regNO - R0) << "\n"; + } + + insn.AppendComment(" [R" + std::to_string(vregNO) + "] "); + + if (isSplitPart && (isCalleeReg || lr->GetSplitLr()->GetNumCall() == 0)) { + if (isDef) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } else { + if (lr->GetSplitLr()->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload()) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } + } + return &phyOpnd; + } + + bool needCallerSave = false; + if (lr->GetNumCall() && !isCalleeReg) { + if (isDef) { + needCallerSave = NeedCallerSave(insn, *lr, isDef) && lr->GetRematLevel() == rematOff; + } else { + needCallerSave = !lr->GetProcessed(); + } + } + + if (lr->IsSpilled() || (isSplitPart && (lr->GetSplitLr()->GetNumCall() != 0)) || needCallerSave || + (!isSplitPart && !(lr->IsSpilled()) && lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload())) { + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *spill = SpillOperand(insn, opnd, isDef, phyOpnd); + if (spill != nullptr) { + SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + ++spillIdx; + } + + return &phyOpnd; +} + +void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, uint64 &usedRegMask) +{ + auto ®Opnd = static_cast(opnd); + uint32 pregInterval = (regOpnd.GetRegisterType() == kRegTyInt) ? 
0 : (V0 - R30); + uint32 vregNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregNO); + if (lr != nullptr) { + if (lr->IsSpilled()) { + lr->SetAssignedRegNO(0); + } + if (lr->GetAssignedRegNO() != 0) { + usedRegMask |= (1ULL << (lr->GetAssignedRegNO() - pregInterval)); + } + if (lr->GetSplitLr() && lr->GetSplitLr()->GetAssignedRegNO()) { + usedRegMask |= (1ULL << (lr->GetSplitLr()->GetAssignedRegNO() - pregInterval)); + } + } +} + +uint64 GraphColorRegAllocator::FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, const Insn &insn, + bool &needProcess) +{ + uint64 usedRegMask = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + bool hasVirtual = false; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + DEBUG_ASSERT(md->GetOpndDes(i) != nullptr, "pointer is null in GraphColorRegAllocator::FinalizeRegisters"); + + if (opnd.IsList()) { + if (insn.GetMachineOpcode() != MOP_asm) { + continue; + } + hasVirtual = true; + if (i == kAsmOutputListOpnd) { + fInfo.SetDefOperand(opnd, static_cast(i)); + } + if (i == kAsmInputListOpnd) { + fInfo.SetUseOperand(opnd, static_cast(i)); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + fInfo.SetBaseOperand(opnd, static_cast(i)); + MarkUsedRegs(*base, usedRegMask); + hasVirtual |= static_cast(base)->IsVirtualRegister(); + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + fInfo.SetOffsetOperand(opnd); + MarkUsedRegs(*offset, usedRegMask); + hasVirtual |= static_cast(offset)->IsVirtualRegister(); + } + } else { + bool isDef = md->GetOpndDes(i)->IsRegDef(); + if (isDef) { + fInfo.SetDefOperand(opnd, static_cast(i)); + /* + * Need to exclude def also, since it will clobber the result when the + * original value is reloaded. 
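+                 * e.g. for a read-modify-write insn like "add w1, w1, #1",
+                 * the def reg must be marked used as well; otherwise it could
+                 * be picked as a spill temp elsewhere and the later reload
+                 * would clobber the freshly written result.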
+ */ + hasVirtual |= static_cast(opnd).IsVirtualRegister(); + MarkUsedRegs(opnd, usedRegMask); + } else { + fInfo.SetUseOperand(opnd, static_cast(i)); + if (opnd.IsRegister()) { + hasVirtual |= static_cast(opnd).IsVirtualRegister(); + MarkUsedRegs(opnd, usedRegMask); + } + } + } + } /* operand */ + needProcess = hasVirtual; + return usedRegMask; +} + +void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) +{ + static regno_t intRegs[kSpillMemOpndNum] = {R10, R11, R12, R13}; // R9 is used for large stack offset temp + static regno_t fpRegs[kSpillMemOpndNum] = {V16, V17, V18, V19}; + uint32 opndNum = insn.GetOperandSize(); + std::set defPregs; + std::set usePregs; + std::vector defLrs; + std::vector useLrs; + if (insn.GetMachineOpcode() == MOP_xmovrr || insn.GetMachineOpcode() == MOP_wmovrr) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (opnd1.GetRegisterNumber() < R20 && opnd0.GetRegisterNumber() >= kAllRegNum) { + LiveRange *lr = lrMap[opnd0.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd1.GetRegisterNumber()); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + if (opnd0.GetRegisterNumber() < R20 && opnd1.GetRegisterNumber() >= kAllRegNum) { + LiveRange *lr = lrMap[opnd1.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd0.GetRegisterNumber()); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + } + const InsnDesc *md = insn.GetDesc(); + bool isIndexedMemOp = false; + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand *opnd = &insn.GetOperand(opndIdx); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd); + if (memopnd->GetIndexOpt() == MemOperand::kPreIndex || memopnd->GetIndexOpt() == MemOperand::kPostIndex) { + isIndexedMemOp = true; + } + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && !IsUnconcernedReg(*base)) { + if (!memopnd->IsIntactIndexed()) { + if (base->IsPhysicalRegister()) { + defPregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + if (base->IsPhysicalRegister()) { + usePregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr) { + if (offset->IsPhysicalRegister()) { + usePregs.insert(offset->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[offset->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + } else if (opnd->IsRegister()) { + bool isDef = md->GetOpndDes(static_cast(opndIdx))->IsRegDef(); + bool isUse = md->GetOpndDes(static_cast(opndIdx))->IsRegUse(); + RegOperand *ropnd = static_cast(opnd); + if (IsUnconcernedReg(*ropnd)) { + continue; + } + if (ropnd != nullptr) { + if (isUse) { + if (ropnd->IsPhysicalRegister()) { + usePregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + if (isDef) { + if (ropnd->IsPhysicalRegister()) { + defPregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = 
lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + } + } + } + auto comparator = [=](const LiveRange *lr1, const LiveRange *lr2) -> bool { return lr1->GetID() > lr2->GetID(); }; + std::sort(useLrs.begin(), useLrs.end(), comparator); + for (auto lr : useLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = rtype == kRegTyInt ? intRegs[0] : fpRegs[0]; + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() >= firstSpillReg && + usePregs.find(lr->GetSpillReg()) == usePregs.end()) { + usePregs.insert(lr->GetSpillReg()); + continue; + } else { + lr->SetSpillReg(0); + } + for (uint32 i = 0; i < kSpillMemOpndNum; i++) { + regno_t preg = rtype == kRegTyInt ? intRegs[i] : fpRegs[i]; + if (usePregs.find(preg) == usePregs.end()) { + lr->SetSpillReg(preg); + usePregs.insert(preg); + break; + } + } + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg"); + } + size_t spillRegIdx; + if (isIndexedMemOp) { + spillRegIdx = useLrs.size(); + } else { + spillRegIdx = 0; + } + for (auto lr : defLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = rtype == kRegTyInt ? intRegs[0] : fpRegs[0]; + if (lr->GetSpillReg() != 0) { + if (lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() >= firstSpillReg && defPregs.find(lr->GetSpillReg()) != defPregs.end()) { + lr->SetSpillReg(0); + } + } + if (lr->GetSpillReg() != 0) { + continue; + } + for (; spillRegIdx < kSpillMemOpndNum; spillRegIdx++) { + regno_t preg = rtype == kRegTyInt ? intRegs[spillRegIdx] : fpRegs[spillRegIdx]; + if (defPregs.find(preg) == defPregs.end()) { + lr->SetSpillReg(preg); + defPregs.insert(preg); + break; + } + } + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg"); + } +} + +RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, Insn &insn, uint32 spillCnt, bool isdef) +{ + regno_t vregno = opnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregno); + if (lr != nullptr && lr->IsSpilled()) { + AArch64CGFunc *a64cgfunc = static_cast(cgFunc); + uint32 bits = opnd.GetSize(); + if (bits < k32BitSize) { + bits = k32BitSize; + } + if (cgFunc->IsExtendReg(vregno)) { + bits = k64BitSize; + } + regno_t spreg = 0; + RegType rtype = lr->GetRegType(); + spreg = lr->GetSpillReg(); + DEBUG_ASSERT(lr->GetSpillReg() != 0, "no reg in CreateSpillFillCode"); + RegOperand *regopnd = + &a64cgfunc->GetOrCreatePhysicalRegisterOperand(static_cast(spreg), opnd.GetSize(), rtype); + + if (lr->GetRematLevel() != rematOff) { + if (isdef) { + return nullptr; + } else { + std::vector rematInsns = lr->Rematerialize(a64cgfunc, *static_cast(regopnd)); + for (auto &&remat : rematInsns) { + std::string comment = " REMATERIALIZE color vreg: " + std::to_string(vregno); + remat->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *remat); + } + return regopnd; + } + } + + bool isOutOfRange = false; + Insn *nextInsn = insn.GetNextMachineInsn(); + MemOperand *loadmem = GetSpillOrReuseMem(*lr, opnd.GetSize(), isOutOfRange, insn, isdef); + PrimType pty = (lr->GetRegType() == kRegTyInt) ? ((bits > k32BitSize) ? PTY_i64 : PTY_i32) + : ((bits > k32BitSize) ? 
PTY_f64 : PTY_f32); + CHECK_FATAL(spillCnt < kSpillMemOpndNum, "spill count exceeded"); + Insn *memInsn; + if (isdef) { + memInsn = &cgFunc->GetInsnBuilder()->BuildInsn(a64cgfunc->PickStInsn(bits, pty), *regopnd, *loadmem); + memInsn->SetIsSpill(); + std::string comment = + " SPILLcolor vreg: " + std::to_string(vregno) + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*memInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *memInsn); + } + } else { + memInsn = &cgFunc->GetInsnBuilder()->BuildInsn(a64cgfunc->PickLdInsn(bits, pty), *regopnd, *loadmem); + memInsn->SetIsReload(); + std::string comment = + " RELOADcolor vreg: " + std::to_string(vregno) + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *memInsn); + } + return regopnd; + } + return nullptr; +} + +bool GraphColorRegAllocator::SpillLiveRangeForSpills() +{ + bool done = false; + for (uint32_t bbIdx = 0; bbIdx < bfs->sortedBBs.size(); bbIdx++) { + BB *bb = bfs->sortedBBs[bbIdx]; + FOR_BB_INSNS(insn, bb) { + uint32 spillCnt; + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + continue; + } + spillCnt = 0; + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + GenerateSpillFillRegs(*insn); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *newmemopnd = nullptr; + auto *memopnd = static_cast(opnd); + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*base, *insn, spillCnt); + if (!memopnd->IsIntactIndexed()) { + (void)CreateSpillFillCode(*base, *insn, spillCnt, true); + } + if (replace != nullptr) { + spillCnt++; + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + newmemopnd->SetBaseRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*offset, *insn, spillCnt); + if (replace != nullptr) { + spillCnt++; + if (newmemopnd == nullptr) { + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + } + newmemopnd->SetIndexRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + } else if (opnd->IsRegister()) { + bool isdef = md->opndMD[i]->IsRegDef(); + bool isuse = md->opndMD[i]->IsRegUse(); + RegOperand *replace = CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, isdef); + if (isuse && isdef) { + (void)CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, false); + } + if (replace != nullptr) { + if (!isdef) { + spillCnt++; + } + insn->SetOperand(i, *replace); + done = true; + } + } + } + } + } + return done; +} + +static bool ReloadAtCallee(CgOccur *occ) +{ + auto *defOcc = occ->GetDef(); + if (defOcc == nullptr || defOcc->GetOccType() != kOccStore) { + return false; + } + return static_cast(defOcc)->Reload(); +} + +void CallerSavePre::DumpWorkCandAndOcc() +{ + if (workCand->GetTheOperand()->IsRegister()) { + LogInfo::MapleLogger() << "Cand R"; + LogInfo::MapleLogger() << static_cast(workCand->GetTheOperand())->GetRegisterNumber() << '\n'; + } else { + LogInfo::MapleLogger() << "Cand Index" 
<< workCand->GetIndex() << '\n'; + } + for (CgOccur *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } +} + +void CallerSavePre::CodeMotion() +{ + constexpr uint32 limitNum = UINT32_MAX; + uint32 cnt = 0; + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + ++cnt; + beyondLimit |= (cnt == limitNum); + if (!beyondLimit && dump) { + LogInfo::MapleLogger() << "opt use occur: "; + occ->Dump(); + } + } + if (occ->GetOccType() == kOccUse && + (beyondLimit || (static_cast(occ)->Reload() && !ReloadAtCallee(occ)))) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd); + continue; + } + if (occ->GetOccType() == kOccPhiopnd && static_cast(occ)->Reload() && !ReloadAtCallee(occ)) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + Insn *insn = occ->GetBB()->GetLastInsn(); + if (insn == nullptr) { + insn = &(static_cast(func)->CreateCommentInsn("reload caller save register")); + occ->GetBB()->AppendInsn(*insn); + } + auto defOcc = occ->GetDef(); + bool forCall = (defOcc != nullptr && insn == defOcc->GetInsn()); + (void)regAllocator->SpillOperand(*insn, *occ->GetOperand(), false, phyOpnd, forCall); + continue; + } + if (occ->GetOccType() == kOccStore && static_cast(occ)->Reload()) { + RegOperand &phyOpnd = static_cast(func)->GetOrCreatePhysicalRegisterOperand( + static_cast(workLr->GetAssignedRegNO()), occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd, true); + continue; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() + << " after codemotion ===========\n"; + DumpWorkCandAndOcc(); + func->DumpCFGToDot("raCodeMotion-"); + } +} + +void CallerSavePre::UpdateLoadSite(CgOccur *occ) +{ + if (occ == nullptr) { + return; + } + auto *defOcc = occ->GetDef(); + if (occ->GetOccType() == kOccUse) { + defOcc = static_cast(occ)->GetPrevVersionOccur(); + } + if (defOcc == nullptr) { + return; + } + switch (defOcc->GetOccType()) { + case kOccDef: + break; + case kOccUse: + UpdateLoadSite(defOcc); + return; + case kOccStore: { + auto *storeOcc = static_cast(defOcc); + if (storeOcc->Reload()) { + break; + } + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(defOcc); + if (phiOcc->IsFullyAvailable()) { + break; + } + if (!phiOcc->IsDownSafe() || phiOcc->IsNotAvailable()) { + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + + if (defOcc->Processed()) { + return; + } + defOcc->SetProcessed(true); + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + UpdateLoadSite(opndOcc); + } + return; + } + default: { + CHECK_FATAL(false, "NIY"); + break; + } + } +} + 
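+/*
+ * Decide the final reload points. UpdateLoadSite first propagates reload flags
+ * from each use back through its defining occurrences; the walk below, in
+ * dominator-tree preorder, then clears a reload whenever an available
+ * occurrence of the same class already dominates it.
+ */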
+void CallerSavePre::CalLoadSites() +{ + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + UpdateLoadSite(occ); + } + } + std::vector availableDef(classCount, nullptr); + for (auto *occ : allOccs) { + auto classID = static_cast(occ->GetClassID()); + switch (occ->GetOccType()) { + case kOccDef: + availableDef[classID] = occ; + break; + case kOccStore: { + if (static_cast(occ)->Reload()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(occ); + if (!phiOcc->IsNotAvailable() && phiOcc->IsDownSafe()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccUse: { + auto *useOcc = static_cast(occ); + if (useOcc->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *useOcc->GetBB())) { + useOcc->SetReload(false); + } else { + availableDef[classID] = useOcc; + } + } + break; + } + case kOccPhiopnd: { + auto *phiOpnd = static_cast(occ); + if (phiOpnd->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *phiOpnd->GetBB())) { + phiOpnd->SetReload(false); + } else { + availableDef[classID] = phiOpnd; + } + } + break; + } + case kOccExit: + break; + default: + CHECK_FATAL(false, "not supported occur type"); + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() + << " after CalLoadSite===================\n"; + DumpWorkCandAndOcc(); + LogInfo::MapleLogger() << "\n"; + } +} + +void CallerSavePre::ComputeAvail() +{ + bool changed = true; + while (changed) { + changed = false; + for (auto *phiOcc : phiOccs) { + if (phiOcc->IsNotAvailable()) { + continue; + } + size_t killedCnt = 0; + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + auto defOcc = opndOcc->GetDef(); + if (defOcc == nullptr) { + continue; + } + // for not move load too far from use site, set not-fully-available-phi killing availibity of phiOpnd + if ((defOcc->GetOccType() == kOccPhiocc && !static_cast(defOcc)->IsFullyAvailable()) || + defOcc->GetOccType() == kOccStore) { + ++killedCnt; + opndOcc->SetHasRealUse(false); + // opnd at back-edge is killed, set phi not avail + if (dom->Dominate(*phiOcc->GetBB(), *opndOcc->GetBB())) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + } + if (opndOcc->GetBB()->IsSoloGoto() && opndOcc->GetBB()->GetLoop() != nullptr) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + } + continue; + } + } + if (killedCnt == phiOcc->GetPhiOpnds().size()) { + changed |= !phiOcc->IsNotAvailable(); + phiOcc->SetAvailability(kNotAvailable); + } else if (killedCnt > 0) { + changed |= !phiOcc->IsPartialAvailable(); + phiOcc->SetAvailability(kPartialAvailable); + } else { + } // fully available is default state + } + } +} + +void CallerSavePre::Rename1() +{ + std::stack occStack; + classCount = 1; + // iterate the occurrence according to its preorder dominator tree + for (CgOccur *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(*dom, *occ)) { + occStack.pop(); + } + switch (occ->GetOccType()) { + case kOccUse: { + if (occStack.empty()) { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccStore || topOccur->GetOccType() == kOccDef || + topOccur->GetOccType() == kOccPhiocc) { + // assign new class + 
occ->SetClassID(topOccur->GetClassID()); + occ->SetPrevVersionOccur(topOccur); + occStack.push(occ); + break; + } else if (topOccur->GetOccType() == kOccUse) { + occ->SetClassID(topOccur->GetClassID()); + if (topOccur->GetDef() != nullptr) { + occ->SetDef(topOccur->GetDef()); + } else { + occ->SetDef(topOccur); + } + break; + } + CHECK_FATAL(false, "unsupported occur type"); + break; + } + case kOccPhiocc: { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccPhiopnd: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto *phiOpndOcc = static_cast(occ); + phiOpndOcc->SetDef(topOccur); + phiOpndOcc->SetClassID(topOccur->GetClassID()); + if (topOccur->GetOccType() == kOccUse) { + phiOpndOcc->SetHasRealUse(true); + } + } + break; + } + case kOccDef: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccStore: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto prevVersionOcc = topOccur->GetDef() ? topOccur->GetDef() : topOccur; + static_cast(occ)->SetPrevVersionOccur(prevVersionOcc); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccExit: { + if (occStack.empty()) { + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + break; + } + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() << " after rename1============\n"; + DumpWorkCandAndOcc(); + } +} + +void CallerSavePre::ComputeVarAndDfPhis() +{ + dfPhiDfns.clear(); + PreWorkCand *workCand = GetWorkCand(); + for (auto *realOcc : workCand->GetRealOccs()) { + BB *defBB = realOcc->GetBB(); + GetIterDomFrontier(defBB, &dfPhiDfns); + } +} + +void CallerSavePre::BuildWorkList() +{ + size_t numBBs = dom->GetDtPreOrderSize(); + std::vector callSaveLrs; + for (auto it : regAllocator->GetLrMap()) { + LiveRange *lr = it.second; + if (lr == nullptr || lr->IsSpilled()) { + continue; + } + bool isCalleeReg = AArch64Abi::IsCalleeSavedReg(static_cast(lr->GetAssignedRegNO())); + if (lr->GetSplitLr() == nullptr && lr->GetNumCall() && !isCalleeReg) { + callSaveLrs.emplace_back(lr); + } + } + const MapleVector &preOrderDt = dom->GetDtPreOrder(); + for (size_t i = 0; i < numBBs; ++i) { + BB *bb = func->GetBBFromID(preOrderDt[i]); + std::map insnMap; + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + insnMap.insert(std::make_pair(insn->GetId(), insn)); + } + for (auto lr : callSaveLrs) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + RegOperand &opnd = func->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + if (lu != nullptr && (lu->GetDefNum() || lu->GetUseNum() || lu->HasCall())) { + MapleMap refs = lr->GetRefs(bb->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if (it->second & kIsUse) { + (void)CreateRealOcc(*insnMap[it->first], opnd, kOccUse); + } + if (it->second & kIsDef) { + 
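+                        // kIsUse/kIsDef/kIsCall are bit flags that may be set
+                        // together, so each flag is tested independently.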
(void)CreateRealOcc(*insnMap[it->first], opnd, kOccDef); + } + if (it->second & kIsCall) { + Insn *callInsn = insnMap[it->first]; + auto *targetOpnd = callInsn->GetCallTargetOperand(); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *mirFunc = funcSt->GetFunction(); + if (mirFunc != nullptr && mirFunc->IsReferedRegsValid()) { + auto regSet = mirFunc->GetReferedRegs(); + if (regSet.find(lr->GetAssignedRegNO()) == regSet.end()) { + continue; + } + } + } + (void)CreateRealOcc(*callInsn, opnd, kOccStore); + } + } + } + } + if (bb->GetKind() == BB::kBBReturn) { + CreateExitOcc(*bb); + } + } +} + +void CallerSavePre::ApplySSAPRE() +{ + // #0 build worklist + BuildWorkList(); + uint32 cnt = 0; + constexpr uint32 preLimit = UINT32_MAX; + while (!workList.empty()) { + ++cnt; + if (cnt == preLimit) { + beyondLimit = true; + } + workCand = workList.front(); + workCand->SetIndex(static_cast(cnt)); + workLr = regAllocator->GetLiveRange(static_cast(workCand->GetTheOperand())->GetRegisterNumber()); + DEBUG_ASSERT(workLr != nullptr, "exepected non null lr"); + workList.pop_front(); + if (workCand->GetRealOccs().empty()) { + continue; + } + + allOccs.clear(); + phiOccs.clear(); + // #1 Insert PHI; results in allOccs and phiOccs + ComputeVarAndDfPhis(); + CreateSortedOccs(); + if (workCand->GetRealOccs().empty()) { + continue; + } + // #2 Rename + Rename1(); + ComputeDS(); + ComputeAvail(); + CalLoadSites(); + // #6 CodeMotion and recompute worklist based on newly occurrence + CodeMotion(); + DEBUG_ASSERT(workLr->GetProcessed() == false, "exepected unprocessed"); + workLr->SetProcessed(); + } +} + +void GraphColorRegAllocator::OptCallerSave() +{ + CallerSavePre callerSavePre(this, *cgFunc, domInfo, *memPool, *memPool, kLoadPre, UINT32_MAX); + callerSavePre.SetDump(GCRA_DUMP); + callerSavePre.ApplySSAPRE(); +} + +void GraphColorRegAllocator::SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, + BB &headerPred, BB &exitSucc, const std::set &cands) +{ + size_t maxSplitCount = lrs.size() - intCalleeRegSet.size(); + maxSplitCount = maxSplitCount > kMaxSplitCount ? kMaxSplitCount : maxSplitCount; + uint32 splitCount = 0; + auto it = cands.begin(); + size_t candsSize = cands.size(); + maxSplitCount = maxSplitCount > candsSize ? 
candsSize : maxSplitCount; + for (auto &lr : lrs) { + if (lr->IsSpilled()) { + continue; + } + if (!AArch64Abi::IsCalleeSavedReg(static_cast(lr->GetAssignedRegNO()))) { + continue; + } + bool hasRef = false; + for (auto *bb : loop.GetLoopMembers()) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + if (lu != nullptr && (lu->GetDefNum() != 0 || lu->GetUseNum() != 0)) { + hasRef = true; + break; + } + } + if (!hasRef) { + splitCount++; + RegOperand *ropnd = &cgFunc->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + RegOperand &phyOpnd = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(lr->GetAssignedRegNO()), ropnd->GetSize(), (lr->GetRegType())); + + Insn *headerCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop begin")); + headerPred.AppendInsn(*headerCom); + Insn *last = headerPred.GetLastInsn(); + (void)SpillOperand(*last, *ropnd, true, static_cast(phyOpnd)); + + Insn *exitCom = &(static_cast(cgFunc)->CreateCommentInsn("split around loop end")); + exitSucc.InsertInsnBegin(*exitCom); + Insn *first = exitSucc.GetFirstInsn(); + (void)SpillOperand(*first, *ropnd, false, static_cast(phyOpnd)); + + LiveRange *replacedLr = lrMap[*it]; + replacedLr->SetAssignedRegNO(lr->GetAssignedRegNO()); + replacedLr->SetSpilled(false); + ++it; + } + if (splitCount >= maxSplitCount) { + break; + } + } +} + +bool GraphColorRegAllocator::LrGetBadReg(const LiveRange &lr) const +{ + if (lr.IsSpilled()) { + return true; + } + if (lr.GetNumCall() != 0 && !AArch64Abi::IsCalleeSavedReg(static_cast(lr.GetAssignedRegNO()))) { + return true; + } + return false; +} + +bool GraphColorRegAllocator::LoopNeedSplit(const CGFuncLoops &loop, std::set &cands) +{ + std::set regPressure; + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + loopBBs.insert(bb); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + if (offset != nullptr && offset->IsRegister()) { + RegOperand *regOpnd = static_cast(offset); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd.GetRegisterNumber()); + } + } + } + } + } + if (regPressure.size() != 0) { + for (auto reg : regPressure) { + LiveRange *lr = lrMap[reg]; + std::vector smember; + 
ForEachBBArrElem(lr->GetBBMember(), + [this, &smember](uint32 bbID) { (void)smember.emplace_back(bbVec[bbID]); }); + bool liveBeyondLoop = false; + for (auto bb : smember) { + if (loopBBs.find(bb) == loopBBs.end()) { + liveBeyondLoop = true; + break; + } + } + if (liveBeyondLoop) { + continue; + } + cands.insert(reg); + } + if (cands.empty()) { + return false; + } + return true; + } + return false; +} + +void GraphColorRegAllocator::AnalysisLoop(const CGFuncLoops &loop) +{ + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::vector lrs; + size_t intCalleeNum = intCalleeRegSet.size(); + if (loop.GetMultiEntries().size() != 0) { + return; + } + for (auto regno : liveIn) { + LiveRange *lr = GetLiveRange(regno); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && lr->GetNumCall() != 0) { + lrs.emplace_back(lr); + } + } + if (lrs.size() < intCalleeNum) { + return; + } + bool hasCall = false; + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + if (bb->HasCall()) { + hasCall = true; + } + loopBBs.insert(bb); + } + if (!hasCall) { + return; + } + auto comparator = [=](const LiveRange *lr1, const LiveRange *lr2) -> bool { + return lr1->GetPriority() < lr2->GetPriority(); + }; + std::sort(lrs.begin(), lrs.end(), comparator); + const MapleVector &exits = loop.GetExits(); + std::set loopExits; + for (auto &bb : exits) { + for (auto &succ : bb->GetSuccs()) { + if (loopBBs.find(succ) != loopBBs.end()) { + continue; + } + if (succ->IsSoloGoto() || succ->IsEmpty()) { + BB *realSucc = CGCFG::GetTargetSuc(*succ); + if (realSucc != nullptr) { + loopExits.insert(realSucc); + } + } else { + loopExits.insert(succ); + } + } + } + std::set loopEntra; + for (auto &pred : header->GetPreds()) { + if (loopBBs.find(pred) != loopBBs.end()) { + continue; + } + loopEntra.insert(pred); + } + if (loopEntra.size() != 1 || loopExits.size() != 1) { + return; + } + BB *headerPred = *loopEntra.begin(); + BB *exitSucc = *loopExits.begin(); + if (headerPred->GetKind() != BB::kBBFallthru) { + return; + } + if (exitSucc->GetPreds().size() != loop.GetExits().size()) { + return; + } + std::set cands; + if (!LoopNeedSplit(loop, cands)) { + return; + } + SplitVregAroundLoop(loop, lrs, *headerPred, *exitSucc, cands); +} +void GraphColorRegAllocator::AnalysisLoopPressureAndSplit(const CGFuncLoops &loop) +{ + if (loop.GetInnerLoops().empty()) { + // only handle inner-most loop + AnalysisLoop(loop); + return; + } + for (const auto *lp : loop.GetInnerLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } +} + +/* Iterate through all instructions and change the vreg to preg. 
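+ * When the multi-pass allocator still has spilled vregs, SpillLiveRangeForSpills
+ * first rewrites them with the reserved spill registers and the round is retried.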
*/ +void GraphColorRegAllocator::FinalizeRegisters() +{ + if (doMultiPass && hasSpill) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "In this round, spill vregs : \n"; + for (auto it : lrMap) { + LiveRange *lr = it.second; + if (lr->IsSpilled()) { + LogInfo::MapleLogger() << "R" << lr->GetRegNO() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + } + bool done = SpillLiveRangeForSpills(); + if (done) { + return; + } + } + if (CLANG) { + if (!cgFunc->GetLoops().empty()) { + cgFunc->GetTheCFG()->InitInsnVisitor(*cgFunc); + for (const auto *lp : cgFunc->GetLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } + } + OptCallerSave(); + } + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (insn->IsImmaterialInsn()) { + continue; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + + for (uint32 i = 0; i < kSpillMemOpndNum; ++i) { + operandSpilled[i] = false; + } + + FinalizeRegisterInfo *fInfo = memPool->New(alloc); + bool needProcces = true; + uint64 usedRegMask = FinalizeRegisterPreprocess(*fInfo, *insn, needProcces); + if (!needProcces) { + continue; + } + uint32 defSpillIdx = 0; + uint32 useSpillIdx = 0; + MemOperand *memOpnd = nullptr; + if (fInfo->GetBaseOperand()) { + memOpnd = static_cast(fInfo->GetBaseOperand())->Clone(*cgFunc->GetMemoryPool()); + insn->SetOperand(fInfo->GetMemOperandIdx(), *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + DEBUG_ASSERT(base != nullptr, "nullptr check"); + /* if base register is both defReg and useReg, defSpillIdx should also be increased. But it doesn't + * exist yet */ + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + if (!memOpnd->IsIntactIndexed()) { + (void)GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, true); + } + } + if (fInfo->GetOffsetOperand()) { + DEBUG_ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + Operand *offset = memOpnd->GetIndexRegister(); + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *offset, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + for (size_t i = 0; i < fInfo->GetDefOperandsSize(); ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + const Operand *defOpnd = fInfo->GetDefOperandsElem(i); + if (defOpnd->IsList()) { + ListOperand *outList = const_cast(static_cast(defOpnd)); + auto *a64CGFunc = static_cast(cgFunc); + auto *srcOpndsNew = a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); + RegOperand *phyOpnd; + for (auto opnd : outList->GetOperands()) { + if (opnd->IsPhysicalRegister()) { + phyOpnd = opnd; + } else { + phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, true); + } + srcOpndsNew->PushOpnd(*phyOpnd); + } + insn->SetOperand(kAsmOutputListOpnd, *srcOpndsNew); + continue; + } + } + const Operand *opnd = fInfo->GetDefOperandsElem(i); + RegOperand *phyOpnd = nullptr; + if (insn->IsSpecialIntrinsic()) { + phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, true); + } else { + phyOpnd = GetReplaceOpnd(*insn, *opnd, defSpillIdx, usedRegMask, true); + } + if (phyOpnd != nullptr) { + insn->SetOperand(fInfo->GetDefIdxElem(i), *phyOpnd); + } + } + for (size_t i = 0; i < fInfo->GetUseOperandsSize(); ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + const Operand *useOpnd = fInfo->GetUseOperandsElem(i); + if (useOpnd->IsList()) { + ListOperand *inList = const_cast(static_cast(useOpnd)); + auto 
*a64CGFunc = static_cast(cgFunc); + auto *srcOpndsNew = a64CGFunc->CreateListOpnd(*a64CGFunc->GetFuncScopeAllocator()); + for (auto opnd : inList->GetOperands()) { + if ((static_cast(opnd))->GetRegisterNumber() < kAllRegNum) { + srcOpndsNew->PushOpnd(*opnd); + } else { + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, false); + srcOpndsNew->PushOpnd(*phyOpnd); + } + } + insn->SetOperand(kAsmInputListOpnd, *srcOpndsNew); + continue; + } + } + const Operand *opnd = fInfo->GetUseOperandsElem(i); + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + insn->SetOperand(fInfo->GetUseIdxElem(i), *phyOpnd); + } + } + if (insn->GetMachineOpcode() == MOP_wmovrr || insn->GetMachineOpcode() == MOP_xmovrr) { + auto ®1 = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn->GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb->RemoveInsn(*insn); + } + } + } /* insn */ + } /* BB */ +} + +void GraphColorRegAllocator::MarkCalleeSaveRegs() +{ + for (auto regNO : intCalleeUsed) { + static_cast(cgFunc)->AddtoCalleeSaved(static_cast(regNO)); + } + for (auto regNO : fpCalleeUsed) { + static_cast(cgFunc)->AddtoCalleeSaved(static_cast(regNO)); + } +} + +bool GraphColorRegAllocator::AllocateRegisters() +{ +#ifdef RANDOM_PRIORITY + /* Change this seed for different random numbers */ + srand(0); +#endif /* RANDOM_PRIORITY */ + auto *a64CGFunc = static_cast(cgFunc); + + if (GCRA_DUMP && doMultiPass) { + LogInfo::MapleLogger() << "\n round start: \n"; + cgFunc->DumpCGIR(); + } + /* + * we store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + */ + a64CGFunc->AddtoCalleeSaved(RFP); + a64CGFunc->AddtoCalleeSaved(RLR); + a64CGFunc->NoteFPLRAddedToCalleeSavedList(); + +#if DEBUG + int32 cnt = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + ++cnt; + } + } + DEBUG_ASSERT(cnt <= cgFunc->GetTotalNumberOfInstructions(), "Incorrect insn count"); +#endif + cgFunc->SetIsAfterRegAlloc(); + /* EBO propgation extent the live range and might need to be turned off. */ + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); + + InitCCReg(); + + ComputeLiveRanges(); + + InitFreeRegPool(); + + BuildInterferenceGraph(); + + Separate(); + + SplitAndColor(); + +#ifdef USE_LRA + if (doLRA) { + LocalRegisterAllocator(true); + } +#endif /* USE_LRA */ + + FinalizeRegisters(); + + MarkCalleeSaveRegs(); + + if (!seenFP) { + cgFunc->UnsetSeenFP(); + } + if (GCRA_DUMP) { + cgFunc->DumpCGIR(); + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + + if (doMultiPass && hasSpill) { + return false; + } else { + return true; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dce.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1ce3bd1f16abd61f2278d1adc515381985c3a01 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dce.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_dce.h" +#include "aarch64_operand.h" +namespace maplebe { +bool AArch64Dce::RemoveUnuseDef(VRegVersion &defVersion) +{ + /* delete defs which have no uses */ + if (defVersion.GetAllUseInsns().empty()) { + DUInsnInfo *defInsnInfo = defVersion.GetDefInsnInfo(); + if (defInsnInfo == nullptr) { + return false; + } + CHECK_FATAL(defInsnInfo->GetInsn() != nullptr, "Get def insn failed"); + Insn *defInsn = defInsnInfo->GetInsn(); + /* have not support asm/neon opt yet */ + if (defInsn->GetMachineOpcode() == MOP_asm || defInsn->IsVectorOp() || defInsn->IsAtomic()) { + return false; + } + std::set defRegs = defInsn->GetDefRegs(); + if (defRegs.size() != 1) { + return false; + } + uint32 bothDUIdx = defInsn->GetBothDefUseOpnd(); + if (!(bothDUIdx != kInsnMaxOpnd && defInsnInfo->GetOperands().count(bothDUIdx))) { + defInsn->GetBB()->RemoveInsn(*defInsn); + if (defInsn->IsPhi()) { + defInsn->GetBB()->RemovePhiInsn(defVersion.GetOriginalRegNO()); + } + defVersion.MarkDeleted(); + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = opndNum; i > 0; --i) { + Operand &opnd = defInsn->GetOperand(i - 1); + A64DeleteRegUseVisitor deleteUseRegVisitor(*GetSSAInfo(), defInsn->GetId()); + opnd.Accept(deleteUseRegVisitor); + } + return true; + } + } + return false; +} + +void A64DeleteRegUseVisitor::Visit(RegOperand *v) +{ + if (v->IsSSAForm()) { + VRegVersion *regVersion = GetSSAInfo()->FindSSAVersion(v->GetRegisterNumber()); + MapleUnorderedMap &useInfos = regVersion->GetAllUseInsns(); + auto it = useInfos.find(deleteInsnId); + if (it != useInfos.end()) { + useInfos.erase(it); + } + } +} +void A64DeleteRegUseVisitor::Visit(ListOperand *v) +{ + for (auto *regOpnd : v->GetOperands()) { + Visit(regOpnd); + } +} +void A64DeleteRegUseVisitor::Visit(MemOperand *a64MemOpnd) +{ + RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); + RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + Visit(baseRegOpnd); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + Visit(indexRegOpnd); + } +} + +void A64DeleteRegUseVisitor::Visit(PhiOperand *v) +{ + for (auto phiOpndIt : v->GetOperands()) { + Visit(phiOpndIt.second); + } +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dependence.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dependence.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5c7d8536f283cf464e12d07effc952517beaf242 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_dependence.cpp @@ -0,0 +1,1203 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_dependence.h"
+#include "aarch64_cg.h"
+#include "aarch64_operand.h"
+#include "pressure.h"
+
+/* For building the dependence graph, the entry point is AArch64DepAnalysis::Run. */
+namespace maplebe {
+/* constructor */
+AArch64DepAnalysis::AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA)
+    : DepAnalysis(func, mp, mad, beforeRA),
+      stackUses(alloc.Adapter()),
+      stackDefs(alloc.Adapter()),
+      heapUses(alloc.Adapter()),
+      heapDefs(alloc.Adapter()),
+      mayThrows(alloc.Adapter()),
+      ambiInsns(alloc.Adapter()),
+      ehInRegs(alloc.Adapter())
+{
+    uint32 maxRegNum;
+    if (beforeRA) {
+        maxRegNum = cgFunc.GetMaxVReg();
+    } else {
+        maxRegNum = kAllRegNum;
+    }
+    regDefs = memPool.NewArray<Insn *>(maxRegNum);
+    regUses = memPool.NewArray<RegList *>(maxRegNum);
+}
+
+/* print dep node information */
+void AArch64DepAnalysis::DumpDepNode(DepNode &node) const
+{
+    node.GetInsn()->Dump();
+    uint32 num = node.GetUnitNum();
+    LogInfo::MapleLogger() << "unit num : " << num << ", ";
+    for (uint32 i = 0; i < num; ++i) {
+        const Unit *unit = node.GetUnitByIndex(i);
+        if (unit != nullptr) {
+            PRINT_VAL(unit->GetName());
+        } else {
+            PRINT_VAL("none");
+        }
+    }
+    LogInfo::MapleLogger() << '\n';
+    node.DumpSchedInfo();
+    if (beforeRA) {
+        node.DumpRegPressure();
+    }
+}
+
+/* print dep link information */
+void AArch64DepAnalysis::DumpDepLink(DepLink &link, const DepNode *node) const
+{
+    PRINT_VAL(GetDepTypeName(link.GetDepType()));
+    PRINT_STR_VAL("Latency: ", link.GetLatency());
+    if (node != nullptr) {
+        node->GetInsn()->Dump();
+        return;
+    }
+    LogInfo::MapleLogger() << "from : ";
+    link.GetFrom().GetInsn()->Dump();
+    LogInfo::MapleLogger() << "to : ";
+    link.GetTo().GetInsn()->Dump();
+}
+
+/* Append a use register to the list. */
+void AArch64DepAnalysis::AppendRegUseList(Insn &insn, regno_t regNO)
+{
+    RegList *regList = memPool.New<RegList>();
+    regList->insn = &insn;
+    regList->next = nullptr;
+    if (regUses[regNO] == nullptr) {
+        regUses[regNO] = regList;
+        if (beforeRA) {
+            Insn *defInsn = regDefs[regNO];
+            if (defInsn == nullptr) {
+                return;
+            }
+            DepNode *defNode = defInsn->GetDepNode();
+            defNode->SetRegDefs(regNO, regList);
+        }
+        return;
+    }
+    RegList *lastRegList = regUses[regNO];
+    while (lastRegList->next != nullptr) {
+        lastRegList = lastRegList->next;
+    }
+    lastRegList->next = regList;
+}
+
+/*
+ * Add a dependence edge.
+ * Two dependence nodes have at most one edge between them.
+ * True dependence overwrites other dependences.
+ */
+void AArch64DepAnalysis::AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType)
+{
+    /* Cannot build a self-loop dependence. */
+    if (&fromNode == &toNode) {
+        return;
+    }
+    /* Check if the edge already exists. */
+    if (!fromNode.GetSuccs().empty()) {
+        DepLink *depLink = fromNode.GetSuccs().back();
+        if (&(depLink->GetTo()) == &toNode) {
+            if (depLink->GetDepType() != kDependenceTypeTrue) {
+                if (depType == kDependenceTypeTrue) {
+                    /* The edge already exists; upgrade it in place.
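+                     * A true dependence additionally carries a latency taken
+                     * from the MAD reservation tables; anti and output edges
+                     * are ordering-only.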
*/ + depLink->SetDepType(kDependenceTypeTrue); + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + } + return; + } + } + DepLink *depLink = memPool.New(fromNode, toNode, depType); + if (depType == kDependenceTypeTrue) { + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + fromNode.AddSucc(*depLink); + toNode.AddPred(*depLink); +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type) +{ + for (auto anyInsn : insns) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, + const DepType &type) +{ + for (auto anyInsn : insns) { + if (anyInsn != &insn) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } + } +} + +/* Remove self dependence (self loop) in dependence graph. */ +void AArch64DepAnalysis::RemoveSelfDeps(Insn &insn) +{ + DepNode *node = insn.GetDepNode(); + DEBUG_ASSERT(node->GetSuccs().back()->GetTo().GetInsn() == &insn, "Is not a self dependence."); + DEBUG_ASSERT(node->GetPreds().back()->GetFrom().GetInsn() == &insn, "Is not a self dependence."); + node->RemoveSucc(); + node->RemovePred(); +} + +/* Build dependences of source register operand. */ +void AArch64DepAnalysis::BuildDepsUseReg(Insn &insn, regno_t regNO) +{ + DepNode *node = insn.GetDepNode(); + node->AddUseReg(regNO); + if (regDefs[regNO] != nullptr) { + /* Build true dependences. */ + AddDependence(*regDefs[regNO]->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + } +} + +/* Build dependences of destination register operand. */ +void AArch64DepAnalysis::BuildDepsDefReg(Insn &insn, regno_t regNO) +{ + DepNode *node = insn.GetDepNode(); + node->AddDefReg(regNO); + /* Build anti dependences. */ + RegList *regList = regUses[regNO]; + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); + regList = regList->next; + } + /* Build output depnedence. */ + if (regDefs[regNO] != nullptr) { + AddDependence(*regDefs[regNO]->GetDepNode(), *node, kDependenceTypeOutput); + } +} + +void AArch64DepAnalysis::ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn &newInsn, + bool isFromClinit) const +{ + if (isFromClinit) { + firstNode.AddClinitInsn(*firstNode.GetInsn()); + firstNode.AddClinitInsn(*secondNode.GetInsn()); + firstNode.SetCfiInsns(secondNode.GetCfiInsns()); + } else { + for (Insn *insn : secondNode.GetCfiInsns()) { + firstNode.AddCfiInsn(*insn); + } + for (Insn *insn : secondNode.GetComments()) { + firstNode.AddComments(*insn); + } + secondNode.ClearComments(); + } + firstNode.SetInsn(newInsn); + Reservation *rev = mad.FindReservation(newInsn); + CHECK_FATAL(rev != nullptr, "reservation is nullptr."); + firstNode.SetReservation(*rev); + firstNode.SetUnits(rev->GetUnit()); + firstNode.SetUnitNum(rev->GetUnitNum()); + newInsn.SetDepNode(firstNode); +} + +void AArch64DepAnalysis::ClearDepNodeInfo(DepNode &depNode) const +{ + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + insn.SetDepNode(depNode); + Reservation *seRev = mad.FindReservation(insn); + depNode.SetInsn(insn); + depNode.SetType(kNodeTypeEmpty); + depNode.SetReservation(*seRev); + depNode.SetUnitNum(0); + depNode.ClearCfiInsns(); + depNode.SetUnits(nullptr); +} + +/* Combine adrpldr&clinit_tail to clinit. 
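+ * The adrp_ldr node is rewritten in place into a single MOP_clinit built from
+ * its two operands, the clinit_tail node is cleared to a pseudo no-op, and the
+ * dependence edges of the two nodes are merged by CombineDependence.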
*/ +void AArch64DepAnalysis::CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) +{ + DEBUG_ASSERT(firstNode.GetInsn()->GetMachineOpcode() == MOP_adrp_ldr, "first insn should be adrpldr"); + DEBUG_ASSERT(secondNode.GetInsn()->GetMachineOpcode() == MOP_clinit_tail, "second insn should be clinit_tail"); + DEBUG_ASSERT(firstNode.GetCfiInsns().empty(), "There should not be any comment/cfi instructions between clinit."); + DEBUG_ASSERT(secondNode.GetComments().empty(), "There should not be any comment/cfi instructions between clinit."); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_clinit, firstNode.GetInsn()->GetOperand(0), + firstNode.GetInsn()->GetOperand(1)); + newInsn.SetId(firstNode.GetInsn()->GetId()); + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, true); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, isAcrossSeparator); +} + +/* + * Combine memory access pair: + * 1.ldr to ldp. + * 2.str to stp. + */ +void AArch64DepAnalysis::CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) +{ + DEBUG_ASSERT(firstNode.GetInsn(), "the insn of first Node should not be nullptr"); + DEBUG_ASSERT(secondNode.GetInsn(), "the insn of second Node should not be nullptr"); + MOperator thisMop = firstNode.GetInsn()->GetMachineOpcode(); + MOperator mopPair = GetMopPair(thisMop); + DEBUG_ASSERT(mopPair != 0, "mopPair should not be zero"); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + Operand *opnd2 = nullptr; + if (useFirstOffset) { + opnd0 = &(firstNode.GetInsn()->GetOperand(0)); + opnd1 = &(secondNode.GetInsn()->GetOperand(0)); + opnd2 = &(firstNode.GetInsn()->GetOperand(1)); + } else { + opnd0 = &(secondNode.GetInsn()->GetOperand(0)); + opnd1 = &(firstNode.GetInsn()->GetOperand(0)); + opnd2 = &(secondNode.GetInsn()->GetOperand(1)); + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopPair, *opnd0, *opnd1, *opnd2); + newInsn.SetId(firstNode.GetInsn()->GetId()); + std::string newComment; + const MapleString &comment = firstNode.GetInsn()->GetComment(); + if (comment.c_str() != nullptr) { + newComment += comment.c_str(); + } + const MapleString &secondComment = secondNode.GetInsn()->GetComment(); + if (secondComment.c_str() != nullptr) { + newComment += " "; + newComment += secondComment.c_str(); + } + if ((newComment.c_str() != nullptr) && (strlen(newComment.c_str()) > 0)) { + newInsn.SetComment(newComment); + } + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, false); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, false, true); +} + +/* Combine two dependence nodes to one */ +void AArch64DepAnalysis::CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine) +{ + if (isAcrossSeparator) { + /* Clear all latency of the second node. 
*/ + for (auto predLink : secondNode.GetPreds()) { + predLink->SetLatency(0); + } + for (auto succLink : secondNode.GetSuccs()) { + succLink->SetLatency(0); + } + return; + } + std::set uniqueNodes; + + for (auto predLink : firstNode.GetPreds()) { + if (predLink->GetDepType() == kDependenceTypeTrue) { + predLink->SetLatency(mad.GetLatency(*predLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&predLink->GetFrom()); + } + for (auto predLink : secondNode.GetPreds()) { + if (&predLink->GetFrom() != &firstNode) { + if (uniqueNodes.insert(&(predLink->GetFrom())).second) { + AddDependence(predLink->GetFrom(), firstNode, predLink->GetDepType()); + } + } + predLink->SetLatency(0); + } + uniqueNodes.clear(); + for (auto succLink : firstNode.GetSuccs()) { + if (succLink->GetDepType() == kDependenceTypeTrue) { + succLink->SetLatency(mad.GetLatency(*succLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&(succLink->GetTo())); + } + for (auto succLink : secondNode.GetSuccs()) { + if (uniqueNodes.insert(&(succLink->GetTo())).second) { + AddDependence(firstNode, succLink->GetTo(), succLink->GetDepType()); + if (isMemCombine) { + succLink->GetTo().IncreaseValidPredsSize(); + } + } + succLink->SetLatency(0); + } +} + +/* + * Build dependences of ambiguous instruction. + * ambiguous instruction : instructions that can not across may throw instructions. + */ +void AArch64DepAnalysis::BuildDepsAmbiInsn(Insn &insn) +{ + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); + ambiInsns.emplace_back(&insn); +} + +/* Build dependences of may throw instructions. */ +void AArch64DepAnalysis::BuildDepsMayThrowInsn(Insn &insn) +{ + AddDependence4InsnInVectorByType(ambiInsns, insn, kDependenceTypeThrow); +} + +bool AArch64DepAnalysis::IsFrameReg(const RegOperand &opnd) const +{ + return (opnd.GetRegisterNumber() == RFP) || (opnd.GetRegisterNumber() == RSP); +} + +MemOperand *AArch64DepAnalysis::BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const +{ + MemOperand *nextMemOpnd = aarchMemOpnd.Clone(memPool); + Operand *nextOfstOpnd = nextMemOpnd->GetOffsetImmediate()->Clone(memPool); + OfstOperand *aarchNextOfstOpnd = static_cast(nextOfstOpnd); + CHECK_NULL_FATAL(aarchNextOfstOpnd); + int32 offsetVal = static_cast(aarchNextOfstOpnd->GetOffsetValue()); + aarchNextOfstOpnd->SetOffsetValue(offsetVal + byteSize); + nextMemOpnd->SetOffsetOperand(*aarchNextOfstOpnd); + return nextMemOpnd; +} + +/* Get the second memory access operand of stp/ldp instructions. */ +MemOperand *AArch64DepAnalysis::GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const +{ + MemOperand *nextMemOpnd = nullptr; + switch (insn.GetMachineOpcode()) { + case MOP_wldp: + case MOP_sldp: + case MOP_xldpsw: + case MOP_wstp: + case MOP_sstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k4ByteSize); + break; + } + case MOP_xldp: + case MOP_dldp: + case MOP_xstp: + case MOP_dstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k8ByteSize); + break; + } + default: + break; + } + + return nextMemOpnd; +} + +/* + * Build dependences of symbol memory access. + * Memory access with symbol must be a heap memory access. + */ +void AArch64DepAnalysis::BuildDepsAccessStImmMem(Insn &insn, bool isDest) +{ + if (isDest) { + /* + * Heap memory + * Build anti dependences. + */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. 
*/ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + heapDefs.emplace_back(&insn); + } else { + /* Heap memory */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + heapUses.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +/* Build dependences of stack memory and heap memory uses. */ +void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) +{ + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + + aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); + /* Stack memory address */ + for (auto defInsn : stackDefs) { + if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) { + AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + continue; + } + } + /* Heap memory */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { + stackUses.emplace_back(&insn); + } else { + heapUses.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +static bool NoAlias(const MemOperand &leftOpnd, const MemOperand &rightOpnd) +{ + if (leftOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && rightOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + leftOpnd.GetIndexOpt() == MemOperand::kIntact && rightOpnd.GetIndexOpt() == MemOperand::kIntact) { + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() == RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() == RFP) { + Operand *ofstOpnd = leftOpnd.GetOffsetOperand(); + Operand *rofstOpnd = rightOpnd.GetOffsetOperand(); + DEBUG_ASSERT(ofstOpnd != nullptr, "offset operand should not be null."); + DEBUG_ASSERT(rofstOpnd != nullptr, "offset operand should not be null."); + ImmOperand *ofst = static_cast(ofstOpnd); + ImmOperand *rofst = static_cast(rofstOpnd); + DEBUG_ASSERT(ofst != nullptr, "CG internal error, invalid type."); + DEBUG_ASSERT(rofst != nullptr, "CG internal error, invalid type."); + return (!ofst->ValueEquals(*rofst)); + } + } + return false; +} + +static bool NoOverlap(const MemOperand &leftOpnd, const MemOperand &rightOpnd) +{ + if (leftOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || rightOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + leftOpnd.GetIndexOpt() != MemOperand::kIntact || rightOpnd.GetIndexOpt() != MemOperand::kIntact) { + return false; + } + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() != RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) { + return false; + } + int64 ofset1 = leftOpnd.GetOffsetOperand()->GetValue(); + int64 ofset2 = rightOpnd.GetOffsetOperand()->GetValue(); + if (ofset1 < ofset2) { + return ((ofset1 + leftOpnd.GetAccessSize()) <= ofset2); + } else { + return ((ofset2 + rightOpnd.GetAccessSize()) <= ofset1); + } +} + +/* Return true if memInsn's memOpnd no alias with memOpnd and nextMemOpnd */ +bool AArch64DepAnalysis::NeedBuildDepsMem(const MemOperand &memOpnd, const MemOperand *nextMemOpnd, + const Insn &memInsn) const +{ + auto *memOpndOfmemInsn = static_cast(memInsn.GetMemOpnd()); + if (!NoAlias(memOpnd, *memOpndOfmemInsn) || + ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *memOpndOfmemInsn))) { + return true; + } + if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && 
        !memInsn.IsCall()) {
+        static_cast<MemOperand *>(memInsn.GetMemOpnd())->SetAccessSize(memInsn.GetMemoryByteSize());
+        return (!NoOverlap(memOpnd, *memOpndOfmemInsn));
+    }
+    MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn);
+    if (nextMemOpndOfmemInsn != nullptr) {
+        if (!NoAlias(memOpnd, *nextMemOpndOfmemInsn) ||
+            ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *nextMemOpndOfmemInsn))) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * Build anti dependences between insn and other insns that use stack memory.
+ * insn : the instruction that defines stack memory.
+ * memOpnd : insn's memOpnd.
+ * nextMemOpnd : the second location accessed when a memory-pair instruction (like ldp/stp) defines two locations.
+ */
+void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd)
+{
+    memOpnd.SetAccessSize(insn.GetMemoryByteSize());
+    for (auto *useInsn : stackUses) {
+        if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *useInsn)) {
+            AddDependence(*useInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti);
+        }
+    }
+}
+
+/*
+ * Build output dependences between insn and other insns that define stack memory.
+ * insn : the instruction that defines stack memory.
+ * memOpnd : insn's memOpnd.
+ * nextMemOpnd : the second location accessed when a memory-pair instruction (like ldp/stp) defines two locations.
+ */
+void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd)
+{
+    memOpnd.SetAccessSize(insn.GetMemoryByteSize());
+    for (auto defInsn : stackDefs) {
+        if (defInsn->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *defInsn)) {
+            AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput);
+        }
+    }
+}
+
+/* Build dependences of stack memory and heap memory definitions. */
+void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd)
+{
+    RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister();
+    MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd);
+
+    /* Build anti dependences. */
+    BuildAntiDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd);
+    /* Build output dependence. */
+    BuildOutputDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd);
+    if (lastCallInsn != nullptr) {
+        /* Build a dependence between stack-passed arguments and the call. */
+        DEBUG_ASSERT(baseRegister != nullptr, "baseRegister shouldn't be null here");
+        if (baseRegister->GetRegisterNumber() == RSP) {
+            AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl);
+        }
+    }
+
+    /* Heap memory
+     * Build anti dependences.
+     */
+    AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti);
+    /* Build output dependence. */
+    AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput);
+
+    if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) {
+        stackDefs.emplace_back(&insn);
+    } else {
+        heapDefs.emplace_back(&insn);
+    }
+    if (memBarInsn != nullptr) {
+        AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar);
+    }
+    /* A memory definition cannot move across may-throw insns. */
+    AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow);
+}
+
+/* Build dependences of memory barrier instructions.
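+ * A barrier is ordered (kDependenceTypeMembar) against all earlier stack and
+ * heap uses and defs, and it is remembered in memBarInsn so that later memory
+ * accesses also get an edge from the barrier.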
*/ +void AArch64DepAnalysis::BuildDepsMemBar(Insn &insn) +{ + AddDependence4InsnInVectorByTypeAndCmp(stackUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(stackDefs, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapDefs, insn, kDependenceTypeMembar); + memBarInsn = &insn; +} + +/* A pseudo separator node depends all the other nodes. */ +void AArch64DepAnalysis::BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) +{ + uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() ? (separatorIndex + kMaxDependenceNum) + : static_cast(nodes.size() - 1); + newSepNode.ReservePreds(nextSepIndex - separatorIndex); + newSepNode.ReserveSuccs(nextSepIndex - separatorIndex); + for (uint32 i = separatorIndex; i < nextSepIndex; ++i) { + AddDependence(*nodes[i], newSepNode, kDependenceTypeSeparator); + } +} + +/* Build control dependence for branch/ret instructions. */ +void AArch64DepAnalysis::BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) +{ + for (uint32 i = separatorIndex; i < depNode.GetIndex(); ++i) { + AddDependence(*nodes[i], depNode, kDependenceTypeControl); + } +} + +/* + * Build dependences of call instructions. + * Caller-saved physical registers will defined by a call instruction. + * Also a conditional register may modified by a call. + */ +void AArch64DepAnalysis::BuildCallerSavedDeps(Insn &insn) +{ + /* Build anti dependence and output dependence. */ + for (uint32 i = R0; i <= R7; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = V0; i <= V7; ++i) { + BuildDepsDefReg(insn, i); + } + if (!beforeRA) { + for (uint32 i = R8; i <= R18; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = RLR; i <= RSP; ++i) { + BuildDepsUseReg(insn, i); + } + for (uint32 i = V16; i <= V31; ++i) { + BuildDepsDefReg(insn, i); + } + } + /* For condition operand, such as NE, EQ, and so on. */ + if (cgFunc.GetRflag() != nullptr) { + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* + * Build dependence between control register and last call instruction. + * insn : instruction that with control register operand. + * isDest : if the control register operand is a destination operand. + */ +void AArch64DepAnalysis::BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) +{ + if (lastCallInsn == nullptr) { + return; + } + if (isDest) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + return; + } + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); +} + +/* + * Build dependence between stack-define-instruction that deal with call-insn's args and a call-instruction. + * insn : a call instruction (call/tail-call) + */ +void AArch64DepAnalysis::BuildStackPassArgsDeps(Insn &insn) +{ + for (auto stackDefInsn : stackDefs) { + if (stackDefInsn->IsCall()) { + continue; + } + Operand *opnd = stackDefInsn->GetMemOpnd(); + DEBUG_ASSERT(opnd->IsMemoryAccessOperand(), "make sure opnd is memOpnd"); + MemOperand *memOpnd = static_cast(opnd); + RegOperand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && (baseReg->GetRegisterNumber() == RSP)) { + AddDependence(*stackDefInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } +} + +/* Some insns may dirty all stack memory, such as "bl MCC_InitializeLocalStackRef". */ +void AArch64DepAnalysis::BuildDepsDirtyStack(Insn &insn) +{ + /* Build anti dependences. 
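+     * A stack-dirtying callee clobbers the whole stack area: every earlier
+     * stack use must be ordered before this insn, and every earlier stack def
+     * gets an output edge.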
     */
+    AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti);
+    /* Build output dependence. */
+    AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput);
+    stackDefs.emplace_back(&insn);
+}
+
+/* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast". */
+void AArch64DepAnalysis::BuildDepsUseStack(Insn &insn)
+{
+    /* Build true dependences. */
+    AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeTrue);
+}
+
+/* Some insns may dirty all heap memory, such as a call insn. */
+void AArch64DepAnalysis::BuildDepsDirtyHeap(Insn &insn)
+{
+    /* Build anti dependences. */
+    AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti);
+    /* Build output dependence. */
+    AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput);
+    if (memBarInsn != nullptr) {
+        AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar);
+    }
+    heapDefs.emplace_back(&insn);
+}
+
+/* Build a pseudo node to separate the dependence graph. */
+DepNode *AArch64DepAnalysis::BuildSeparatorNode()
+{
+    Insn &pseudoSepInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_dependence_seperator);
+    DepNode *separatorNode = memPool.New<DepNode>(pseudoSepInsn, alloc);
+    separatorNode->SetType(kNodeTypeSeparator);
+    pseudoSepInsn.SetDepNode(*separatorNode);
+    if (beforeRA) {
+        RegPressure *regPressure = memPool.New<RegPressure>(alloc);
+        separatorNode->SetRegPressure(*regPressure);
+        separatorNode->InitPressure();
+    }
+    return separatorNode;
+}
+
+/* Initialize the dependence-analysis data structures. */
+void AArch64DepAnalysis::Init(BB &bb, MapleVector<DepNode *> &nodes)
+{
+    curBB = &bb;
+    ClearAllDepData();
+    lastComments.clear();
+    /* Analyze live-in registers in catch BBs. */
+    AnalysisAmbiInsns(bb);
+    /* Clear all dependence nodes and push the first separator node. */
+    nodes.clear();
+    DepNode *pseudoSepNode = BuildSeparatorNode();
+    nodes.emplace_back(pseudoSepNode);
+    separatorIndex = 0;
+
+    if (beforeRA) {
+        /* assume the first pseudo_dependence_seperator insn of the current bb defines the live-in registers */
+        Insn *pseudoSepInsn = pseudoSepNode->GetInsn();
+        for (auto &regNO : bb.GetLiveInRegNO()) {
+            regDefs[regNO] = pseudoSepInsn;
+            pseudoSepNode->AddDefReg(regNO);
+            pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr);
+        }
+    }
+}
+
+/* When a separator is built, it acts like the start of a new basic block. */
+void AArch64DepAnalysis::ClearAllDepData()
+{
+    uint32 maxRegNum;
+    if (beforeRA) {
+        maxRegNum = cgFunc.GetMaxVReg();
+    } else {
+        maxRegNum = kAllRegNum;
+    }
+    errno_t ret = memset_s(regDefs, sizeof(Insn *) * maxRegNum, 0, sizeof(Insn *) * maxRegNum);
+    CHECK_FATAL(ret == EOK, "call memset_s failed in Unit");
+    ret = memset_s(regUses, sizeof(RegList *) * maxRegNum, 0, sizeof(RegList *) * maxRegNum);
+    CHECK_FATAL(ret == EOK, "call memset_s failed in Unit");
+    memBarInsn = nullptr;
+    lastCallInsn = nullptr;
+    lastFrameDef = nullptr;
+
+    stackUses.clear();
+    stackDefs.clear();
+    heapUses.clear();
+    heapDefs.clear();
+    mayThrows.clear();
+    ambiInsns.clear();
+}
+
+/* Analyze live-in registers in catch bbs and the cleanup bb. */
+void AArch64DepAnalysis::AnalysisAmbiInsns(BB &bb)
+{
+    hasAmbiRegs = false;
+    if (bb.GetEhSuccs().empty()) {
+        return;
+    }
+
+    /* Union all catch bbs */
+    for (auto succBB : bb.GetEhSuccs()) {
+        const MapleSet<regno_t> &liveInRegSet = succBB->GetLiveInRegNO();
+        set_union(liveInRegSet.begin(), liveInRegSet.end(), ehInRegs.begin(), ehInRegs.end(),
+                  inserter(ehInRegs, ehInRegs.begin()));
+    }
+
+    /* Union the cleanup entry bb.
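+     * Its live-ins may also be read on the EH path; R0 and R1 are subtracted
+     * below because the EH runtime defines them.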
*/ + const MapleSet ®NOSet = cgFunc.GetCleanupEntryBB()->GetLiveInRegNO(); + std::set_union(regNOSet.begin(), regNOSet.end(), ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + + /* Subtract R0 and R1, that is defined by eh runtime. */ + ehInRegs.erase(R0); + ehInRegs.erase(R1); + if (ehInRegs.empty()) { + return; + } + hasAmbiRegs = true; +} + +/* Check if regNO is in ehInRegs. */ +bool AArch64DepAnalysis::IfInAmbiRegs(regno_t regNO) const +{ + if (!hasAmbiRegs) { + return false; + } + if (ehInRegs.find(regNO) != ehInRegs.end()) { + return true; + } + return false; +} + +static bool IsYieldPoint(Insn &insn) +{ + /* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && + mem->GetBaseRegister()->GetRegisterNumber() == RYP); + } + return false; +} + +/* + * Build dependences of memory operand. + * insn : a instruction with the memory access operand. + * opnd : the memory access operand. + * regProp : operand property of the memory access operandess operand. + */ +void AArch64DepAnalysis::BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop) +{ + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be memory Operand"); + MemOperand *memOpnd = static_cast(&opnd); + RegOperand *baseRegister = memOpnd->GetBaseRegister(); + if (baseRegister != nullptr) { + regno_t regNO = baseRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed())) { + /* Base operand has changed. */ + BuildDepsDefReg(insn, regNO); + } + } + RegOperand *indexRegister = memOpnd->GetIndexRegister(); + if (indexRegister != nullptr) { + regno_t regNO = indexRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + if (regProp.IsUse()) { + BuildDepsUseMem(insn, *memOpnd); + } else { + BuildDepsDefMem(insn, *memOpnd); + BuildDepsAmbiInsn(insn); + } + if (IsYieldPoint(insn)) { + BuildDepsMemBar(insn); + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* Build Dependency for each Operand of insn */ +void AArch64DepAnalysis::BuildOpndDependency(Insn &insn) +{ + const InsnDesc *md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + const OpndDesc *regProp = md->opndMD[i]; + if (opnd.IsMemoryAccessOperand()) { + BuildMemOpndDependency(insn, opnd, *regProp); + } else if (opnd.IsStImmediate()) { + if (mOp != MOP_xadrpl12) { + BuildDepsAccessStImmMem(insn, false); + } + } else if (opnd.IsRegister()) { + RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + + if (regProp->IsUse()) { + BuildDepsUseReg(insn, regNO); + } + + if (regProp->IsDef()) { + BuildDepsDefReg(insn, regNO); + } + } else if (opnd.IsConditionCode()) { + /* For condition operand, such as NE, EQ, and so on. 
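+             * Condition codes are modeled as the single flag register kRFLAG:
+             * a read adds a use dependence, a write adds a def dependence, and
+             * both are additionally ordered against the last call.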
+             */
+            if (regProp->IsUse()) {
+                BuildDepsUseReg(insn, kRFLAG);
+                BuildDepsBetweenControlRegAndCall(insn, false);
+            }
+
+            if (regProp->IsDef()) {
+                BuildDepsDefReg(insn, kRFLAG);
+                BuildDepsBetweenControlRegAndCall(insn, true);
+            }
+        } else if (opnd.IsList()) {
+            ListOperand &listOpnd = static_cast<ListOperand&>(opnd);
+            /* Build true dependences. */
+            for (auto lst : listOpnd.GetOperands()) {
+                regno_t regNO = lst->GetRegisterNumber();
+                BuildDepsUseReg(insn, regNO);
+            }
+        }
+    }
+}
+
+static bool IsLazyLoad(MOperator op)
+{
+    return (op == MOP_lazy_ldr) || (op == MOP_lazy_ldr_static) || (op == MOP_lazy_tail);
+}
+
+/*
+ * Build dependences for special cases (stack/heap/throw/clinit/lazy binding/control flow).
+ * insn : an instruction.
+ * depNode : insn's depNode.
+ * nodes : the dependence nodes, including insn's depNode.
+ */
+void AArch64DepAnalysis::BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector<DepNode*> &nodes)
+{
+    const InsnDesc *md = insn.GetDesc();
+    MOperator mOp = insn.GetMachineOpcode();
+    if (insn.IsCall() || insn.IsTailCall()) {
+        /* Caller-saved registers. */
+        BuildCallerSavedDeps(insn);
+        BuildStackPassArgsDeps(insn);
+
+        if (mOp == MOP_xbl) {
+            FuncNameOperand &target = static_cast<FuncNameOperand&>(insn.GetOperand(0));
+            if ((target.GetName() == "MCC_InitializeLocalStackRef") || (target.GetName() == "MCC_ClearLocalStackRef") ||
+                (target.GetName() == "MCC_DecRefResetPair")) {
+                /* Writes stack memory. */
+                BuildDepsDirtyStack(insn);
+            } else if ((target.GetName() == "MCC_CleanupLocalStackRef_NaiveRCFast") ||
+                       (target.GetName() == "MCC_CleanupLocalStackRefSkip_NaiveRCFast") ||
+                       (target.GetName() == "MCC_CleanupLocalStackRefSkip")) {
+                /* Uses stack memory. */
+                BuildDepsUseStack(insn);
+            } else if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC) {
+                /* Potential C aliasing. */
+                BuildDepsDirtyStack(insn);
+            }
+        }
+        BuildDepsDirtyHeap(insn);
+        BuildDepsAmbiInsn(insn);
+        if (lastCallInsn != nullptr) {
+            AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl);
+        }
+        lastCallInsn = &insn;
+    } else if (insn.IsClinit() || IsLazyLoad(insn.GetMachineOpcode()) ||
+               insn.GetMachineOpcode() == MOP_arrayclass_cache_ldr) {
+        BuildDepsDirtyHeap(insn);
+        BuildDepsDefReg(insn, kRFLAG);
+        if (insn.GetMachineOpcode() != MOP_adrp_ldr) {
+            BuildDepsDefReg(insn, R16);
+            BuildDepsDefReg(insn, R17);
+        }
+    } else if ((mOp == MOP_xret) || md->IsBranch()) {
+        BuildDepsControlAll(depNode, nodes);
+    } else if (insn.IsMemAccessBar()) {
+        BuildDepsMemBar(insn);
+    } else if (insn.IsSpecialIntrinsic()) {
+        BuildDepsDirtyHeap(insn);
+    }
+}
+
+/*
+ * If the number of instructions in the current basic block exceeds kMaxDependenceNum,
+ * insert pseudo separator nodes to split the basic block.
+ */
+void AArch64DepAnalysis::SeperateDependenceGraph(MapleVector<DepNode*> &nodes, uint32 &nodeSum)
+{
+    if ((nodeSum > 0) && ((nodeSum % kMaxDependenceNum) == 0)) {
+        DEBUG_ASSERT(nodeSum == nodes.size(), "CG internal error, nodeSum should equal nodes.size().");
+        /* Add a pseudo node to separate the dependence graph.
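+         * Sketch: once a slice reaches kMaxDependenceNum insns, nodes on
+         * either side of the new separator are connected only through it,
+         * which bounds the edge count and the scheduler's compile time.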
*/ + DepNode *separatorNode = BuildSeparatorNode(); + separatorNode->SetIndex(nodeSum); + nodes.emplace_back(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } + ClearAllDepData(); + separatorIndex = nodeSum++; + } +} + +/* + * Generate a depNode, + * insn : create depNode for the instruction. + * nodes : a vector to store depNode. + * nodeSum : the new depNode's index. + * comments : those comment insn between last no-comment's insn and insn. + */ +DepNode *AArch64DepAnalysis::GenerateDepNode(Insn &insn, MapleVector &nodes, int32 nodeSum, + const MapleVector &comments) +{ + DepNode *depNode = nullptr; + Reservation *rev = mad.FindReservation(insn); + DEBUG_ASSERT(rev != nullptr, "rev is nullptr"); + depNode = memPool.New(insn, alloc, rev->GetUnit(), rev->GetUnitNum(), *rev); + if (beforeRA) { + RegPressure *regPressure = memPool.New(alloc); + depNode->SetRegPressure(*regPressure); + depNode->InitPressure(); + } + depNode->SetIndex(nodeSum); + nodes.emplace_back(depNode); + insn.SetDepNode(*depNode); + + constexpr size_t vectorSize = 5; + depNode->ReservePreds(vectorSize); + depNode->ReserveSuccs(vectorSize); + + if (!comments.empty()) { + depNode->SetComments(comments); + } + return depNode; +} + +void AArch64DepAnalysis::BuildAmbiInsnDependency(Insn &insn) +{ + const auto &defRegnos = insn.GetDepNode()->GetDefRegnos(); + for (const auto ®NO : defRegnos) { + if (IfInAmbiRegs(regNO)) { + BuildDepsAmbiInsn(insn); + break; + } + } +} + +void AArch64DepAnalysis::BuildMayThrowInsnDependency(Insn &insn) +{ + /* build dependency for maythrow insn; */ + if (insn.MayThrow()) { + BuildDepsMayThrowInsn(insn); + if (lastFrameDef != nullptr) { + AddDependence(*lastFrameDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeThrow); + } + } +} + +void AArch64DepAnalysis::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) +{ + const auto &useRegnos = depNode.GetUseRegnos(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } + for (auto regNO : useRegnos) { + AppendRegUseList(insn, regNO); + if (beforeRA) { + depNode.SetRegUses(*regUses[regNO]); + if (regDefs[regNO] == nullptr) { + regDefs[regNO] = nodes[separatorIndex]->GetInsn(); + nodes[separatorIndex]->AddDefReg(regNO); + nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), regUses[regNO]); + } + } + } + + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } + for (const auto regNO : defRegnos) { + regDefs[regNO] = &insn; + regUses[regNO] = nullptr; + if (beforeRA) { + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); + } + } + ++i; + } +} + +/* Update stack and heap dependency */ +void AArch64DepAnalysis::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn) +{ + if (!insn.MayThrow()) { + return; + } + depNode.SetLocInsn(locInsn); + mayThrows.emplace_back(&insn); + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeThrow); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeThrow); +} + +/* Add a separatorNode to the end of a nodes + * * 
before RA: add all live-out registers to this separatorNode's uses
+ */
+void AArch64DepAnalysis::AddEndSeparatorNode(MapleVector<DepNode*> &nodes)
+{
+    DepNode *separatorNode = BuildSeparatorNode();
+    nodes.emplace_back(separatorNode);
+    BuildDepsSeparator(*separatorNode, nodes);
+
+    if (beforeRA) {
+        /* For all live-out registers of the current bb. */
+        for (auto &regNO : curBB->GetLiveOutRegNO()) {
+            if (regDefs[regNO] != nullptr) {
+                AppendRegUseList(*(separatorNode->GetInsn()), regNO);
+                separatorNode->AddUseReg(regNO);
+                separatorNode->SetRegUses(*regUses[regNO]);
+            }
+        }
+    }
+}
+
+/*
+ * Build the dependence graph.
+ * 1: Build dependence nodes.
+ * 2: Build edges between dependence nodes. Edges are:
+ *    2.1) True dependences
+ *    2.2) Anti dependences
+ *    2.3) Output dependences
+ *    2.4) Barrier dependences
+ */
+void AArch64DepAnalysis::Run(BB &bb, MapleVector<DepNode*> &nodes)
+{
+    /* Initialize internal data. */
+    Init(bb, nodes);
+    uint32 nodeSum = 1;
+    MapleVector<Insn*> comments(alloc.Adapter());
+    const Insn *locInsn = bb.GetFirstLoc();
+    FOR_BB_INSNS(insn, (&bb)) {
+        if (!insn->IsMachineInstruction()) {
+            if (insn->IsImmaterialInsn()) {
+                if (!insn->IsComment()) {
+                    locInsn = insn;
+                } else {
+                    comments.emplace_back(insn);
+                }
+            } else if (insn->IsCfiInsn()) {
+                if (!nodes.empty()) {
+                    nodes.back()->AddCfiInsn(*insn);
+                }
+            }
+            continue;
+        }
+        /* Add a pseudo node to separate the dependence graph when appropriate. */
+        SeperateDependenceGraph(nodes, nodeSum);
+        /* Generate a DepNode. */
+        DepNode *depNode = GenerateDepNode(*insn, nodes, nodeSum, comments);
+        ++nodeSum;
+        comments.clear();
+        /* Build dependences for a may-throw insn. */
+        BuildMayThrowInsnDependency(*insn);
+        /* Build dependences for each operand of insn. */
+        BuildOpndDependency(*insn);
+        /* Build dependences for special insns. */
+        BuildSpecialInsnDependency(*insn, *depNode, nodes);
+        /* Build dependences for an ambiguous insn if needed. */
+        BuildAmbiInsnDependency(*insn);
+        /* Update stack and heap dependences. */
+        UpdateStackAndHeapDependency(*depNode, *insn, *locInsn);
+        if (insn->IsFrameDef()) {
+            lastFrameDef = insn;
+        }
+        /* A separator node always exists. */
+        AddDependence(*nodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator);
+        /* Update register uses and register defs. */
+        UpdateRegUseAndDef(*insn, *depNode, nodes);
+    }
+
+    AddEndSeparatorNode(nodes);
+
+    if (!comments.empty()) {
+        lastComments = comments;
+    }
+    comments.clear();
+}
+
+/* Return the dependence type name. */
+const std::string &AArch64DepAnalysis::GetDepTypeName(DepType depType) const
+{
+    DEBUG_ASSERT(depType <= kDependenceTypeNone, "array boundary check failed");
+    return kDepTypeName[depType];
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ebo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c857787ad274b36762b6444183494b1dbf4fcf2
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ebo.cpp
@@ -0,0 +1,1557 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_ebo.h" +#include "aarch64_cg.h" +#include "mpl_logging.h" +#include "aarch64_utils.h" + +namespace maplebe { +using namespace maple; +#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc) + +enum AArch64Ebo::ExtOpTable : uint8 { AND, SXTB, SXTH, SXTW, ZXTB, ZXTH, ZXTW, ExtTableSize }; + +namespace { + +using PairMOperator = MOperator[2]; + +constexpr uint8 insPairsNum = 5; + +PairMOperator extInsnPairTable[ExtTableSize][insPairsNum] = { + /* {origMop, newMop} */ + {{MOP_wldrb, MOP_wldrb}, + {MOP_wldrsh, MOP_wldrb}, + {MOP_wldrh, MOP_wldrb}, + {MOP_xldrsw, MOP_wldrb}, + {MOP_wldr, MOP_wldrb}}, /* AND */ + {{MOP_wldrb, MOP_wldrsb}, + {MOP_wldr, MOP_wldrsb}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_wldrh, MOP_wldrsh}, + {MOP_wldrb, MOP_wldrb}, + {MOP_wldrsb, MOP_wldrsb}, + {MOP_wldrsh, MOP_wldrsh}, + {MOP_undef, MOP_undef}}, /* SXTH */ + {{MOP_wldrh, MOP_wldrh}, + {MOP_wldrsh, MOP_wldrsh}, + {MOP_wldrb, MOP_wldrb}, + {MOP_wldrsb, MOP_wldrsb}, + {MOP_wldr, MOP_xldrsw}}, /* SXTW */ + {{MOP_wldrb, MOP_wldrb}, + {MOP_wldrsb, MOP_wldrb}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* ZXTB */ + {{MOP_wldrh, MOP_wldrh}, + {MOP_wldrb, MOP_wldrb}, + {MOP_wldr, MOP_wldrh}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* ZXTH */ + {{MOP_wldr, MOP_wldr}, + {MOP_wldrh, MOP_wldrh}, + {MOP_wldrb, MOP_wldrb}, + {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}} /* ZXTW */ +}; + +} // anonymous namespace + +MOperator AArch64Ebo::ExtLoadSwitchBitSize(MOperator lowMop) const +{ + switch (lowMop) { + case MOP_wldrsb: + return MOP_xldrsb; + case MOP_wldrsh: + return MOP_xldrsh; + default: + break; + } + return lowMop; +} + +bool AArch64Ebo::IsFmov(const Insn &insn) const +{ + return ((insn.GetMachineOpcode() >= MOP_xvmovsr) && (insn.GetMachineOpcode() <= MOP_xvmovrd)); +} + +bool AArch64Ebo::IsAdd(const Insn &insn) const +{ + return ((insn.GetMachineOpcode() >= MOP_xaddrrr) && (insn.GetMachineOpcode() <= MOP_ssub)); +} + +bool AArch64Ebo::IsInvalidReg(const RegOperand &opnd) const +{ + return (opnd.GetRegisterNumber() == AArch64reg::kRinvalid); +} + +bool AArch64Ebo::IsZeroRegister(const Operand &opnd) const +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AArch64Ebo::IsConstantImmOrReg(const Operand &opnd) const +{ + if (opnd.IsConstImmediate()) { + return true; + } + return IsZeroRegister(opnd); +} + +bool AArch64Ebo::IsClinitCheck(const Insn &insn) const +{ + MOperator mOp = insn.GetMachineOpcode(); + return ((mOp == MOP_clinit) || (mOp == MOP_clinit_tail)); +} + +bool AArch64Ebo::IsDecoupleStaticOp(Insn &insn) const +{ + if (insn.GetMachineOpcode() == MOP_lazy_ldr_static) { + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(opnd1 != nullptr, "opnd1 is null!"); + auto *stImmOpnd = static_cast(opnd1); + return StringUtils::StartsWith(stImmOpnd->GetName(), namemangler::kDecoupleStaticValueStr); + } + return false; +} + +static bool IsYieldPoint(Insn &insn) +{ + /* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && + 
                mem->GetBaseRegister()->GetRegisterNumber() == RYP);
+    }
+    return false;
+}
+
+/* Return true if insn is globally needed. */
+bool AArch64Ebo::IsGlobalNeeded(Insn &insn) const
+{
+    /* Calls may have side effects. */
+    if (insn.IsCall()) {
+        return true;
+    }
+
+    /* Intrinsic calls should not be removed. */
+    if (insn.IsSpecialIntrinsic()) {
+        return true;
+    }
+
+    /* Clinit should not be removed. */
+    if (IsClinitCheck(insn)) {
+        return true;
+    }
+
+    /* Yieldpoints should not be removed by the optimizer. */
+    if (cgFunc->GetCG()->GenYieldPoint() && IsYieldPoint(insn)) {
+        return true;
+    }
+
+    std::set<uint32> defRegs = insn.GetDefRegs();
+    for (auto defRegNo : defRegs) {
+        if (defRegNo == RZR || defRegNo == RSP || (defRegNo == RFP && CGOptions::UseFramePointer())) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/* On aarch64, resOp will not be both defined and used at the same time. */
+bool AArch64Ebo::ResIsNotDefAndUse(Insn &insn) const
+{
+    (void)insn;
+    return true;
+}
+
+/* Return true if opnd is live out of bb. */
+bool AArch64Ebo::LiveOutOfBB(const Operand &opnd, const BB &bb) const
+{
+    CHECK_FATAL(opnd.IsRegister(), "expect a register here.");
+    /* When optimize_level < 2, live range analysis is not performed. */
+    if (live == nullptr) {
+        return false;
+    }
+    bool isLiveOut = false;
+    if (bb.GetLiveOut()->TestBit(static_cast<const RegOperand*>(&opnd)->GetRegisterNumber())) {
+        isLiveOut = true;
+    }
+    return isLiveOut;
+}
+
+bool AArch64Ebo::IsLastAndBranch(BB &bb, Insn &insn) const
+{
+    return (bb.GetLastInsn() == &insn) && insn.IsBranch();
+}
+
+bool AArch64Ebo::IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    if (!(mOp == MOP_wmovri32 || mOp == MOP_xmovri64 || mOp == MOP_wsfmovri || mOp == MOP_xdfmovri)) {
+        return false;
+    }
+    OpndInfo *sameInfo = opndInfo.same;
+    if (sameInfo == nullptr || sameInfo->insn == nullptr || sameInfo->bb != &bb ||
+        sameInfo->insn->GetMachineOpcode() != mOp) {
+        return false;
+    }
+    Insn *prevInsn = sameInfo->insn;
+    if (!prevInsn->GetOperand(kInsnSecondOpnd).IsImmediate()) {
+        return false;
+    }
+    auto &sameOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    auto &opnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (sameOpnd.GetValue() == opnd.GetValue()) {
+        sameInfo->refCount += opndInfo.refCount;
+        return true;
+    }
+    return false;
+}
+
+const RegOperand &AArch64Ebo::GetRegOperand(const Operand &opnd) const
+{
+    CHECK_FATAL(opnd.IsRegister(), "aarch64 should not have regShiftOp! opnd is not a register!");
+    const auto &res = static_cast<const RegOperand&>(opnd);
+    return res;
+}
+
+/* Create information for localOpnd from its defining insn currentInsn. */
+OpndInfo *AArch64Ebo::OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd)
+{
+    int32 hashVal = localOpnd.IsRegister() ?
-1 : ComputeOpndHash(localOpnd); + OpndInfo *opndInfoPrev = GetOpndInfo(localOpnd, hashVal); + OpndInfo *opndInfo = GetNewOpndInfo(currentBB, ¤tInsn, localOpnd, hashVal); + if (localOpnd.IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(opndInfo); + MemOperand *mem = static_cast(&localOpnd); + Operand *base = mem->GetBaseRegister(); + Operand *offset = mem->GetOffset(); + if (base != nullptr && base->IsRegister()) { + memInfo->SetBaseInfo(*OperandInfoUse(currentBB, *base)); + } + if (offset != nullptr && offset->IsRegister()) { + memInfo->SetOffsetInfo(*OperandInfoUse(currentBB, *offset)); + } + } + opndInfo->same = opndInfoPrev; + if ((opndInfoPrev != nullptr)) { + opndInfoPrev->redefined = true; + if (opndInfoPrev->bb == ¤tBB) { + opndInfoPrev->redefinedInBB = true; + opndInfoPrev->redefinedInsn = ¤tInsn; + } + UpdateOpndInfo(localOpnd, *opndInfoPrev, opndInfo, hashVal); + } else { + SetOpndInfo(localOpnd, opndInfo, hashVal); + } + return opndInfo; +} + +void AArch64Ebo::DefineClinitSpecialRegisters(InsnInfo &insnInfo) +{ + Insn *insn = insnInfo.insn; + CHECK_FATAL(insn != nullptr, "nullptr of currInsnInfo"); + RegOperand &phyOpnd1 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd1); + opndInfo->insnInfo = &insnInfo; + + RegOperand &phyOpnd2 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R17, k64BitSize, kRegTyInt); + opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd2); + opndInfo->insnInfo = &insnInfo; +} + +void AArch64Ebo::BuildCallerSaveRegisters() +{ + callerSaveRegTable.clear(); + RegOperand &phyOpndR0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + RegOperand &phyOpndV0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpndR0); + callerSaveRegTable.emplace_back(&phyOpndV0); + for (uint32 i = R1; i <= R18; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + callerSaveRegTable.emplace_back(&phyOpnd); + } + for (uint32 i = V1; i <= V7; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpnd); + } + for (uint32 i = V16; i <= V31; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpnd); + } + CHECK_FATAL(callerSaveRegTable.size() < kMaxCallerSaveReg, + "number of elements in callerSaveRegTable must less then 45!"); +} + +void AArch64Ebo::DefineAsmRegisters(InsnInfo &insnInfo) +{ + Insn *insn = insnInfo.insn; + DEBUG_ASSERT(insn->GetMachineOpcode() == MOP_asm, "insn should be a call insn."); + ListOperand &outList = + const_cast(static_cast(insn->GetOperand(kAsmOutputListOpnd))); + for (auto opnd : outList.GetOperands()) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + ListOperand &clobberList = + const_cast(static_cast(insn->GetOperand(kAsmClobberListOpnd))); + for (auto opnd : clobberList.GetOperands()) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + ListOperand &inList = + const_cast(static_cast(insn->GetOperand(kAsmInputListOpnd))); + for (auto opnd : inList.GetOperands()) { + OperandInfoUse(*(insn->GetBB()), *opnd); + } +} + +void 
AArch64Ebo::DefineCallerSaveRegisters(InsnInfo &insnInfo) +{ + Insn *insn = insnInfo.insn; + if (insn->IsAsmInsn()) { + DefineAsmRegisters(insnInfo); + return; + } + DEBUG_ASSERT(insn->IsCall() || insn->IsTailCall(), "insn should be a call insn."); + if (CGOptions::DoIPARA()) { + auto *targetOpnd = insn->GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt); + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + return; + } + } + } + for (auto opnd : callerSaveRegTable) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } +} + +void AArch64Ebo::DefineReturnUseRegister(Insn &insn) +{ + if (insn.GetMachineOpcode() != MOP_xret) { + return; + } + /* Define scalar callee save register and FP, LR. */ + for (uint32 i = R19; i <= R30; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); + + /* Define FP callee save registers. */ + for (uint32 i = V8; i <= V15; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } +} + +void AArch64Ebo::DefineCallUseSpecialRegister(Insn &insn) +{ + if (insn.GetMachineOpcode() == MOP_asm) { + return; + } + AArch64reg fpRegNO = RFP; + if (!beforeRegAlloc && cgFunc->UseFP()) { + fpRegNO = R29; + } + /* Define FP, LR. 
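+     * Under the AAPCS64 convention a call implicitly reads SP and writes
+     * LR (x30) with the return address, e.g. "bl foo", so FP/LR/SP are
+     * recorded as used at every call site here.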
*/ + RegOperand &phyOpndFP = a64CGFunc->GetOrCreatePhysicalRegisterOperand(fpRegNO, k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndFP); + RegOperand &phyOpndLR = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RLR), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndLR); + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); +} + +/* return true if op1 == op2 */ +bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const +{ + switch (op1.GetKind()) { + case Operand::kOpdRegister: { + const RegOperand ®1 = static_cast(op1); + const RegOperand ®2 = static_cast(op2); + return reg1 == reg2; + } + case Operand::kOpdImmediate: { + const ImmOperand &imm1 = static_cast(op1); + const ImmOperand &imm2 = static_cast(op2); + return imm1 == imm2; + } + case Operand::kOpdOffset: { + const OfstOperand &ofst1 = static_cast(op1); + const OfstOperand &ofst2 = static_cast(op2); + return ofst1 == ofst2; + } + case Operand::kOpdStImmediate: { + const StImmOperand &stImm1 = static_cast(op1); + const StImmOperand &stImm2 = static_cast(op2); + return stImm1 == stImm2; + } + case Operand::kOpdMem: { + const MemOperand &mem1 = static_cast(op1); + const MemOperand &mem2 = static_cast(op2); + if (mem1.GetAddrMode() == mem2.GetAddrMode()) { + DEBUG_ASSERT(mem1.GetBaseRegister() != nullptr, "nullptr check"); + DEBUG_ASSERT(mem2.GetBaseRegister() != nullptr, "nullptr check"); + } + return ((mem1.GetAddrMode() == mem2.GetAddrMode()) && + OperandEqual(*(mem1.GetBaseRegister()), *(mem2.GetBaseRegister())) && + OperandEqual(*(mem1.GetIndexRegister()), *(mem2.GetIndexRegister())) && + OperandEqual(*(mem1.GetOffsetOperand()), *(mem2.GetOffsetOperand())) && + (mem1.GetSymbol() == mem2.GetSymbol()) && (mem1.GetSize() == mem2.GetSize())); + } + default: { + return false; + } + } +} + +int32 AArch64Ebo::GetOffsetVal(const MemOperand &memOpnd) const +{ + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + int32 val = 0; + if (offset != nullptr) { + val += static_cast(offset->GetOffsetValue()); + + if (offset->IsSymOffset() || offset->IsSymAndImmOffset()) { + val += offset->GetSymbol()->GetStIdx().Idx(); + } + } + return val; +} + +/* + * move vreg1, #1 + * move vreg2, vreg1 + * ===> + * move vreg1, #1 + * move vreg2, #1 + * return true if do simplify successfully. 
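+ *
+ * Also handled below (sketch, hypothetical vregs):
+ * move vreg1, #4
+ * add vreg2, vreg3, vreg1
+ * ===>
+ * move vreg1, #4
+ * add vreg2, vreg3, #4
+ * provided #4 fits the 12-bit (optionally shifted) add immediate.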
+ */ +bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) +{ + ImmOperand *src = static_cast(&opnd); + const InsnDesc *md = &AArch64CG::kMd[(insn.GetMachineOpcode())]; + /* avoid the invalid case "cmp wzr, #0"/"add w1, wzr, #100" */ + Operand &destOpnd = insn.GetOperand(idx); + if (src->IsZero() && destOpnd.IsRegister() && + (static_cast(destOpnd).GetRegisterType() == kRegTyInt) && + (insn.IsStore() || insn.IsMove() || md->IsCondDef())) { + insn.SetOperand(idx, *GetZeroOpnd(src->GetSize())); + return true; + } + MOperator mopCode = insn.GetMachineOpcode(); + switch (mopCode) { + case MOP_xmovrr: + case MOP_wmovrr: { + DEBUG_ASSERT(idx == kInsnSecondOpnd, "src const for move must be the second operand."); + uint32 targetSize = insn.GetOperand(idx).GetSize(); + if (src->GetSize() != targetSize) { + src = static_cast(src->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(src != nullptr, "pointer result is null"); + src->SetSize(targetSize); + } + if (src->IsSingleInstructionMovable() && (insn.GetOperand(kInsnFirstOpnd).GetSize() == targetSize)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + insn.SetOperand(kInsnSecondOpnd, *src); + MOperator mOp = (mopCode == MOP_wmovrr) ? MOP_wmovri32 : MOP_xmovri64; + insn.SetMOP(AArch64CG::kMd[mOp]); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xsubrrr: + case MOP_wsubrrr: { + if ((idx != kInsnThirdOpnd) || !src->IsInBitSize(kMaxImmVal24Bits, 0) || + !(src->IsInBitSize(kMaxImmVal12Bits, 0) || src->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + return false; + } + Operand &result = insn.GetOperand(0); + bool is64Bits = (result.GetSize() == k64BitSize); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + if (src->IsZero()) { + MOperator mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr; + insn.SetMOP(AArch64CG::kMd[mOp]); + insn.PopBackOperand(); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + insn.SetOperand(kInsnThirdOpnd, *src); + if ((mopCode == MOP_xaddrrr) || (mopCode == MOP_waddrrr)) { + is64Bits ? insn.SetMOP(AArch64CG::kMd[MOP_xaddrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_waddrri12]); + } else if ((mopCode == MOP_xsubrrr) || (mopCode == MOP_wsubrrr)) { + is64Bits ? 
insn.SetMOP(AArch64CG::kMd[MOP_xsubrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_wsubrri12]); + } + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + default: + break; + } + return false; +} + +/* optimize csel to cset */ +bool AArch64Ebo::Csel2Cset(Insn &insn, const MapleVector &opnds) +{ + MOperator opCode = insn.GetMachineOpcode(); + /* csel ->cset */ + if ((opCode == MOP_wcselrrrc) || (opCode == MOP_xcselrrrc)) { + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + DEBUG_ASSERT(res != nullptr, "expect a register"); + DEBUG_ASSERT(res->IsRegister(), "expect a register"); + /* only do integers */ + RegOperand *reg = static_cast(res); + if ((res == nullptr) || (!reg->IsOfIntClass())) { + return false; + } + Operand *op0 = opnds.at(kInsnSecondOpnd); + Operand *op1 = opnds.at(kInsnThirdOpnd); + ImmOperand *imm0 = nullptr; + ImmOperand *imm1 = nullptr; + if (op0->IsImmediate()) { + imm0 = static_cast(op0); + } + if (op1->IsImmediate()) { + imm1 = static_cast(op1); + } + + bool reverse = + (imm1 != nullptr) && imm1->IsOne() && (((imm0 != nullptr) && imm0->IsZero()) || IsZeroRegister(*op0)); + if (((imm0 != nullptr) && imm0->IsOne() && (((imm1 != nullptr) && imm1->IsZero()) || IsZeroRegister(*op1))) || + reverse) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "change csel insn :\n"; + insn.Dump(); + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &condOperand = insn.GetOperand(kInsnFourthOpnd); + Operand &rflag = aarFunc->GetOrCreateRflag(); + if (!reverse) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, condOperand, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } else { + auto &cond = static_cast(condOperand); + if (!CheckCondCode(cond)) { + return false; + } + CondOperand &reverseCond = a64CGFunc->GetCondOperand(GetReverseCond(cond)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, reverseCond, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } + return true; + } + } + return false; +} + +/* Look at an expression that has a constant operand and attempt to simplify the computations. */ +bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) +{ + BB *bb = insn.GetBB(); + bool result = false; + if (insn.GetOperandSize() <= 1) { + return false; + } + DEBUG_ASSERT(opnds.size() > 1, "opnds size must greater than 1"); + Operand *op0 = opnds[kInsnSecondOpnd]; + Operand *op1 = opnds[kInsnThirdOpnd]; + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(res != nullptr, "null ptr check"); + uint32 opndSize = insn.GetDesc()->GetOperandSize(); + bool op0IsConstant = IsConstantImmOrReg(*op0) && !IsConstantImmOrReg(*op1); + bool op1IsConstant = !IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + bool bothConstant = IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + ImmOperand *immOpnd = nullptr; + Operand *op = nullptr; + int32 idx0 = kInsnSecondOpnd; + if (op0IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op0) ? 
&a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + op = op1; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnThirdOpnd)); + } + idx0 = kInsnThirdOpnd; + } else if (op1IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + op = op0; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnSecondOpnd)); + } + } else if (bothConstant) { + ImmOperand *immOpnd0 = IsZeroRegister(*op0) ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + ImmOperand *immOpnd1 = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + return SimplifyBothConst(*insn.GetBB(), insn, *immOpnd0, *immOpnd1, opndSize); + } + CHECK_FATAL(immOpnd != nullptr, "constant operand required!"); + CHECK_FATAL(op != nullptr, "constant operand required!"); + /* For orr insn and one of the opnd is zero + * orr resOp, imm1, #0 | orr resOp, #0, imm1 + * =======> + * mov resOp, imm1 */ + if (((insn.GetMachineOpcode() == MOP_wiorrri12) || (insn.GetMachineOpcode() == MOP_xiorrri13)) && + immOpnd->IsZero()) { + MOperator mOp = opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + /* For the imm is 0. Then replace the insn by a move insn. */ + if (((insn.GetMachineOpcode() >= MOP_xaddrrr) && (insn.GetMachineOpcode() <= MOP_sadd) && immOpnd->IsZero()) || + (op1IsConstant && (insn.GetMachineOpcode() >= MOP_xsubrrr) && (insn.GetMachineOpcode() <= MOP_ssub) && + immOpnd->IsZero())) { + Insn &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + + if ((insn.GetMachineOpcode() == MOP_xaddrrr) || (insn.GetMachineOpcode() == MOP_waddrrr)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* + * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + MOperator mOp = opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op, *immOpnd); + bb->ReplaceInsn(insn, newInsn); + result = true; + } + } + } + /* Look for the sequence which can be simpified. */ + if (result || (insn.GetMachineOpcode() == MOP_xaddrri12) || (insn.GetMachineOpcode() == MOP_waddrri12)) { + Insn *prev = opndInfo[idx0]->insn; + if ((prev != nullptr) && + ((prev->GetMachineOpcode() == MOP_xaddrri12) || (prev->GetMachineOpcode() == MOP_waddrri12))) { + OpndInfo *prevInfo0 = opndInfo[idx0]->insnInfo->origOpnd[kInsnSecondOpnd]; + /* if prevop0 has been redefined. skip this optimiztation. 
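+             * The chain folded here (sketch, hypothetical operands):
+             *   add x1, x2, #8
+             *   add x0, x1, #16   ===>   add x0, x2, #24
+             * which is only valid while x2 still holds the value that the
+             * first add consumed.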
*/ + if (prevInfo0->redefined) { + return result; + } + /* Implicit conversion */ + if (insn.GetOperand(kInsnFirstOpnd).GetSize() != insn.GetOperand(kInsnSecondOpnd).GetSize()) { + return result; + } + Operand &prevOpnd0 = prev->GetOperand(kInsnSecondOpnd); + ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); + int64_t val = imm0.GetValue() + immOpnd->GetValue(); + ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); + if (imm1.IsInBitSize(kMaxImmVal24Bits, 0) && + (imm1.IsInBitSize(kMaxImmVal12Bits, 0) || imm1.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + MOperator mOp = (opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12); + bb->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, prevOpnd0, imm1)); + result = true; + } + } + } + return result; +} + +ConditionCode AArch64Ebo::GetReverseCond(const CondOperand &cond) const +{ + switch (cond.GetCode()) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(0, "Not support yet."); + } + return kCcLast; +} + +/* return true if cond == CC_LE */ +bool AArch64Ebo::CheckCondCode(const CondOperand &cond) const +{ + switch (cond.GetCode()) { + case CC_NE: + case CC_EQ: + case CC_LT: + case CC_GE: + case CC_GT: + case CC_LE: + return true; + default: + return false; + } +} + +bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, const ImmOperand &immOperand1, + uint32 opndSize) const +{ + MOperator mOp = insn.GetMachineOpcode(); + int64 val = 0; + /* do not support negative const simplify yet */ + if (immOperand0.GetValue() < 0 || immOperand1.GetValue() < 0) { + return false; + } + uint64 opndValue0 = static_cast(immOperand0.GetValue()); + uint64 opndValue1 = static_cast(immOperand1.GetValue()); + switch (mOp) { + case MOP_weorrri12: + case MOP_weorrrr: + case MOP_xeorrri13: + case MOP_xeorrrr: + val = static_cast(opndValue0 ^ opndValue1); + break; + case MOP_wandrri12: + case MOP_waddrri24: + case MOP_wandrrr: + case MOP_xandrri13: + case MOP_xandrrr: + val = static_cast(opndValue0 & opndValue1); + break; + case MOP_wiorrri12: + case MOP_wiorrrr: + case MOP_xiorrri13: + case MOP_xiorrrr: + val = static_cast(opndValue0 | opndValue1); + break; + default: + return false; + } + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); + if (!immOperand->IsSingleInstructionMovable()) { + DEBUG_ASSERT(res->IsRegister(), " expect a register operand"); + static_cast(cgFunc)->SplitMovImmOpndInstruction(val, *(static_cast(res)), &insn); + bb.RemoveInsn(insn); + } else { + MOperator newmOp = opndSize == k64BitSize ? 
MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newmOp, *res, *immOperand); + bb.ReplaceInsn(insn, newInsn); + } + return true; +} + +bool AArch64Ebo::OperandLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) const +{ + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + auto *regProp = nextInsn->GetDesc()->opndMD[static_cast(i)]; + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + return LiveOutOfBB(regOpnd, *insn.GetBB()); +} + +bool AArch64Ebo::ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, + MOperator oldMop, const RegOperand &opnd) +{ + if (newMop == oldMop) { + return true; + } + if (prevOpndInfo == nullptr || prevOpndInfo->refCount > 1) { + return false; + } + if (OperandLiveAfterInsn(opnd, *insn)) { + return false; + } + Insn *prevInsn = prevOpndInfo->insn; + MemOperand *memOpnd = static_cast(prevInsn->GetMemOpnd()); + DEBUG_ASSERT(!prevInsn->IsStorePair(), "do not do this opt for str pair"); + DEBUG_ASSERT(!prevInsn->IsLoadPair(), "do not do this opt for ldr pair"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && + !a64CGFunc->IsOperandImmValid(newMop, prevInsn->GetMemOpnd(), kInsnSecondOpnd)) { + return false; + } + uint32 shiftAmount = memOpnd->ShiftAmount(); + if (shiftAmount == 0) { + return true; + } + const InsnDesc *md = &AArch64CG::kMd[newMop]; + uint32 memSize = md->GetOperandSize() / k8BitSize; + uint32 validShiftAmount = memSize == 8 ? 3 : memSize == 4 ? 2 : memSize == 2 ? 
1 : 0; + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVector &origInfos, ExtOpTable idx, + bool is64bits) +{ + if (!beforeRegAlloc) { + return false; + } + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if (prevInsn == nullptr) { + return false; + } + + MOperator prevMop = prevInsn->GetMachineOpcode(); + DEBUG_ASSERT(prevMop != MOP_undef, "Invalid opcode of instruction!"); + PairMOperator *begin = &extInsnPairTable[idx][0]; + PairMOperator *end = &extInsnPairTable[idx][insPairsNum]; + auto pairIt = std::find_if(begin, end, [prevMop](const PairMOperator insPair) { return prevMop == insPair[0]; }); + if (pairIt == end) { + return false; + } + + auto &res = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + OpndInfo *prevOpndInfo = GetOpndInfo(res, -1); + MOperator newPreMop = (*pairIt)[1]; + DEBUG_ASSERT(newPreMop != MOP_undef, "Invalid opcode of instruction!"); + if (!ValidPatternForCombineExtAndLoad(prevOpndInfo, insn, newPreMop, prevMop, res)) { + return false; + } + auto *newMemOp = GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, newPreMop); + if (newMemOp == nullptr) { + return false; + } + prevInsn->SetMemOpnd(newMemOp); + if (is64bits && idx <= SXTW && idx >= SXTB) { + newPreMop = ExtLoadSwitchBitSize(newPreMop); + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + prevDstOpnd.SetSize(k64BitSize); + prevDstOpnd.SetValidBitsNum(k64BitSize); + } + prevInsn->SetMOP(AArch64CG::kMd[newPreMop]); + MOperator movOp = is64bits ? MOP_xmovrr : MOP_wmovrr; + if (insn->GetMachineOpcode() == MOP_wandrri12 || insn->GetMachineOpcode() == MOP_xandrri13) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(movOp, insn->GetOperand(kInsnFirstOpnd), + insn->GetOperand(kInsnSecondOpnd)); + insn->GetBB()->ReplaceInsn(*insn, newInsn); + } else { + insn->SetMOP(AArch64CG::kMd[movOp]); + } + return true; +} + +bool AArch64Ebo::CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, + bool is64bits, bool isFp) const +{ + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmadd : MOP_smadd) : (is64bits ? 
MOP_xmaddrrrr : MOP_wmaddrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, *addOpnd)); + return true; +} + +bool AArch64Ebo::CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp) +{ + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &addOpnd = insn->GetOperand(static_cast(pos)); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + return CombineMultiplyAdd(insn, insn1, insnInfo, &addOpnd, is64bits, isFp); + } + return false; +} + +bool AArch64Ebo::CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const +{ + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &subOpnd = insn->GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((insn1->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmsub : MOP_smsub) : (is64bits ? MOP_xmsubrrrr : MOP_wmsubrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, subOpnd)); + return true; + } + return false; +} + +bool CheckInsnRefField(const Insn &insn, size_t opndIndex) +{ + if (insn.IsAccessRefField() && insn.AccessMem()) { + Operand &opnd0 = insn.GetOperand(opndIndex); + if (opnd0.IsRegister()) { + return true; + } + } + return false; +} + +bool AArch64Ebo::CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const +{ + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &src = insn->GetOperand(kInsnSecondOpnd); + if (res.GetSize() != src.GetSize()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. 
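+         * Pattern being formed (sketch, hypothetical registers):
+         *   mul x1, x2, x3
+         *   neg x0, x1   ===>   mneg x0, x2, x3
+         * (fnmul for the floating-point variant); the mul inputs must be
+         * unchanged between the two insns, hence the redefine checks below.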
*/ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + MOperator mOp = isFp ? (is64bits ? MOP_dnmul : MOP_snmul) : (is64bits ? MOP_xmnegrrr : MOP_wmnegrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2)); + return true; + } + return false; +} + +bool AArch64Ebo::CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const +{ + if (opndInfo.insn == nullptr) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + Insn *prevInsn = opndInfo.insn; + InsnInfo *insnInfo = opndInfo.insnInfo; + if (insnInfo == nullptr) { + return false; + } + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = prevInsn->GetMachineOpcode(); + if (!isFp && ((opc1 == MOP_xlsrrri6) || (opc1 == MOP_wlsrrri5))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + Operand &immOpnd1 = is64bits ? aarchFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); + int64 immVal2 = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immV2 = __builtin_ffsll(immVal2 + 1) - 1; + if (immVal1 + immV2 < k1BitSize || (is64bits && immVal1 + immV2 > k64BitSize) || + (!is64bits && immVal1 + immV2 > k32BitSize)) { + return false; + } + Operand &immOpnd2 = is64bits ? aarchFunc->CreateImmOperand(immV2, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immV2, kMaxImmVal5Bits, false); + MOperator mOp = (is64bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5); + insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, immOpnd1, immOpnd2)); + return true; + } + return false; +} + +/* Do some special pattern */ +bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origInfos) +{ + MOperator opCode = insn.GetMachineOpcode(); + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + switch (opCode) { + /* + * mov R503, R0 + * mov R0, R503 + * ==> mov R0, R0 + */ + case MOP_wmovrr: + case MOP_xmovrr: { + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if ((prevInsn != nullptr) && (prevInsn->GetMachineOpcode() == opCode) && + (prevInsn == insn.GetPreviousMachineInsn()) && + !RegistersIdentical(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd)) && + !RegistersIdentical(insn.GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd))) { + Operand ®1 = insn.GetOperand(kInsnFirstOpnd); + Operand ®2 = prevInsn->GetOperand(kInsnSecondOpnd); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), reg1, reg2); + insn.GetBB()->ReplaceInsn(insn, newInsn); + return true; + } + break; + } + /* + * Extension elimination. Look for load extension pair. There are two cases. 
+ * 1) extension size == load size -> change the load type or eliminate the extension + * 2) extension size > load size -> possibly eliminating the extension + * + * Example of 1) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxtb x1, x1 zxtb x1, x1 sxtb x1, x1 zxtb x1, x1 + * ===> ldrsb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> ldrb x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * Example of 2) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxth x1, x1 zxth x1, x1 sxth x1, x1 zxth x1, x1 + * ===> ldrb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> no change + * mov x1, x1 mov x1, x1 mov x1, x1 + */ + case MOP_wandrri12: { + bool doAndOpt = false; + if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue() == 0xff) { + doAndOpt = CombineExtensionAndLoad(&insn, origInfos, AND, false); + } + if (doAndOpt) { + return doAndOpt; + } + /* + * lsr d0, d1, #6 + * and d0, d0, #1 + * ===> ubfx d0, d1, #6, #1 + */ + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + /* immValue is (1 << n - 1) */ + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineLsrAnd(insn, *opndInfo, false, false); + } + break; + } + case MOP_xandrri13: { + bool doAndOpt = false; + if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue() == 0xff) { + doAndOpt = CombineExtensionAndLoad(&insn, origInfos, AND, true); + } + if (doAndOpt) { + return doAndOpt; + } + /* + * lsr d0, d1, #6 + * and d0, d0, #1 + * ===> ubfx d0, d1, #6, #1 + */ + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + /* immValue is (1 << n - 1) */ + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineLsrAnd(insn, *opndInfo, true, false); + } + break; + } + case MOP_xsxtb32: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, false); + case MOP_xsxtb64: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, true); + case MOP_xsxth32: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, false); + case MOP_xsxth64: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, true); + case MOP_xsxtw64: + return CombineExtensionAndLoad(&insn, origInfos, SXTW, true); + case MOP_xuxtb32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTB, false); + case MOP_xuxth32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTH, false); + case MOP_xuxtw64: + return CombineExtensionAndLoad(&insn, origInfos, ZXTW, true); + /* + * lsl x1, x1, #3 + * add x0, x0, x1 + * ===> add x0, x0, x1, 3 + * + * mul x1, x1, x2 + * add x0, x0, x1 or add x0, x1, x0 + * ===> madd x0, x1, x2, x0 + */ + case MOP_xaddrrr: + case MOP_waddrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if ((opndInfo != nullptr) && (opndInfo->insn != nullptr)) { + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo1 = opndInfo->insnInfo; + if (insnInfo1 == nullptr) { + return false; + } + Operand &op0 = insn.GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((opc1 == MOP_xlslrri6) || (opc1 == MOP_wlslrri5)) { + /* don't use register if it was redefined. 
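+                 * The add+shift fusion targeted here (sketch):
+                 *   lsl x1, x2, #3
+                 *   add x0, x0, x1   ===>   add x0, x0, x2, LSL #3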
*/ + if (cgFunc->GetMirModule().IsCModule()) { + /* global opt will do this pattern when is CMoudle */ + return false; + } + OpndInfo *opndInfo1 = insnInfo1->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); + uint32 xLslrriBitLen = 6; + uint32 wLslrriBitLen = 5; + Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand( + BitShiftOperand::kLSL, static_cast(immOpnd.GetValue()), + static_cast((opCode == MOP_xlslrri6) ? xLslrriBitLen : wLslrriBitLen)); + MOperator mOp = (is64bits ? MOP_xaddrrrs : MOP_waddrrrs); + insn.GetBB()->ReplaceInsn(insn, + cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, op0, opnd1, shiftOpnd)); + return true; + } else if ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)) { + return CombineMultiplyAdd(&insn, insn1, insnInfo1, &op0, is64bits, false); + } + } + opndInfo = origInfos.at(kInsnSecondOpnd); + return CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, false); + } + /* + * fmul d1, d1, d2 + * fadd d0, d0, d1 or add d0, d1, d0 + * ===> fmadd d0, d1, d2, d0 + */ + case MOP_dadd: + case MOP_sadd: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, true)) { + return true; + } + opndInfo = origInfos.at(kInsnThirdOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnSecondOpnd, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * sub x0, x0, x1 + * ===> msub x0, x1, x2, x0 + */ + case MOP_xsubrrr: + case MOP_wsubrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fsub d0, d0, d1 + * ===> fmsub d0, d1, d2, d0 + */ + case MOP_dsub: + case MOP_ssub: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * neg x0, x1 + * ===> mneg x0, x1, x2 + */ + case MOP_xinegrr: + case MOP_winegrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fneg d0, d1 + * ===> fnmul d0, d1, d2 + */ + case MOP_wfnegrr: + case MOP_xfnegrr: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + case MOP_xcsetrc: + case MOP_wcsetrc: { + /* i. cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, EQ + * cset w0, NE + * + * ii. 
cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, NE + * cset w0, EQ + * + * a.< -1 : 0x20ff25e0 > < 0 > cmp(226) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R104 class: [I]) + * (opnd2: vreg:R106 class: [I]) b.< -1 : 0x20ff60a0 > < 0 > cset(72) (opnd0: vreg:R101 class: [I]) (opnd1: + * CC: EQ) c.< -1* : 0x20ff3870 > < 0 > cmp(223) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R101 class: + * [I]) (opnd2: imm:0) d.< * -1 : 0x20ff3908 > < 0 > cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: NE) + * d1.< -1 : 0x20ff3908 > < 0 > * cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: EQ) i, d + * ===> mov R107 R101 ii, a,b,c,d1 ===> a,b,cset Rxx + * NE, c, mov R107 Rxx + */ + auto &cond = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((cond.GetCode() != CC_NE) && (cond.GetCode() != CC_EQ)) { + return false; + } + bool reverse = (cond.GetCode() == CC_EQ); + OpndInfo *condInfo = origInfos[kInsnSecondOpnd]; + if ((condInfo != nullptr) && condInfo->insn) { + Insn *cmp1 = condInfo->insn; + if ((cmp1->GetMachineOpcode() == MOP_xcmpri) || (cmp1->GetMachineOpcode() == MOP_wcmpri)) { + InsnInfo *cmpInfo1 = condInfo->insnInfo; + CHECK_FATAL(cmpInfo1 != nullptr, "pointor cmpInfo1 is null"); + OpndInfo *info0 = cmpInfo1->origOpnd[kInsnSecondOpnd]; + /* if R101 was not redefined. */ + if ((info0 != nullptr) && (info0->insnInfo != nullptr) && (info0->insn != nullptr) && + (reverse || !info0->redefined) && cmp1->GetOperand(kInsnThirdOpnd).IsImmediate()) { + Insn *csetInsn = info0->insn; + MOperator opc1 = csetInsn->GetMachineOpcode(); + if (((opc1 == MOP_xcsetrc) || (opc1 == MOP_wcsetrc)) && + static_cast(cmp1->GetOperand(kInsnThirdOpnd)).IsZero()) { + CondOperand &cond1 = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if (!CheckCondCode(cond1)) { + return false; + } + if (EBO_DUMP) { + LogInfo::MapleLogger() + << "< === do specical condition optimization, replace insn ===> \n"; + insn.Dump(); + } + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(result != nullptr, "pointor result is null"); + uint32 size = result->GetSize(); + if (reverse) { + /* After regalloction, we can't create a new register. */ + if (!beforeRegAlloc) { + return false; + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &r = aarFunc->CreateRegisterOperandOfType( + static_cast(result)->GetRegisterType(), size / kBitsPerByte); + /* after generate a new vreg, check if the size of DataInfo is big enough */ + EnlargeSpaceForLA(*csetInsn); + CondOperand &cond2 = aarFunc->GetCondOperand(GetReverseCond(cond1)); + Operand &rflag = aarFunc->GetOrCreateRflag(); + Insn &newCset = cgFunc->GetInsnBuilder()->BuildInsn( + result->GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc, r, cond2, rflag); + /* new_cset use the same cond as cset_insn. */ + IncRef(*info0->insnInfo->origOpnd[kInsnSecondOpnd]); + csetInsn->GetBB()->InsertInsnAfter(*csetInsn, newCset); + MOperator mOp = (result->GetSize() == k64BitSize ? MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, r); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } else { + Operand *result1 = &csetInsn->GetOperand(kInsnFirstOpnd); + MOperator mOp = ((result->GetSize() == k64BitSize) ? 
MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, *result1); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } + return true; + } + } + } + } + } /* end case MOP_wcsetrc */ + [[clang::fallthrough]]; + default: + break; + } + return false; +} + +/* + * *iii. mov w16, v10.s[1] // FMOV from simd 105 ---> replace_insn + * mov w1, w16 ----->insn + * ==> + * mov w1, v10.s[1] + */ +bool AArch64Ebo::IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const +{ + if (insn.GetMachineOpcode() == MOP_wmovrr && replaceInsn.GetMachineOpcode() == MOP_xvmovrv) { + insn.SetMOP(AArch64CG::kMd[replaceInsn.GetMachineOpcode()]); + return true; + } + return false; +} + +bool AArch64Ebo::IsPseudoRet(Insn &insn) const +{ + MOperator mop = insn.GetMachineOpcode(); + if (mop == MOP_pseudo_ret_int || mop == MOP_pseudo_ret_float) { + return true; + } + return false; +} + +bool AArch64Ebo::ChangeLdrMop(Insn &insn, const Operand &opnd) const +{ + DEBUG_ASSERT(insn.IsLoad(), "expect insn is load in ChangeLdrMop"); + DEBUG_ASSERT(opnd.IsRegister(), "expect opnd is a register in ChangeLdrMop"); + + const RegOperand *regOpnd = static_cast(&opnd); + if (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterType() != regOpnd->GetRegisterType()) { + return false; + } + + if (static_cast(insn.GetOperand(kInsnSecondOpnd)).GetIndexRegister()) { + return false; + } + + bool bRet = true; + if (regOpnd->GetRegisterType() == kRegTyFloat) { + switch (insn.GetMachineOpcode()) { + case MOP_wldrb: + insn.SetMOP(AArch64CG::kMd[MOP_bldr]); + break; + case MOP_wldrh: + insn.SetMOP(AArch64CG::kMd[MOP_hldr]); + break; + case MOP_wldr: + insn.SetMOP(AArch64CG::kMd[MOP_sldr]); + break; + case MOP_xldr: + insn.SetMOP(AArch64CG::kMd[MOP_dldr]); + break; + case MOP_wldli: + insn.SetMOP(AArch64CG::kMd[MOP_sldli]); + break; + case MOP_xldli: + insn.SetMOP(AArch64CG::kMd[MOP_dldli]); + break; + case MOP_wldrsb: + case MOP_wldrsh: + default: + bRet = false; + break; + } + } else if (regOpnd->GetRegisterType() == kRegTyInt) { + switch (insn.GetMachineOpcode()) { + case MOP_bldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrb]); + break; + case MOP_hldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrh]); + break; + case MOP_sldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldr]); + break; + case MOP_dldr: + insn.SetMOP(AArch64CG::kMd[MOP_xldr]); + break; + case MOP_sldli: + insn.SetMOP(AArch64CG::kMd[MOP_wldli]); + break; + case MOP_dldli: + insn.SetMOP(AArch64CG::kMd[MOP_xldli]); + break; + default: + bRet = false; + break; + } + } else { + DEBUG_ASSERT(false, "Internal error."); + } + return bRet; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_emitter.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2fa0f57fefaba82e94324815a1502c72bb2e3e23 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_emitter.cpp @@ -0,0 +1,2122 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_emitter.h" +#include +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "metadata_layout.h" +#include "cfi.h" +#include "dbg.h" +#include "aarch64_obj_emitter.h" + +namespace { +using namespace maple; +const std::unordered_set kJniNativeFuncList = { + "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadString_7C_28J_29Ljava_2Flang_2FString_3B_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInt_7C_28JI_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadInt_7C_28J_29I_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInterfaceToken_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeEnforceInterface_7C_28JLjava_2Flang_2FString_3B_29V_native"}; +constexpr uint32 kBinSearchInsnCount = 56; +// map func name to pair +using Func2CodeInsnMap = std::unordered_map>; +Func2CodeInsnMap func2CodeInsnMap { + {"Ljava_2Flang_2FString_3B_7ChashCode_7C_28_29I", {"maple/mrt/codetricks/arch/arm64/hashCode.s", 29}}, + {"Ljava_2Flang_2FString_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z", + {"maple/mrt/codetricks/arch/arm64/stringEquals.s", 50}}}; +constexpr uint32 kQuadInsnCount = 2; + +void GetMethodLabel(const std::string &methodName, std::string &methodLabel) +{ + methodLabel = ".Lmethod_desc." + methodName; +} +} // namespace + +namespace maplebe { +using namespace maple; + +void AArch64AsmEmitter::EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (!cgFunc.GetFunction().IsJava()) { + return; + } + std::string methodDescLabel; + GetMethodLabel(cgFunc.GetFunction().GetName(), methodDescLabel); + (void)emitter.Emit("\t.word " + methodDescLabel + "-.\n"); + emitter.IncreaseJavaInsnCount(); +} + +void AArch64AsmEmitter::EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (cgFunc.GetFunction().GetModule()->IsJavaModule()) { + std::string labelName = ".Label.name." + cgFunc.GetFunction().GetName(); + (void)emitter.Emit("\t.word " + labelName + " - .\n"); + } +} + +/* + * emit java method description which contains address and size of local reference area + * as well as method metadata. 
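+ *
+ * Sketch of the descriptor laid out below (illustrative values only; the
+ * mangled method name, refOffset and refNum here are made up, not taken
+ * from real output):
+ *     .Lmethod_desc.Lfoo_7Cbar_7C_28_29V:
+ *         .word  .Label.name.Lfoo_7Cbar_7C_28_29V - .
+ *         .short -16    // refOffset: start of the local reference area
+ *         .short 2      // refNum: number of local reference slots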
+ */ +void AArch64AsmEmitter::EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (!cgFunc.GetFunction().IsJava()) { + return; + } + (void)emitter.Emit("\t.section\t.rodata\n"); + (void)emitter.Emit("\t.align\t2\n"); + std::string methodInfoLabel; + GetMethodLabel(cgFunc.GetFunction().GetName(), methodInfoLabel); + (void)emitter.Emit(methodInfoLabel + ":\n"); + EmitRefToMethodInfo(funcEmitInfo, emitter); + /* local reference area */ + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 refOffset = memLayout->GetRefLocBaseLoc(); + uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign; + /* for ea usage */ + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + IntrinsiccallNode *cleanEANode = aarchCGFunc.GetCleanEANode(); + if (cleanEANode != nullptr) { + refNum += static_cast(cleanEANode->NumOpnds()); + refOffset -= static_cast(cleanEANode->NumOpnds() * kIntregBytelen); + } + (void)emitter.Emit("\t.short ").Emit(refOffset).Emit("\n"); + (void)emitter.Emit("\t.short ").Emit(refNum).Emit("\n"); +} + +/* the fast_exception_handling lsda */ +void AArch64AsmEmitter::EmitFastLSDA(FuncEmitInfo &funcEmitInfo) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + + Emitter *emitter = currCG->GetEmitter(); + PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + /* + * .word 0xFFFFFFFF + * .word .Label.LTest_3B_7C_3Cinit_3E_7C_28_29V3-func_start_label + */ + (void)emitter->Emit("\t.word 0xFFFFFFFF\n"); + (void)emitter->Emit("\t.word .L." + idx + "__"); + if (aarchCGFunc.NeedCleanup()) { + emitter->Emit(cgFunc.GetCleanupLabel()->GetLabelIdx()); + } else { + DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFastLSDA"); + emitter->Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + } + emitter->Emit("-.L." 
+ idx + "__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + emitter->IncreaseJavaInsnCount(); +} + +/* the normal gcc_except_table */ +void AArch64AsmEmitter::EmitFullLSDA(FuncEmitInfo &funcEmitInfo) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + EHFunc *ehFunc = cgFunc.GetEHFunc(); + Emitter *emitter = currCG->GetEmitter(); + /* emit header */ + emitter->Emit("\t.align 3\n"); + emitter->Emit("\t.section .gcc_except_table,\"a\",@progbits\n"); + emitter->Emit("\t.align 3\n"); + /* emit LSDA header */ + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + emitter->EmitStmtLabel(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetLPStartEncoding()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetTTypeEncoding()).Emit("\n"); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaHeader->GetTTypeOffset()); + emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetStartOffset()->GetLabelIdx()); + /* emit call site table */ + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetCallSiteEncoding()).Emit("\n"); + /* callsite table size */ + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(ehFunc->GetLSDACallSiteTable()->GetCSTable()); + /* callsite start */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()); + ehFunc->GetLSDACallSiteTable()->SortCallSiteTable([&aarchCGFunc](const LSDACallSite *a, const LSDACallSite *b) { + CHECK_FATAL(a != nullptr, "nullptr check"); + CHECK_FATAL(b != nullptr, "nullptr check"); + LabelIDOrder id1 = aarchCGFunc.GetLabelOperand(a->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + LabelIDOrder id2 = aarchCGFunc.GetLabelOperand(b->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + /* id1 and id2 should not be default value -1u */ + CHECK_FATAL(id1 != 0xFFFFFFFF, "illegal label order assigned"); + CHECK_FATAL(id2 != 0xFFFFFFFF, "illegal label order assigned"); + return id1 < id2; + }); + const MapleVector &callSiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callSiteTable.size(); ++i) { + LSDACallSite *lsdaCallSite = callSiteTable[i]; + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csStart); + + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLength); + + if (lsdaCallSite->csLandingPad.GetStartOffset()) { + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLandingPad); + } else { + DEBUG_ASSERT(lsdaCallSite->csAction == 0, "csAction error!"); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + /* if landing pad is 0, we emit this call site as cleanup code */ + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->EmitLabelPair(cleaupCode); + } else if (cgFunc.GetFunction().IsJava()) { + DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64Emitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." 
+ idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } else { + emitter->Emit("0\n"); + } + } + emitter->Emit("\t.uleb128 ").Emit(lsdaCallSite->csAction).Emit("\n"); + } + + /* + * quick hack: insert a call site entry for the whole function body. + * this will hand in any pending (uncaught) exception to its caller. Note that + * __gxx_personality_v0 in libstdc++ is coded so that if exception table exists, + * the call site table must have an entry for any possibly raised exception, + * otherwise __cxa_call_terminate will be invoked immediately, thus the caller + * does not get the chance to take charge. + */ + if (aarchCGFunc.NeedCleanup() || cgFunc.GetFunction().IsJava()) { + /* call site for clean-up */ + LabelPair funcStart; + funcStart.SetStartOffset(cgFunc.GetStartLabel()); + funcStart.SetEndOffset(cgFunc.GetStartLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcStart); + LabelPair funcLength; + funcLength.SetStartOffset(cgFunc.GetStartLabel()); + funcLength.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcLength); + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + emitter->EmitLabelPair(cleaupCode); + } else { + DEBUG_ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } + emitter->Emit("\t.uleb128 0\n"); + if (!cgFunc.GetFunction().IsJava()) { + /* call site for stack unwind */ + LabelPair unwindStart; + unwindStart.SetStartOffset(cgFunc.GetStartLabel()); + unwindStart.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindStart); + LabelPair unwindLength; + unwindLength.SetStartOffset(cgFunc.GetCleanupLabel()); + unwindLength.SetEndOffset(cgFunc.GetEndLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindLength); + emitter->Emit("\t.uleb128 0\n"); + emitter->Emit("\t.uleb128 0\n"); + } + } + /* callsite end label */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx()); + /* tt */ + const LSDAActionTable *lsdaActionTable = ehFunc->GetLSDAActionTable(); + for (size_t i = 0; i < lsdaActionTable->Size(); ++i) { + LSDAAction *lsdaAction = lsdaActionTable->GetActionTable().at(i); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionIndex()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionFilter()).Emit("\n"); + } + emitter->Emit("\t.align 3\n"); + for (int32 i = ehFunc->GetEHTyTableSize() - 1; i >= 0; i--) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehFunc->GetEHTyTableMember(i)); + MIRTypeKind typeKind = mirType->GetKind(); + if (((typeKind == kTypeScalar) && (mirType->GetPrimType() == PTY_void)) || + (typeKind == kTypeStructIncomplete) || (typeKind == kTypeInterfaceIncomplete)) { + continue; + } + CHECK_FATAL((typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete), "NYI"); + const std::string &tyName = GlobalTables::GetStrTable().GetStringFromStrIdx(mirType->GetNameStrIdx()); + std::string dwRefString(".LDW.ref."); + 
+        dwRefString += CLASSINFO_PREFIX_STR;
+        dwRefString += tyName;
+        dwRefString += " - .";
+        emitter->Emit("\t.4byte " + dwRefString + "\n");
+    }
+    /* end of lsda */
+    emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetEndOffset()->GetLabelIdx());
+}
+
+void AArch64AsmEmitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx)
+{
+    (void)name;
+    CGFunc &cgFunc = funcEmitInfo.GetCGFunc();
+    AArch64CGFunc &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    CG *currCG = cgFunc.GetCG();
+    Emitter &emitter = *(currCG->GetEmitter());
+    LabelOperand &label = aarchCGFunc.GetOrCreateLabelOperand(labIdx);
+    /* if the label order is the default value -1, set a new order */
+    if (label.GetLabelOrder() == 0xFFFFFFFF) {
+        label.SetLabelOrder(currCG->GetLabelOrderCnt());
+        currCG->IncreaseLabelOrderCnt();
+    }
+    PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx();
+    char *puIdx = strdup(std::to_string(pIdx).c_str());
+    const std::string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx);
+    if (currCG->GenerateVerboseCG()) {
+        (void)emitter.Emit(".L.")
+            .Emit(puIdx)
+            .Emit("__")
+            .Emit(labIdx)
+            .Emit(":\t//label order ")
+            .Emit(label.GetLabelOrder());
+        if (!labelName.empty() && labelName.at(0) != '@') {
+            /* If the label name has @ as its first char, it is not from MIR */
+            (void)emitter.Emit(", MIR: @").Emit(labelName).Emit("\n");
+        } else {
+            (void)emitter.Emit("\n");
+        }
+    } else {
+        (void)emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\n");
+    }
+    free(puIdx);
+    puIdx = nullptr;
+}
+
+void AArch64AsmEmitter::EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo)
+{
+    CGFunc &cgFunc = funcEmitInfo.GetCGFunc();
+    if (cgFunc.GetFunction().IsJava()) {
+        Emitter *emitter = cgFunc.GetCG()->GetEmitter();
+        /* emit a comment with the current address from the beginning of the java text section */
+        std::stringstream ss;
+        ss << "\n\t// addr: 0x" << std::hex << (emitter->GetJavaInsnCount() * kInsnSize) << "\n";
+        cgFunc.GetCG()->GetEmitter()->Emit(ss.str());
+    }
+}
+
+void AArch64AsmEmitter::RecordRegInfo(FuncEmitInfo &funcEmitInfo) const
+{
+    if (!CGOptions::DoIPARA() || funcEmitInfo.GetCGFunc().GetFunction().IsJava()) {
+        return;
+    }
+    CGFunc &cgFunc = funcEmitInfo.GetCGFunc();
+    AArch64CGFunc &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+
+    std::set<regno_t> referedRegs;
+    MIRFunction &mirFunc = cgFunc.GetFunction();
+    FOR_ALL_BB_REV(bb, &aarchCGFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (insn->IsCall() || insn->IsTailCall()) {
+                auto *targetOpnd = insn->GetCallTargetOperand();
+                bool safeCheck = false;
+                CHECK_FATAL(targetOpnd != nullptr,
+                            "target is null in AArch64AsmEmitter::RecordRegInfo");
+                if (targetOpnd->IsFuncNameOpnd()) {
+                    FuncNameOperand *target = static_cast<FuncNameOperand*>(targetOpnd);
+                    const MIRSymbol *funcSt = target->GetFunctionSymbol();
+                    DEBUG_ASSERT(funcSt->GetSKind() == maple::kStFunc, "funcst must be a function name symbol");
+                    MIRFunction *func = funcSt->GetFunction();
+                    if (func != nullptr && func->IsReferedRegsValid()) {
+                        safeCheck = true;
+                        for (auto preg : func->GetReferedRegs()) {
+                            referedRegs.insert(preg);
+                        }
+                    }
+                }
+                if (!safeCheck) {
+                    mirFunc.SetReferedRegsValid(false);
+                    return;
+                }
+            }
+            if (referedRegs.size() == kMaxRegNum) {
+                break;
+            }
+            uint32 opndNum = insn->GetOperandSize();
+            const InsnDesc *md = &AArch64CG::kMd[insn->GetMachineOpcode()];
+            for (uint32 i = 0; i < opndNum; ++i) {
+                if (insn->GetMachineOpcode() == MOP_asm) {
+                    if (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd) {
+                        for
(auto opnd : static_cast(insn->GetOperand(i)).GetOperands()) { + if (opnd->IsRegister()) { + referedRegs.insert(static_cast(opnd)->GetRegisterNumber()); + } + } + } + continue; + } + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + /* all use, skip it */ + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (!memOpnd.IsIntactIndexed()) { + referedRegs.insert(base->GetRegisterNumber()); + } + } else if (opnd.IsRegister()) { + RegType regType = static_cast(opnd).GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + continue; + } + bool isDef = md->GetOpndDes(i)->IsRegDef(); + if (isDef) { + referedRegs.insert(static_cast(opnd).GetRegisterNumber()); + } + } + } + } + } + mirFunc.SetReferedRegsValid(true); +#ifdef DEBUG + for (auto reg : referedRegs) { + if (reg > kMaxRegNum) { + DEBUG_ASSERT(0, "unexpected preg"); + } + } +#endif + mirFunc.CopyReferedRegs(referedRegs); +} + +void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) +{ + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* emit header of this function */ + Emitter &emitter = *currCG->GetEmitter(); + // insert for __cxx_global_var_init + if (cgFunc.GetName() == "__cxx_global_var_init") { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_initialization)) { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_termination)) { + (void)emitter.Emit("\t.section\t.fini_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + (void)emitter.Emit("\n"); + EmitMethodDesc(funcEmitInfo, emitter); + /* emit java code to the java section. */ + if (cgFunc.GetFunction().IsJava()) { + std::string sectionName = namemangler::kMuidJavatextPrefixStr; + (void)emitter.Emit("\t.section ." 
+ sectionName + ",\"ax\"\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) { + const std::string §ionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); + (void)emitter.Emit("\t.section " + sectionName).Emit(",\"ax\",@progbits\n"); + } else if (CGOptions::IsFunctionSections()) { + (void)emitter.Emit("\t.section .text.").Emit(cgFunc.GetName()).Emit(",\"ax\",@progbits\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_constructor_priority)) { + (void)emitter.Emit("\t.section\t.text.startup").Emit(",\"ax\",@progbits\n"); + } else { + (void)emitter.Emit("\t.text\n"); + } + if (CGOptions::GetFuncAlignPow() != 0) { + (void)emitter.Emit("\t.align ").Emit(CGOptions::GetFuncAlignPow()).Emit("\n"); + } + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc.GetFunction().GetStIdx().Idx()); + const std::string &funcName = std::string(cgFunc.GetShortFuncName().c_str()); + + // manually replace function with optimized assembly language + if (CGOptions::IsReplaceASM()) { + auto it = func2CodeInsnMap.find(funcSt->GetName()); + if (it != func2CodeInsnMap.end()) { + std::string optFile = it->second.first; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream codetricksFd(optFile); + if (!codetricksFd.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + LogInfo::MapleLogger() << "wrong" << '\n'; + } else { + std::string contend; + while (getline(codetricksFd, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(it->second.second); +#ifdef EMIT_INSN_COUNT + EmitJavaInsnAddr(funcEmitInfo); +#endif /* ~EMIT_INSN_COUNT */ + return; + } + } + std::string funcStName = funcSt->GetName(); + if (funcSt->GetFunction()->GetAttr(FUNCATTR_weak)) { + (void)emitter.Emit("\t.weak\t" + funcStName + "\n"); + (void)emitter.Emit("\t.hidden\t" + funcStName + "\n"); + } else if (funcSt->GetFunction()->GetAttr(FUNCATTR_local)) { + (void)emitter.Emit("\t.local\t" + funcStName + "\n"); + } else if (funcSt->GetFunction() && (!funcSt->GetFunction()->IsJava()) && funcSt->GetFunction()->IsStatic()) { + // nothing + } else { + /* should refer to function attribute */ + (void)emitter.Emit("\t.globl\t").Emit(funcSt->GetName()).Emit("\n"); + if (!currCG->GetMIRModule()->IsCModule()) { + (void)emitter.Emit("\t.hidden\t").Emit(funcSt->GetName()).Emit("\n"); + } + } + (void)emitter.Emit("\t.type\t" + funcStName + ", %function\n"); + /* add these messege , solve the performance tool error */ + EmitRefToMethodDesc(funcEmitInfo, emitter); + (void)emitter.Emit(funcStName + ":\n"); + + /* if the last insn is call, then insert nop */ + bool found = false; + FOR_ALL_BB_REV(bb, &aarchCGFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction()) { + if (insn->IsCall()) { + Insn &newInsn = aarchCGFunc.GetInsnBuilder()->BuildInsn(MOP_nop); + bb->InsertInsnAfter(*insn, newInsn); + } + found = true; + break; + } + } + if (found) { + break; + } + } + + RecordRegInfo(funcEmitInfo); + + /* emit instructions */ + FOR_ALL_BB(bb, &aarchCGFunc) { + if (bb->IsUnreachable()) { + continue; + } + if (currCG->GenerateVerboseCG()) { + (void)emitter.Emit("# freq:").Emit(bb->GetFrequency()).Emit("\n"); + } + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + if (aarchCGFunc.GetMirModule().IsCModule() && bb->IsBBNeedAlign() && + bb->GetAlignNopNum() != kAlignMovedFlag) { + uint32 power = bb->GetAlignPower(); + (void)emitter.Emit("\t.p2align ").Emit(power).Emit("\n"); + } + 
EmitBBHeaderLabel(funcEmitInfo, funcName, bb->GetLabIdx()); + } + + FOR_BB_INSNS(insn, bb) { + if (insn->IsCfiInsn()) { + EmitAArch64CfiInsn(emitter, *insn); + } else if (insn->IsDbgInsn()) { + EmitAArch64DbgInsn(emitter, *insn); + } else { + EmitAArch64Insn(emitter, *insn); + } + } + } + if (CGOptions::IsMapleLinker()) { + /* Emit a label for calculating method size */ + (void)emitter.Emit(".Label.end." + funcStName + ":\n"); + } + (void)emitter.Emit("\t.size\t" + funcStName + ", .-").Emit(funcStName + "\n"); + + auto constructorAttr = funcSt->GetFunction()->GetAttrs().GetConstructorPriority(); + if (constructorAttr != -1) { + (void)emitter.Emit("\t.section\t.init_array." + std::to_string(constructorAttr) + ",\"aw\"\n"); + (void)emitter.Emit("\t.align 3\n"); + (void)emitter.Emit("\t.xword\t" + funcStName + "\n"); + } + + EHFunc *ehFunc = cgFunc.GetEHFunc(); + /* emit LSDA */ + if (cgFunc.GetFunction().IsJava() && (ehFunc != nullptr)) { + if (!cgFunc.GetHasProEpilogue()) { + (void)emitter.Emit("\t.word 0x55555555\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFullLSDA()) { + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + /* .word .Label.lsda_label-func_start_label */ + (void)emitter.Emit("\t.word .L." + idx).Emit("__").Emit(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + (void)emitter.Emit("-.L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFastLSDA()) { + EmitFastLSDA(funcEmitInfo); + } + } + + for (auto &it : cgFunc.GetEmitStVec()) { + /* emit switch table only here */ + MIRSymbol *st = it.second; + DEBUG_ASSERT(st->IsReadOnly(), "NYI"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t.align 3\n"); + emitter.IncreaseJavaInsnCount(0, true); /* just aligned */ + (void)emitter.Emit(st->GetName() + ":\n"); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + CHECK_FATAL(arrayConst != nullptr, "null ptr check"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + CHECK_FATAL(lblConst != nullptr, "null ptr check"); + (void)emitter.Emit("\t.quad\t.L.").Emit(idx).Emit("__").Emit(lblConst->GetValue()); + (void)emitter.Emit(" - " + st->GetName() + "\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + free(idx); + idx = nullptr; + } + /* insert manually optimized assembly language */ + if (funcSt->GetName() == "Landroid_2Futil_2FContainerHelpers_3B_7C_3Cinit_3E_7C_28_29V") { + std::string optFile = "maple/mrt/codetricks/arch/arm64/ContainerHelpers_binarySearch.s"; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream binarySearchFileFD(optFile); + if (!binarySearchFileFD.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + } else { + std::string contend; + while (getline(binarySearchFileFD, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(kBinSearchInsnCount); + } + + for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) { + LabelOperand &labelOpnd = aarchCGFunc.GetOrCreateLabelOperand(mpPair.first); + A64OpndEmitVisitor visitor(emitter, nullptr); + labelOpnd.Accept(visitor); + (void)emitter.Emit(":\n"); + (void)emitter.Emit("\t.quad 
").Emit(static_cast(mpPair.second)).Emit("\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + + if (ehFunc != nullptr && ehFunc->NeedFullLSDA()) { + EmitFullLSDA(funcEmitInfo); + } +#ifdef EMIT_INSN_COUNT + if (cgFunc.GetFunction().IsJava()) { + EmitJavaInsnAddr(funcEmitInfo); + } +#endif /* ~EMIT_INSN_COUNT */ +} + +void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) const +{ + MOperator mOp = insn.GetMachineOpcode(); + emitter.SetCurrentMOP(mOp); + const InsnDesc *md = insn.GetDesc(); + + if (!GetCG()->GenerateVerboseAsm() && !GetCG()->GenerateVerboseCG() && insn.IsComment()) { + return; + } + + switch (mOp) { + case MOP_clinit: { + EmitClinit(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_ldr: { + uint32 adrpldrInsnCount = md->GetAtomicNum(); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount); + EmitAdrpLdr(emitter, insn); + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount + 1); + } + return; + } + case MOP_counter: { + EmitCounter(emitter, insn); + return; + } + case MOP_asm: { + EmitInlineAsm(emitter, insn); + return; + } + case MOP_clinit_tail: { + EmitClinitTail(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_lazy_ldr: { + EmitLazyLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_label: { + EmitAdrpLabel(emitter, insn); + return; + } + case MOP_lazy_tail: { + /* No need to emit this pseudo instruction. */ + return; + } + case MOP_lazy_ldr_static: { + EmitLazyLoadStatic(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_arrayclass_cache_ldr: { + EmitArrayClassCacheLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_get_and_addI: + case MOP_get_and_addL: { + EmitGetAndAddInt(emitter, insn); + return; + } + case MOP_get_and_setI: + case MOP_get_and_setL: { + EmitGetAndSetInt(emitter, insn); + return; + } + case MOP_compare_and_swapI: + case MOP_compare_and_swapL: { + EmitCompareAndSwapInt(emitter, insn); + return; + } + case MOP_string_indexof: { + EmitStringIndexOf(emitter, insn); + return; + } + case MOP_pseudo_none: + case MOP_pseduo_tls_release: { + return; + } + case MOP_tls_desc_call: { + EmitCTlsDescCall(emitter, insn); + return; + } + case MOP_tls_desc_rel: { + EmitCTlsDescRel(emitter, insn); + return; + } + case MOP_sync_lock_test_setI: + case MOP_sync_lock_test_setL: { + EmitSyncLockTestSet(emitter, insn); + return; + } + default: + break; + } + + if (CGOptions::IsNativeOpt() && mOp == MOP_xbl) { + auto *nameOpnd = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + if (nameOpnd->GetName() == "MCC_CheckThrowPendingException") { + EmitCheckThrowPendingException(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + } + + std::string format(md->format); + (void)emitter.Emit("\t").Emit(md->name).Emit("\t"); + size_t opndSize = insn.GetOperandSize(); + std::vector seq(opndSize, -1); + std::vector prefix(opndSize); /* used for print prefix like "*" in icall *rax */ + uint32 index = 0; + uint32 commaNum = 0; + for (uint32 i = 0; i < format.length(); ++i) { + char c = format[i]; + if (c >= '0' && c <= '5') { + seq[index++] = c - '0'; + ++commaNum; + } else if (c != ',') { + prefix[index].push_back(c); + } + } + + bool isRefField = + (opndSize == 0) ? 
false : CheckInsnRefField(insn, static_cast(static_cast(seq[0])));
+    if (insn.IsComment()) {
+        emitter.IncreaseJavaInsnCount();
+    }
+    uint32 compositeOpnds = 0;
+    for (uint32 i = 0; i < commaNum; ++i) {
+        if (seq[i] == -1) {
+            continue;
+        }
+        if (prefix[i].length() > 0) {
+            (void)emitter.Emit(prefix[i]);
+        }
+        if (emitter.NeedToDealWithHugeSo() && (mOp == MOP_xbl || mOp == MOP_tail_call_opt_xbl)) {
+            auto *nameOpnd = static_cast<FuncNameOperand*>(&insn.GetOperand(kInsnFirstOpnd));
+            /* Support huge .so files here.
+             * As the PLT section is just before the java_text section, when the java_text
+             * section is larger than 128M, "b" and "bl" instructions would fail to branch
+             * to the PLT stub functions. Here, to save instruction space, we change the
+             * branch target to a local target within the 120M address range, and add a
+             * non-plt call to the target function.
+             */
+            emitter.InsertHugeSoTarget(nameOpnd->GetName());
+            (void)emitter.Emit(nameOpnd->GetName() + emitter.HugeSoPostFix());
+            break;
+        }
+        auto *opnd = &insn.GetOperand(static_cast(seq[i]));
+        if (opnd && opnd->IsRegister()) {
+            auto *regOpnd = static_cast<RegOperand*>(opnd);
+            if ((md->opndMD[static_cast(seq[i])])->IsVectorOperand()) {
+                regOpnd->SetVecLanePosition(-1);
+                regOpnd->SetVecLaneSize(0);
+                regOpnd->SetVecElementSize(0);
+                if (insn.IsVectorOp()) {
+                    PrepareVectorOperand(regOpnd, compositeOpnds, insn);
+                    if (compositeOpnds != 0) {
+                        (void)emitter.Emit("{");
+                    }
+                }
+            }
+        }
+        A64OpndEmitVisitor visitor(emitter, md->opndMD[static_cast(seq[i])]);
+
+        insn.GetOperand(static_cast(seq[i])).Accept(visitor);
+        if (compositeOpnds == 1) {
+            (void)emitter.Emit("}");
+        }
+        if (compositeOpnds > 0) {
+            --compositeOpnds;
+        }
+        /* reset the opnd0 ref-field flag, so the following instruction has the correct register */
+        if (isRefField && (i == 0)) {
+            static_cast<RegOperand*>(&insn.GetOperand(static_cast(seq[0])))->SetRefField(false);
+        }
+        /* Temporarily comment the label: .Label.debug.callee */
+        if (i != (commaNum - 1)) {
+            (void)emitter.Emit(", ");
+        }
+        const uint32 commaNumForEmitLazy = 2;
+        if (!CGOptions::IsLazyBinding() || GetCG()->IsLibcore() || (mOp != MOP_wldr && mOp != MOP_xldr) ||
+            commaNum != commaNumForEmitLazy || i != 1 ||
+            !insn.GetOperand(static_cast(seq[1])).IsMemoryAccessOperand()) {
+            continue;
+        }
+        /*
+         * Only check the last operand of ldr in lo12 mode.
+ * Check the second operand, if it's [AArch64MemOperand::kAddrModeLo12Li] + */ + auto *memOpnd = static_cast(&insn.GetOperand(static_cast(seq[1]))); + if (memOpnd == nullptr || memOpnd->GetAddrMode() != MemOperand::kAddrModeLo12Li) { + continue; + } + const MIRSymbol *sym = memOpnd->GetSymbol(); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || sym->IsMuidDataDefTab() || + sym->IsMuidDataUndefTab()) { + (void)emitter.Emit("\n"); + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(1); + } + } + if (GetCG()->GenerateVerboseCG() || (GetCG()->GenerateVerboseAsm() && insn.IsComment())) { + const char *comment = insn.GetComment().c_str(); + if (comment != nullptr && strlen(comment) > 0) { + (void)emitter.Emit("\t\t// ").Emit(comment); + } + } + + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitClinit(Emitter &emitter, const Insn &insn) const +{ + /* + * adrp x3, __muid_data_undef_tab$$GetBoolean_bytecode+144 + * ldr x3, [x3, #:lo12:__muid_data_undef_tab$$GetBoolean_bytecode+144] + * or, + * adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + * + * ldr x3, [x3,#112] + * ldr wzr, [x3] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_clinit]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitClinit"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + if (stImmOpnd->GetSymbol()->IsMuidDataUndefTab()) { + /* emit adrp */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("\n"); + /* emit ldr */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + } else { + /* adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\tadrp\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("\n"); + + /* ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", ["); + opnd0->Accept(visitor); + (void)emitter.Emit(", #:lo12:"); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("]\n"); + } + /* emit "ldr x0,[x0,#48]" */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(",#"); + (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + + /* emit "ldr xzr, [x0]" */ + (void)emitter.Emit("\t").Emit("ldr\txzr, ["); + opnd0->Accept(visitor); + 
(void)emitter.Emit("]\n"); +} + +static void AsmStringOutputRegNum(bool isInt, uint32 regno, uint32 intBase, uint32 fpBase, std::string &strToEmit) +{ + regno_t newRegno; + if (isInt) { + newRegno = regno - intBase; + } else { + newRegno = regno - fpBase; + } + if (newRegno > (kDecimalMax - 1)) { + uint32 tenth = newRegno / kDecimalMax; + strToEmit += '0' + static_cast(tenth); + newRegno -= (kDecimalMax * tenth); + } + strToEmit += newRegno + '0'; +} + +void AArch64AsmEmitter::EmitInlineAsm(Emitter &emitter, const Insn &insn) const +{ + (void)emitter.Emit("\t//Inline asm begin\n\t"); + auto &list1 = static_cast(insn.GetOperand(kAsmOutputListOpnd)); + std::vector outOpnds; + for (auto *regOpnd : list1.GetOperands()) { + outOpnds.push_back(regOpnd); + } + auto &list2 = static_cast(insn.GetOperand(kAsmInputListOpnd)); + std::vector inOpnds; + for (auto *regOpnd : list2.GetOperands()) { + inOpnds.push_back(regOpnd); + } + auto &list6 = static_cast(insn.GetOperand(kAsmOutputRegPrefixOpnd)); + auto &list7 = static_cast(insn.GetOperand(kAsmInputRegPrefixOpnd)); + MapleString asmStr = static_cast(insn.GetOperand(kAsmStringOpnd)).GetComment(); + std::string stringToEmit; + auto IsMemAccess = [](char c) -> bool { return c == '['; }; + auto EmitRegister = [&](const char *p, bool isInt, uint32 regNO, bool unDefRegSize) -> void { + if (IsMemAccess(p[0])) { + stringToEmit += "[x"; + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + stringToEmit += "]"; + } else { + DEBUG_ASSERT((p[0] == 'w' || p[0] == 'x' || p[0] == 's' || p[0] == 'd' || p[0] == 'v'), + "Asm invalid register type"); + if ((p[0] == 'w' || p[0] == 'x') && unDefRegSize) { + stringToEmit += 'x'; + } else { + stringToEmit += p[0]; + } + if (!unDefRegSize) { + isInt = (p[0] == 'w' || p[0] == 'x'); + } + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + } + }; + for (size_t i = 0; i < asmStr.length(); ++i) { + switch (asmStr[i]) { + case '$': { + char c = asmStr[++i]; + if ((c >= '0') && (c <= '9')) { + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + if (val < outOpnds.size()) { + const char *prefix = list6.stringList[val]->GetComment().c_str(); + RegOperand *opnd = outOpnds[val]; + EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } else { + val -= static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + RegOperand *opnd = inOpnds[val]; + /* input is a immediate */ + const char *prefix = list7.stringList[val]->GetComment().c_str(); + if (prefix[0] == 'i') { + stringToEmit += '#'; + for (size_t k = 1; k < list7.stringList[val]->GetComment().length(); ++k) { + stringToEmit += prefix[k]; + } + } else { + EmitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } + } + } else if (c == '{') { + c = asmStr[++i]; + CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number"); + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + regno_t regno; + bool isAddr = false; + if (val < outOpnds.size()) { + RegOperand *opnd = outOpnds[val]; + regno = opnd->GetRegisterNumber(); + isAddr = IsMemAccess(list6.stringList[val]->GetComment().c_str()[0]); + } else { + val -= static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + 
+                    RegOperand *opnd = inOpnds[val];
+                    regno = opnd->GetRegisterNumber();
+                    isAddr = IsMemAccess(list7.stringList[val]->GetComment().c_str()[0]);
+                }
+                c = asmStr[++i];
+                CHECK_FATAL(c == ':', "Parsing error in inline asm string during emit");
+                c = asmStr[++i];
+                std::string prefix(1, c);
+                if (c == 'a' || isAddr) {
+                    prefix = "[x";
+                }
+                EmitRegister(prefix.c_str(), true, regno, false);
+                c = asmStr[++i];
+                CHECK_FATAL(c == '}', "Parsing error in inline asm string during emit");
+            }
+            break;
+        }
+        case '\n': {
+            stringToEmit += "\n\t";
+            break;
+        }
+        default:
+            stringToEmit += asmStr[i];
+        }
+    }
+    (void)emitter.Emit(stringToEmit);
+    (void)emitter.Emit("\n\t//Inline asm end\n");
+}
+
+void AArch64AsmEmitter::EmitClinitTail(Emitter &emitter, const Insn &insn) const
+{
+    /*
+     * ldr x17, [xs, #112]
+     * ldr wzr, [x17]
+     */
+    const InsnDesc *md = &AArch64CG::kMd[MOP_clinit_tail];
+
+    Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+
+    const OpndDesc *prop0 = md->opndMD[0];
+    A64OpndEmitVisitor visitor(emitter, prop0);
+
+    /* emit "ldr x17,[xs,#112]" */
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\tx17, [");
+    opnd0->Accept(visitor);
+    (void)emitter.Emit(", #");
+    (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState()));
+    (void)emitter.Emit("]");
+    (void)emitter.Emit("\n");
+
+    /* emit "ldr xzr, [x17]" */
+    (void)emitter.Emit("\t").Emit("ldr\txzr, [x17]\n");
+}
+
+void AArch64AsmEmitter::EmitLazyLoad(Emitter &emitter, const Insn &insn) const
+{
+    /*
+     * ldr wd, [xs] # xd and xs should be different registers
+     * ldr wd, [xd]
+     */
+    const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr];
+
+    Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+    Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+    const OpndDesc *prop0 = md->opndMD[0];
+    const OpndDesc *prop1 = md->opndMD[1];
+    A64OpndEmitVisitor visitor(emitter, prop0);
+    A64OpndEmitVisitor visitor1(emitter, prop1);
+
+    /* emit "ldr wd, [xs]" */
+    (void)emitter.Emit("\t").Emit("ldr\t");
+#ifdef USE_32BIT_REF
+    opnd0->Accept(visitor);
+#else
+    opnd0->Accept(visitor1);
+#endif
+    (void)emitter.Emit(", [");
+    opnd1->Accept(visitor1);
+    (void)emitter.Emit("]\t// lazy load.\n");
+
+    /* emit "ldr wd, [xd]" */
+    (void)emitter.Emit("\t").Emit("ldr\t");
+    opnd0->Accept(visitor);
+    (void)emitter.Emit(", [");
+    opnd1->Accept(visitor1);
+    (void)emitter.Emit("]\t// lazy load.\n");
+}
+
+void AArch64AsmEmitter::EmitCounter(Emitter &emitter, const Insn &insn) const
+{
+    /*
+     * adrp x1, __profile_bb_table$$GetBoolean_bytecode+4
+     * ldr w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_bytecode+4]
+     * add w17, w17, #1
+     * str w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_bytecode+4]
+     */
+    const InsnDesc *md = &AArch64CG::kMd[MOP_counter];
+
+    Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+    Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd);
+    const OpndDesc *prop0 = md->opndMD[kInsnFirstOpnd];
+    A64OpndEmitVisitor visitor(emitter, prop0);
+    StImmOperand *stImmOpnd = static_cast<StImmOperand*>(opnd1);
+    CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitCounter");
+    /* emit nop for breakpoint */
+    if (GetCG()->GetCGOptions().WithDwarf()) {
+        (void)emitter.Emit("\t").Emit("nop").Emit("\n");
+    }
+
+    /* emit adrp */
+    (void)emitter.Emit("\t").Emit("adrp").Emit("\t");
+    opnd0->Accept(visitor);
+    (void)emitter.Emit(",");
+    (void)emitter.Emit(stImmOpnd->GetName());
+    (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+    (void)emitter.Emit("\n");
+    /* emit ldr */
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\tw17, [");
+    opnd0->Accept(visitor);
(void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + /* emit add */ + (void)emitter.Emit("\t").Emit("add").Emit("\tw17, w17, #1"); + (void)emitter.Emit("\n"); + /* emit str */ + (void)emitter.Emit("\t").Emit("str").Emit("\tw17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitAdrpLabel(Emitter &emitter, const Insn &insn) const +{ + /* adrp xd, label + * add xd, xd, #lo12:label + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_label]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto lidx = static_cast(opnd1)->GetValue(); + + /* adrp xd, label */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + char *idx; + idx = + strdup(std::to_string(Globals::GetInstance()->GetBECommon()->GetMIRModule().CurFunction()->GetPuidx()).c_str()); + (void)emitter.Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + + /* add xd, xd, #lo12:label */ + (void)emitter.Emit("\tadd\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(":lo12:").Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + (void)emitter.Emit("\n"); + free(idx); + idx = nullptr; +} + +void AArch64AsmEmitter::EmitAdrpLdr(Emitter &emitter, const Insn &insn) const +{ + /* + * adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr]; + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitAdrpLdr"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + /* adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\n"); + + /* ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); + opnd0->Accept(visitor); + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\n"); +} + +void AArch64AsmEmitter::EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const +{ + /* adrp xd, 
:got:__staticDecoupleValueOffset$$xxx+offset + * ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr_static]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(0); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// lazy load static.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + opnd0->Emit(emitter, &prop2); /* ldr wd, ... for terminal system */ +#else + opnd0->Accept(visitor); /* ldr xd, ... for qemu */ +#endif /* USE_32BIT_REF */ + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\t// lazy load static.\n"); + + /* emit "ldr wzr, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\twzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\t// lazy load static.\n"); +} + +void AArch64AsmEmitter::EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const +{ + /* adrp xd, :got:__arrayClassCacheTable$$xxx+offset + * ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_arrayclass_cache_ldr]; + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(kInsnFirstOpnd); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__arrayClassCacheTable$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// load array class.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + A64OpndEmitVisitor visitor2(emitter, prop2); + opnd0->Accept(visitor2); /* ldr wd, ... for terminal system */ +#else + opnd0->Accept(visitor); /* ldr xd, ... 
for qemu */
+#endif /* USE_32BIT_REF */
+    static_cast<RegOperand*>(opnd0)->SetRefField(false);
+    (void)emitter.Emit(", ");
+    (void)emitter.Emit("[");
+    opnd0->Accept(visitor);
+    (void)emitter.Emit(",");
+    (void)emitter.Emit("#");
+    (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName());
+    if (stImmOpnd->GetOffset() != 0) {
+        (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset());
+    }
+    (void)emitter.Emit("]\t// load array class.\n");
+
+    /* emit "ldr wzr, [xd]" */
+    (void)emitter.Emit("\t").Emit("ldr\twzr, [");
+    opnd0->Accept(visitor);
+    (void)emitter.Emit("]\t// check resolve array class.\n");
+}
+
+/*
+ * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * add wt, w0, w3
+ * stlxr ws, wt, [xt]
+ * cbnz ws, label
+ */
+void AArch64AsmEmitter::EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const
+{
+    DEBUG_ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number");
+    (void)emitter.Emit("\t//\tstart of Unsafe.getAndAddInt.\n");
+    Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd);
+    Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd);
+    Operand *tempOpnd2 = &insn.GetOperand(kInsnFourthOpnd);
+    Operand *objOpnd = &insn.GetOperand(kInsnFifthOpnd);
+    Operand *offsetOpnd = &insn.GetOperand(kInsnSixthOpnd);
+    Operand *deltaOpnd = &insn.GetOperand(kInsnSeventhOpnd);
+    Operand *labelOpnd = &insn.GetOperand(kInsnEighthOpnd);
+    A64OpndEmitVisitor visitor(emitter, nullptr);
+    /* emit add. */
+    (void)emitter.Emit("\t").Emit("add").Emit("\t");
+    tempOpnd0->Accept(visitor);
+    (void)emitter.Emit(", ");
+    objOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    offsetOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* emit label. */
+    labelOpnd->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    Operand *retVal = &insn.GetOperand(kInsnFirstOpnd);
+    const MOperator mOp = insn.GetMachineOpcode();
+    const InsnDesc *md = &AArch64CG::kMd[mOp];
+    const OpndDesc *retProp = md->opndMD[kInsnFirstOpnd];
+    A64OpndEmitVisitor retVisitor(emitter, retProp);
+    /* emit ldaxr */
+    (void)emitter.Emit("\t").Emit("ldaxr").Emit("\t");
+    retVal->Accept(retVisitor);
+    (void)emitter.Emit(", [");
+    tempOpnd0->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* emit add. */
+    (void)emitter.Emit("\t").Emit("add").Emit("\t");
+    tempOpnd1->Accept(retVisitor);
+    (void)emitter.Emit(", ");
+    retVal->Accept(retVisitor);
+    (void)emitter.Emit(", ");
+    deltaOpnd->Accept(retVisitor);
+    (void)emitter.Emit("\n");
+    /* emit stlxr. */
+    (void)emitter.Emit("\t").Emit("stlxr").Emit("\t");
+    tempOpnd2->Accept(visitor);
+    (void)emitter.Emit(", ");
+    tempOpnd1->Accept(retVisitor);
+    (void)emitter.Emit(", [");
+    tempOpnd0->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* emit cbnz.
*/ + (void)emitter.Emit("\t").Emit("cbnz").Emit("\t"); + tempOpnd2->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t//\tend of Unsafe.getAndAddInt.\n"); +} + +/* + * intrinsic_get_set_int w0, xt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * stlxr ws, w3, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const +{ + /* MOP_get_and_setI and MOP_get_and_setL have 7 operands */ + DEBUG_ASSERT(insn.GetOperandSize() > kInsnSeventhOpnd, "ensure the operands number"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFourthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add x1, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + tempOpnd0->Accept(visitor); + (void)emitter.Emit(", "); + objOpnd->Accept(visitor); + (void)emitter.Emit(", "); + offsetOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *labelOpnd = &insn.GetOperand(kInsnSeventhOpnd); + /* label: */ + labelOpnd->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* ldaxr w0, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *newValueOpnd = &insn.GetOperand(kInsnSixthOpnd); + /* stlxr ws, w3, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + newValueOpnd->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cbnz w2, label */ + (void)emitter.Emit("\tcbnz\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); +} + +/* + * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, + * Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, + * Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET + * cmp w4, w2 + * b.gt .Label.NOMATCH + * sub w2, w2, w4 + * sub w4, w4, #8 + * mov w10, w2 + * uxtw x4, w4 + * uxtw x2, w2 + * add x3, x3, x4 + * add x1, x1, x2 + * neg x4, x4 + * neg x2, x2 + * ldr x5, [x3,x4] + * .Label.FIRST_LOOP: + * ldr x7, [x1,x2] + * cmp x5, x7 + * b.eq .Label.STR1_LOOP + * .Label.STR2_NEXT: + * adds x2, x2, #1 + * b.le .Label.FIRST_LOOP + * b .Label.NOMATCH + * .Label.STR1_LOOP: + * adds x8, x4, #8 + * add x9, x2, #8 + * b.ge .Label.LAST_WORD + * .Label.STR1_NEXT: + * ldr x6, [x3,x8] + * ldr x7, [x1,x9] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * adds x8, x8, #8 + * add x9, x9, #8 + * b.lt .Label.STR1_NEXT + * .Label.LAST_WORD: + * ldr x6, [x3] + * sub x9, x1, x4 + * ldr x7, [x9,x2] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * add w0, w10, w2 + * b .Label.RET + * .Label.NOMATCH: + * mov w0, #-1 + * .Label.RET: + */ +void AArch64AsmEmitter::EmitStringIndexOf(Emitter &emitter, const Insn &insn) const +{ + /* MOP_string_indexof has 18 operands */ + DEBUG_ASSERT(insn.GetOperandSize() == 18, "ensure the operands number"); + Operand *patternLengthOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *srcLengthOpnd = &insn.GetOperand(kInsnThirdOpnd); + const std::string patternLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(patternLengthOpnd)->GetRegisterNumber()]; + const std::string srcLengthReg = + 
+
+/*
+ * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10,
+ *                          Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP,
+ *                          Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET
+ * cmp w4, w2
+ * b.gt .Label.NOMATCH
+ * sub w2, w2, w4
+ * sub w4, w4, #8
+ * mov w10, w2
+ * uxtw x4, w4
+ * uxtw x2, w2
+ * add x3, x3, x4
+ * add x1, x1, x2
+ * neg x4, x4
+ * neg x2, x2
+ * ldr x5, [x3,x4]
+ * .Label.FIRST_LOOP:
+ * ldr x7, [x1,x2]
+ * cmp x5, x7
+ * b.eq .Label.STR1_LOOP
+ * .Label.STR2_NEXT:
+ * adds x2, x2, #1
+ * b.le .Label.FIRST_LOOP
+ * b .Label.NOMATCH
+ * .Label.STR1_LOOP:
+ * adds x8, x4, #8
+ * add x9, x2, #8
+ * b.ge .Label.LAST_WORD
+ * .Label.STR1_NEXT:
+ * ldr x6, [x3,x8]
+ * ldr x7, [x1,x9]
+ * cmp x6, x7
+ * b.ne .Label.STR2_NEXT
+ * adds x8, x8, #8
+ * add x9, x9, #8
+ * b.lt .Label.STR1_NEXT
+ * .Label.LAST_WORD:
+ * ldr x6, [x3]
+ * sub x9, x1, x4
+ * ldr x7, [x9,x2]
+ * cmp x6, x7
+ * b.ne .Label.STR2_NEXT
+ * add w0, w10, w2
+ * b .Label.RET
+ * .Label.NOMATCH:
+ * mov w0, #-1
+ * .Label.RET:
+ */
+void AArch64AsmEmitter::EmitStringIndexOf(Emitter &emitter, const Insn &insn) const
+{
+    /* MOP_string_indexof has 18 operands */
+    DEBUG_ASSERT(insn.GetOperandSize() == 18, "ensure the operands number");
+    Operand *patternLengthOpnd = &insn.GetOperand(kInsnFifthOpnd);
+    Operand *srcLengthOpnd = &insn.GetOperand(kInsnThirdOpnd);
+    const std::string patternLengthReg =
+        AArch64CG::intRegNames[AArch64CG::kR64List][static_cast<RegOperand *>(patternLengthOpnd)->GetRegisterNumber()];
+    const std::string srcLengthReg =
+        AArch64CG::intRegNames[AArch64CG::kR64List][static_cast<RegOperand *>(srcLengthOpnd)->GetRegisterNumber()];
+    A64OpndEmitVisitor visitor(emitter, nullptr);
+    /* cmp w4, w2 */
+    (void)emitter.Emit("\tcmp\t");
+    patternLengthOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    srcLengthOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* the 16th operand of MOP_string_indexof is Label.NOMATCH */
+    Operand *labelNoMatch = &insn.GetOperand(16);
+    /* b.gt Label.NOMATCH */
+    (void)emitter.Emit("\tb.gt\t");
+    labelNoMatch->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* sub w2, w2, w4 */
+    (void)emitter.Emit("\tsub\t");
+    srcLengthOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    srcLengthOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    patternLengthOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* sub w4, w4, #8 */
+    (void)emitter.Emit("\tsub\t");
+    patternLengthOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    patternLengthOpnd->Accept(visitor);
+    (void)emitter.Emit(", #8\n");
+    /* the 10th operand of MOP_string_indexof is w10 */
+    Operand *resultTmp = &insn.GetOperand(10);
+    /* mov w10, w2 */
+    (void)emitter.Emit("\tmov\t");
+    resultTmp->Accept(visitor);
+    (void)emitter.Emit(", ");
+    srcLengthOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* uxtw x4, w4 */
+    (void)emitter.Emit("\tuxtw\t").Emit(patternLengthReg);
+    (void)emitter.Emit(", ");
+    patternLengthOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* uxtw x2, w2 */
+    (void)emitter.Emit("\tuxtw\t").Emit(srcLengthReg);
+    (void)emitter.Emit(", ");
+    srcLengthOpnd->Accept(visitor);
+    (void)emitter.Emit("\n");
+    Operand *patternStringBaseOpnd = &insn.GetOperand(kInsnFourthOpnd);
+    /* add x3, x3, x4 */
+    (void)emitter.Emit("\tadd\t");
+    patternStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    patternStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(", ").Emit(patternLengthReg);
+    (void)emitter.Emit("\n");
+    Operand *srcStringBaseOpnd = &insn.GetOperand(kInsnSecondOpnd);
+    /* add x1, x1, x2 */
+    (void)emitter.Emit("\tadd\t");
+    srcStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(", ");
+    srcStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(", ").Emit(srcLengthReg);
+    (void)emitter.Emit("\n");
+    /* neg x4, x4 */
+    (void)emitter.Emit("\tneg\t").Emit(patternLengthReg);
+    (void)emitter.Emit(", ").Emit(patternLengthReg);
+    (void)emitter.Emit("\n");
+    /* neg x2, x2 */
+    (void)emitter.Emit("\tneg\t").Emit(srcLengthReg);
+    (void)emitter.Emit(", ").Emit(srcLengthReg);
+    (void)emitter.Emit("\n");
+    Operand *first = &insn.GetOperand(kInsnSixthOpnd);
+    /* ldr x5, [x3,x4] */
+    (void)emitter.Emit("\tldr\t");
+    first->Accept(visitor);
+    (void)emitter.Emit(", [");
+    patternStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(",").Emit(patternLengthReg);
+    (void)emitter.Emit("]\n");
+    /* the 11th operand of MOP_string_indexof is Label.FIRST_LOOP */
+    Operand *labelFirstLoop = &insn.GetOperand(11);
+    /* .Label.FIRST_LOOP: */
+    labelFirstLoop->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* the 7th operand of MOP_string_indexof is x7 */
+    Operand *ch2 = &insn.GetOperand(7);
+    /* ldr x7, [x1,x2] */
+    (void)emitter.Emit("\tldr\t");
+    ch2->Accept(visitor);
+    (void)emitter.Emit(", [");
+    srcStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(",").Emit(srcLengthReg);
+    (void)emitter.Emit("]\n");
+    /* cmp x5, x7 */
+    (void)emitter.Emit("\tcmp\t");
+    first->Accept(visitor);
+    (void)emitter.Emit(", ");
+    ch2->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* the 13th operand of MOP_string_indexof is Label.STR1_LOOP */
+    Operand *labelStr1Loop = &insn.GetOperand(13);
+    /* b.eq .Label.STR1_LOOP */
+    (void)emitter.Emit("\tb.eq\t");
+    labelStr1Loop->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* the 12th operand of MOP_string_indexof is Label.STR2_NEXT */
+    Operand *labelStr2Next = &insn.GetOperand(12);
+    /* .Label.STR2_NEXT: */
+    labelStr2Next->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* adds x2, x2, #1 */
+    (void)emitter.Emit("\tadds\t").Emit(srcLengthReg);
+    (void)emitter.Emit(", ").Emit(srcLengthReg);
+    (void)emitter.Emit(", #1\n");
+    /* b.le .Label.FIRST_LOOP */
+    (void)emitter.Emit("\tb.le\t");
+    labelFirstLoop->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* b .Label.NOMATCH */
+    (void)emitter.Emit("\tb\t");
+    labelNoMatch->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* .Label.STR1_LOOP: */
+    labelStr1Loop->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* the 8th operand of MOP_string_indexof is x8 */
+    Operand *tmp1 = &insn.GetOperand(kInsnEighthOpnd);
+    /* adds x8, x4, #8 */
+    (void)emitter.Emit("\tadds\t");
+    tmp1->Accept(visitor);
+    (void)emitter.Emit(", ").Emit(patternLengthReg);
+    (void)emitter.Emit(", #8\n");
+    /* the 9th operand of MOP_string_indexof is x9 */
+    Operand *tmp2 = &insn.GetOperand(9);
+    /* add x9, x2, #8 */
+    (void)emitter.Emit("\tadd\t");
+    tmp2->Accept(visitor);
+    (void)emitter.Emit(", ").Emit(srcLengthReg);
+    (void)emitter.Emit(", #8\n");
+    /* the 15th operand of MOP_string_indexof is Label.LAST_WORD */
+    Operand *labelLastWord = &insn.GetOperand(15);
+    /* b.ge .Label.LAST_WORD */
+    (void)emitter.Emit("\tb.ge\t");
+    labelLastWord->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* the 14th operand of MOP_string_indexof is Label.STR1_NEXT */
+    Operand *labelStr1Next = &insn.GetOperand(14);
+    /* .Label.STR1_NEXT: */
+    labelStr1Next->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* the 6th operand of MOP_string_indexof is x6 */
+    Operand *ch1 = &insn.GetOperand(6);
+    /* ldr x6, [x3,x8] */
+    (void)emitter.Emit("\tldr\t");
+    ch1->Accept(visitor);
+    (void)emitter.Emit(", [");
+    patternStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(",");
+    tmp1->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* ldr x7, [x1,x9] */
+    (void)emitter.Emit("\tldr\t");
+    ch2->Accept(visitor);
+    (void)emitter.Emit(", [");
+    srcStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit(",");
+    tmp2->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* cmp x6, x7 */
+    (void)emitter.Emit("\tcmp\t");
+    ch1->Accept(visitor);
+    (void)emitter.Emit(", ");
+    ch2->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* b.ne .Label.STR2_NEXT */
+    (void)emitter.Emit("\tb.ne\t");
+    labelStr2Next->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* adds x8, x8, #8 */
+    (void)emitter.Emit("\tadds\t");
+    tmp1->Accept(visitor);
+    (void)emitter.Emit(", ");
+    tmp1->Accept(visitor);
+    (void)emitter.Emit(", #8\n");
+    /* add x9, x9, #8 */
+    (void)emitter.Emit("\tadd\t");
+    tmp2->Accept(visitor);
+    (void)emitter.Emit(", ");
+    tmp2->Accept(visitor);
+    (void)emitter.Emit(", #8\n");
+    /* b.lt .Label.STR1_NEXT */
+    (void)emitter.Emit("\tb.lt\t");
+    labelStr1Next->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* .Label.LAST_WORD: */
+    labelLastWord->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* ldr x6, [x3] */
+    (void)emitter.Emit("\tldr\t");
+    ch1->Accept(visitor);
+    (void)emitter.Emit(", [");
+    patternStringBaseOpnd->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* sub x9, x1, x4 */
+    (void)emitter.Emit("\tsub\t");
+    tmp2->Accept(visitor);
+    (void)emitter.Emit(", ");
+    srcStringBaseOpnd->Accept(visitor);
(void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + /* ldr x7, [x9,x2] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + tmp2->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("]\n"); + /* cmp x6, x7 */ + (void)emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + (void)emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* add w0, w10, w2 */ + (void)emitter.Emit("\tadd\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", "); + resultTmp->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 17th operand of MOP_string_indexof Label.ret */ + Operand *labelRet = &insn.GetOperand(17); + /* b .Label.ret */ + (void)emitter.Emit("\tb\t"); + labelRet->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.NOMATCH: */ + labelNoMatch->Accept(visitor); + (void)emitter.Emit(":\n"); + /* mov w0, #-1 */ + (void)emitter.Emit("\tmov\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", #-1\n"); + /* .Label.ret: */ + labelRet->Accept(visitor); + (void)emitter.Emit(":\n"); +} + +/* + * intrinsic_compare_swap_int x0, xt, xs, x1, x2, w3, w4, lable1, label2 + * add xt, x1, x2 + * label1: + * ldaxr ws, [xt] + * cmp ws, w3 + * b.ne label2 + * stlxr ws, w4, [xt] + * cbnz ws, label1 + * label2: + * cset x0, eq + */ +void AArch64AsmEmitter::EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const +{ + /* MOP_compare_and_swapI and MOP_compare_and_swapL have 8 operands */ + DEBUG_ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number"); + const MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + Operand *temp0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *temp1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *obj = &insn.GetOperand(kInsnFourthOpnd); + Operand *offset = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add xt, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + temp0->Accept(visitor); + (void)emitter.Emit(", "); + obj->Accept(visitor); + (void)emitter.Emit(", "); + offset->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *label1 = &insn.GetOperand(kInsnEighthOpnd); + /* label1: */ + label1->Accept(visitor); + (void)emitter.Emit(":\n"); + /* ldaxr ws, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", ["); + temp0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *expectedValue = &insn.GetOperand(kInsnSixthOpnd); + const OpndDesc *expectedValueProp = md->opndMD[kInsnSixthOpnd]; + /* cmp ws, w3 */ + (void)emitter.Emit("\tcmp\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", "); + A64OpndEmitVisitor visitorExpect(emitter, expectedValueProp); + expectedValue->Accept(visitorExpect); + (void)emitter.Emit("\n"); + constexpr uint32 kInsnNinethOpnd = 8; + Operand *label2 = &insn.GetOperand(kInsnNinethOpnd); + /* b.ne label2 */ + (void)emitter.Emit("\tbne\t"); + label2->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *newValue = &insn.GetOperand(kInsnSeventhOpnd); + /* stlxr ws, w4, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + (void)emitter.Emit( + AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + (void)emitter.Emit(", "); + 
+
+/*
+ * intrinsic_compare_swap_int x0, xt, xs, x1, x2, w3, w4, label1, label2
+ * add   xt, x1, x2
+ * label1:
+ * ldaxr ws, [xt]
+ * cmp   ws, w3
+ * b.ne  label2
+ * stlxr ws, w4, [xt]
+ * cbnz  ws, label1
+ * label2:
+ * cset  x0, eq
+ */
+void AArch64AsmEmitter::EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const
+{
+    /* MOP_compare_and_swapI and MOP_compare_and_swapL have 8 operands */
+    DEBUG_ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number");
+    const MOperator mOp = insn.GetMachineOpcode();
+    const InsnDesc *md = &AArch64CG::kMd[mOp];
+    Operand *temp0 = &insn.GetOperand(kInsnSecondOpnd);
+    Operand *temp1 = &insn.GetOperand(kInsnThirdOpnd);
+    Operand *obj = &insn.GetOperand(kInsnFourthOpnd);
+    Operand *offset = &insn.GetOperand(kInsnFifthOpnd);
+    A64OpndEmitVisitor visitor(emitter, nullptr);
+    /* add xt, x1, x2 */
+    (void)emitter.Emit("\tadd\t");
+    temp0->Accept(visitor);
+    (void)emitter.Emit(", ");
+    obj->Accept(visitor);
+    (void)emitter.Emit(", ");
+    offset->Accept(visitor);
+    (void)emitter.Emit("\n");
+    Operand *label1 = &insn.GetOperand(kInsnEighthOpnd);
+    /* label1: */
+    label1->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    /* ldaxr ws, [xt] */
+    (void)emitter.Emit("\tldaxr\t");
+    temp1->Accept(visitor);
+    (void)emitter.Emit(", [");
+    temp0->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    Operand *expectedValue = &insn.GetOperand(kInsnSixthOpnd);
+    const OpndDesc *expectedValueProp = md->opndMD[kInsnSixthOpnd];
+    /* cmp ws, w3 */
+    (void)emitter.Emit("\tcmp\t");
+    temp1->Accept(visitor);
+    (void)emitter.Emit(", ");
+    A64OpndEmitVisitor visitorExpect(emitter, expectedValueProp);
+    expectedValue->Accept(visitorExpect);
+    (void)emitter.Emit("\n");
+    constexpr uint32 kInsnNinethOpnd = 8;
+    Operand *label2 = &insn.GetOperand(kInsnNinethOpnd);
+    /* b.ne label2 */
+    (void)emitter.Emit("\tbne\t");
+    label2->Accept(visitor);
+    (void)emitter.Emit("\n");
+    Operand *newValue = &insn.GetOperand(kInsnSeventhOpnd);
+    /* stlxr ws, w4, [xt] */
+    (void)emitter.Emit("\tstlxr\t");
+    (void)emitter.Emit(
+        AArch64CG::intRegNames[AArch64CG::kR32List][static_cast<RegOperand *>(temp1)->GetRegisterNumber()]);
+    (void)emitter.Emit(", ");
+    newValue->Accept(visitor);
+    (void)emitter.Emit(", [");
+    temp0->Accept(visitor);
+    (void)emitter.Emit("]\n");
+    /* cbnz ws, label1 */
+    (void)emitter.Emit("\tcbnz\t");
+    (void)emitter.Emit(
+        AArch64CG::intRegNames[AArch64CG::kR32List][static_cast<RegOperand *>(temp1)->GetRegisterNumber()]);
+    (void)emitter.Emit(", ");
+    label1->Accept(visitor);
+    (void)emitter.Emit("\n");
+    /* label2: */
+    label2->Accept(visitor);
+    (void)emitter.Emit(":\n");
+    Operand *retVal = &insn.GetOperand(kInsnFirstOpnd);
+    /* cset x0, eq */
+    (void)emitter.Emit("\tcset\t");
+    retVal->Accept(visitor);
+    (void)emitter.Emit(", EQ\n");
+}
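+
+/*
+ * This is the LL/SC compare-and-swap idiom: stlxr can fail spuriously, so the
+ * loop retries until the store succeeds or the comparison fails, making the
+ * overall operation a strong CAS. A rough C++ counterpart (illustrative
+ * sketch only):
+ *
+ *     bool CompareAndSwapInt(std::atomic<int32_t> &field, int32_t expected, int32_t desired) {
+ *         int32_t e = expected;
+ *         return field.compare_exchange_strong(e, desired, std::memory_order_seq_cst);
+ *     }
+ */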
+
+void AArch64AsmEmitter::EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const
+{
+    const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_rel];
+    Operand *result = &insn.GetOperand(kInsnFirstOpnd);
+    Operand *src = &insn.GetOperand(kInsnSecondOpnd);
+    Operand *symbol = &insn.GetOperand(kInsnThirdOpnd);
+    auto stImmOpnd = static_cast<StImmOperand *>(symbol);
+    A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]);
+    A64OpndEmitVisitor srcVisitor(emitter, md->opndMD[1]);
+    (void)emitter.Emit("\t").Emit("add").Emit("\t");
+    result->Accept(resultVisitor);
+    (void)emitter.Emit(", ");
+    src->Accept(srcVisitor);
+    (void)emitter.Emit(", #:tprel_hi12:").Emit(stImmOpnd->GetName()).Emit(", lsl #12\n");
+    (void)emitter.Emit("\t").Emit("add").Emit("\t");
+    result->Accept(resultVisitor);
+    (void)emitter.Emit(", ");
+    result->Accept(resultVisitor);
+    (void)emitter.Emit(", #:tprel_lo12_nc:").Emit(stImmOpnd->GetName()).Emit("\n");
+}
+
+void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const
+{
+    const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_call];
+    Operand *func = &insn.GetOperand(kInsnFirstOpnd);
+    Operand *symbol = &insn.GetOperand(kInsnThirdOpnd);
+    const OpndDesc *prop = md->opndMD[0];
+    auto *stImmOpnd = static_cast<StImmOperand *>(symbol);
+    const std::string &symName = stImmOpnd->GetName();
+    A64OpndEmitVisitor funcVisitor(emitter, prop);
+    /* adrp x0, :tlsdesc:symbol */
+    (void)emitter.Emit("\t").Emit("adrp\tx0, :tlsdesc:").Emit(symName).Emit("\n");
+    /* ldr x1, [x0, #:tlsdesc_lo12:symbol] */
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\t");
+    func->Accept(funcVisitor);
+    (void)emitter.Emit(", [x0, #:tlsdesc_lo12:").Emit(symName).Emit("]\n");
+    /* add x0, x0, :tlsdesc_lo12:symbol */
+    (void)emitter.Emit("\t").Emit("add\tx0, x0, :tlsdesc_lo12:").Emit(symName).Emit("\n");
+    /* .tlsdesccall */
+    (void)emitter.Emit("\t").Emit(".tlsdesccall").Emit("\t").Emit(symName).Emit("\n");
+    /* blr xd */
+    (void)emitter.Emit("\t").Emit("blr").Emit("\t");
+    func->Accept(funcVisitor);
+    (void)emitter.Emit("\n");
+}
+
+void AArch64AsmEmitter::EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const
+{
+    const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()];
+    auto *result = &insn.GetOperand(kInsnFirstOpnd);
+    auto *temp = &insn.GetOperand(kInsnSecondOpnd);
+    auto *addr = &insn.GetOperand(kInsnThirdOpnd);
+    auto *value = &insn.GetOperand(kInsnFourthOpnd);
+    auto *label = &insn.GetOperand(kInsnFifthOpnd);
+    A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[kInsnFirstOpnd]);
+    A64OpndEmitVisitor tempVisitor(emitter, md->opndMD[kInsnSecondOpnd]);
+    A64OpndEmitVisitor addrVisitor(emitter, md->opndMD[kInsnThirdOpnd]);
+    A64OpndEmitVisitor valueVisitor(emitter, md->opndMD[kInsnFourthOpnd]);
+    A64OpndEmitVisitor labelVisitor(emitter, md->opndMD[kInsnFifthOpnd]);
+    /* label: */
+    label->Accept(labelVisitor);
+    (void)emitter.Emit(":\n");
+    /* ldxr x0, [x2] */
+    (void)emitter.Emit("\t").Emit("ldxr").Emit("\t");
+    result->Accept(resultVisitor);
+    (void)emitter.Emit(", [");
+    addr->Accept(addrVisitor);
+    (void)emitter.Emit("]\n");
+    /* stxr w1, x3, [x2] */
+    (void)emitter.Emit("\t").Emit("stxr").Emit("\t");
+    temp->Accept(tempVisitor);
+    (void)emitter.Emit(", ");
+    value->Accept(valueVisitor);
+    (void)emitter.Emit(", [");
+    addr->Accept(addrVisitor);
+    (void)emitter.Emit("]\n");
+    /* cbnz w1, label */
+    (void)emitter.Emit("\t").Emit("cbnz").Emit("\t");
+    temp->Accept(tempVisitor);
+    (void)emitter.Emit(", ");
+    label->Accept(labelVisitor);
+    (void)emitter.Emit("\n");
+    /* dmb ish */
+    (void)emitter.Emit("\t").Emit("dmb").Emit("\t").Emit("ish").Emit("\n");
+}
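+
+/*
+ * Note the contrast with EmitGetAndSetInt above: here the exchange uses the
+ * relaxed ldxr/stxr pair and ordering comes from the trailing "dmb ish" full
+ * barrier (roughly the sequence compilers emit for __sync_lock_test_and_set
+ * plus a fence), whereas getAndSetInt relies on the acquire/release
+ * ldaxr/stlxr forms and needs no separate barrier.
+ */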
+
+void AArch64AsmEmitter::EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const
+{
+    /*
+     * mrs x16, TPIDR_EL0
+     * ldr x16, [x16, #64]
+     * ldr x16, [x16, #8]
+     * cbz x16, .lnoexception
+     * bl MCC_ThrowPendingException
+     * .lnoexception:
+     */
+    (void)emitter.Emit("\t").Emit("mrs").Emit("\tx16, TPIDR_EL0");
+    (void)emitter.Emit("\n");
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #64]");
+    (void)emitter.Emit("\n");
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #8]");
+    (void)emitter.Emit("\n");
+    (void)emitter.Emit("\t").Emit("cbz").Emit("\tx16, .lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName());
+    (void)emitter.Emit("\n");
+    (void)emitter.Emit("\t").Emit("bl").Emit("\tMCC_ThrowPendingException");
+    (void)emitter.Emit("\n");
+    (void)emitter.Emit(".lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName()).Emit(":");
+    (void)emitter.Emit("\n");
+}
+
+void AArch64AsmEmitter::EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const
+{
+    /* ldr xzr, [xs] */
+    const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr];
+
+    Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd);
+    const OpndDesc *prop0 = md->opndMD[0];
+    A64OpndEmitVisitor visitor(emitter, prop0);
+
+    /* emit "ldr xzr, [xs]" */
+#ifdef USE_32BIT_REF
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\twzr, [");
+#else
+    (void)emitter.Emit("\t").Emit("ldr").Emit("\txzr, [");
+#endif /* USE_32BIT_REF */
+    opnd0->Accept(visitor);
+    (void)emitter.Emit("]");
+    (void)emitter.Emit("\t// Lazy binding\n");
+}
+
+void AArch64AsmEmitter::PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const
+{
+    VectorRegSpec *vecSpec = static_cast<AArch64VectorInsn &>(insn).GetAndRemoveRegSpecFromList();
+    compositeOpnds = vecSpec->compositeOpnds ? vecSpec->compositeOpnds : compositeOpnds;
+    regOpnd->SetVecLanePosition(vecSpec->vecLane);
+    switch (insn.GetMachineOpcode()) {
+        case MOP_vanduuu:
+        case MOP_vxoruuu:
+        case MOP_voruuu:
+        case MOP_vnotuu:
+        case MOP_vextuuui: {
+            regOpnd->SetVecLaneSize(k8ByteSize);
+            regOpnd->SetVecElementSize(k8BitSize);
+            break;
+        }
+        case MOP_vandvvv:
+        case MOP_vxorvvv:
+        case MOP_vorvvv:
+        case MOP_vnotvv:
+        case MOP_vextvvvi: {
+            regOpnd->SetVecLaneSize(k16ByteSize);
+            regOpnd->SetVecElementSize(k8BitSize);
+            break;
+        }
+        default: {
+            regOpnd->SetVecLaneSize(vecSpec->vecLaneMax);
+            regOpnd->SetVecElementSize(vecSpec->vecElementSize);
+            break;
+        }
+    }
+}
+
+struct CfiDescr {
+    const std::string name;
+    uint32 opndCount;
+    /* a 3-element OperandType array storing each cfi operand's type */
+    std::array<Operand::OperandType, 3> opndTypes;
+};
+
+static CfiDescr cfiDescrTable[cfi::kOpCfiLast + 1] = {
+#define CFI_DEFINE(k, sub, n, o0, o1, o2) {".cfi_" #k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}},
+#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \
+    {"." #k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}},
+#include "cfi.def"
+#undef CFI_DEFINE
+#undef ARM_DIRECTIVES_DEFINE
+    {".cfi_undef", 0, {Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef}}};
+
+void AArch64AsmEmitter::EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    CfiDescr &cfiDescr = cfiDescrTable[mOp];
+    (void)emitter.Emit("\t").Emit(cfiDescr.name);
+    for (uint32 i = 0; i < cfiDescr.opndCount; ++i) {
+        (void)emitter.Emit(" ");
+        Operand &curOperand = insn.GetOperand(i);
+        cfi::CFIOpndEmitVisitor cfiOpndEmitVisitor(emitter);
+        curOperand.Accept(cfiOpndEmitVisitor);
+        if (i < (cfiDescr.opndCount - 1)) {
+            (void)emitter.Emit(",");
+        }
+    }
+    (void)emitter.Emit("\n");
+}
+
+struct DbgDescr {
+    const std::string name;
+    uint32 opndCount;
+    /* a 3-element OperandType array storing each dbg operand's type */
+    std::array<Operand::OperandType, 3> opndTypes;
+};
+
+static DbgDescr dbgDescrTable[mpldbg::kOpDbgLast + 1] = {
+#define DBG_DEFINE(k, sub, n, o0, o1, o2) {#k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}},
+#include "dbg.def"
+#undef DBG_DEFINE
+    {"undef", 0, {Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef}}};
+
+void AArch64AsmEmitter::EmitAArch64DbgInsn(Emitter &emitter, const Insn &insn) const
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    DbgDescr &dbgDescr = dbgDescrTable[mOp];
+    (void)emitter.Emit("\t.").Emit(dbgDescr.name);
+    for (uint32 i = 0; i < dbgDescr.opndCount; ++i) {
+        (void)emitter.Emit(" ");
+        Operand &curOperand = insn.GetOperand(i);
+        mpldbg::DBGOpndEmitVisitor dbgOpndEmitVisitor(emitter);
+        curOperand.Accept(dbgOpndEmitVisitor);
+    }
+    (void)emitter.Emit("\n");
+}
+
+bool AArch64AsmEmitter::CheckInsnRefField(const Insn &insn, size_t opndIndex) const
+{
+    if (insn.IsAccessRefField() && insn.AccessMem()) {
+        Operand &opnd0 = insn.GetOperand(opndIndex);
+        if (opnd0.IsRegister()) {
+            static_cast<RegOperand &>(opnd0).SetRefField(true);
+            return true;
+        }
+    }
+    return false;
+}
+
+/* new phase manager */
+bool CgEmission::PhaseRun(maplebe::CGFunc &f)
+{
+    Emitter *emitter = f.GetCG()->GetEmitter();
+    CHECK_NULL_FATAL(emitter);
+    if (CGOptions::GetEmitFileType() == CGOptions::kAsm) {
+        AsmFuncEmitInfo funcEmitInfo(f);
+        emitter->EmitLocalVariable(f);
+        static_cast<AArch64AsmEmitter *>(emitter)->Run(funcEmitInfo);
+        emitter->EmitHugeSoRoutines();
+    } else {
+        FuncEmitInfo &funcEmitInfo = static_cast<AArch64ObjEmitter *>(emitter)->CreateFuncEmitInfo(f);
+        static_cast<AArch64ObjEmitter *>(emitter)->Run(funcEmitInfo);
+        f.SetFuncEmitInfo(&funcEmitInfo);
+    }
+
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit)
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5cf8028deddfc1863296c2360c1f42b3d4459304
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_fixshortbranch.h"
+#include "cg.h"
+#include "mpl_logging.h"
+#include "common_utils.h"
+
+namespace maplebe {
+uint32 AArch64FixShortBranch::CalculateAlignRange(const BB &bb, uint32 addr) const
+{
+    if (addr == 0) {
+        return addr;
+    }
+    uint32 alignPower = bb.GetAlignPower();
+    /*
+     * The algorithm avoids, in two stages, the problem of alignment pushing a conditional branch out of range.
+     * 1. asm: .mpl -> .s
+     *    The pseudo-instruction [.p2align 5] is 12B.
+     *    kAlignPseudoSize = 12 / 4 = 3
+     * 2. link: .s -> .o
+     *    The pseudo-instruction will be expanded to nops.
+     *    e.g. .p2align 5
+     *         alignPower = 5, alignValue = 2^5 = 32
+     *         range = (32 - ((addr - 1) * 4) % 32) / 4 - 1
+     *
+     * =======> max[range, kAlignPseudoSize]
+     */
+    uint32 range = ((1U << alignPower) - (((addr - 1) * kInsnSize) & ((1U << alignPower) - 1))) / kInsnSize - 1;
+    return range > kAlignPseudoSize ? range : kAlignPseudoSize;
+}
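+
+/*
+ * Worked example (our own numbers, for illustration): with alignPower = 5
+ * (32-byte alignment) and addr = 7, the bytes already consumed are
+ * (7 - 1) * 4 = 24, so range = (32 - 24 % 32) / 4 - 1 = 1; since
+ * 1 < kAlignPseudoSize (3), the function conservatively returns 3.
+ */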
+
+void AArch64FixShortBranch::SetInsnId() const
+{
+    uint32 i = 0;
+    AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc *>(cgFunc);
+    FOR_ALL_BB(bb, aarch64CGFunc) {
+        if (aarch64CGFunc->GetMirModule().IsCModule() && bb->IsBBNeedAlign() && bb->GetAlignNopNum() != 0) {
+            i = i + CalculateAlignRange(*bb, i);
+        }
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            i += insn->GetAtomicNum();
+            insn->SetId(i);
+            if (insn->GetMachineOpcode() == MOP_adrp_ldr && CGOptions::IsLazyBinding() &&
+                !cgFunc->GetCG()->IsLibcore()) {
+                /* For 1 additional EmitLazyBindingRoutine in lazybinding
+                 * see function AArch64Insn::Emit in file aarch64_insn.cpp
+                 */
+                ++i;
+            }
+        }
+    }
+}
+
+/*
+ * TBZ/TBNZ instructions are generated under -O2; these branch instructions only have a range of +/-32KB.
+ * If the branch target is not reachable, we split tbz/tbnz into a combination of ubfx and cbz/cbnz, which
+ * clobbers one extra register. With LSRA under -O2, we can use one of the reserved registers, R16, for
+ * that purpose. To save compile time, we do this change when there are more than 32KB / 4 instructions
+ * in the function.
+ */
+void AArch64FixShortBranch::FixShortBranches()
+{
+    AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc *>(cgFunc);
+    bool change = false;
+    do {
+        change = false;
+        SetInsnId();
+        for (auto *bb = aarch64CGFunc->GetFirstBB(); bb != nullptr && !change; bb = bb->GetNext()) {
+            /* Do a backward scan searching for short branches */
+            for (auto *insn = bb->GetLastInsn(); insn != nullptr && !change; insn = insn->GetPrev()) {
+                if (!insn->IsMachineInstruction()) {
+                    continue;
+                }
+                MOperator thisMop = insn->GetMachineOpcode();
+                if (thisMop != MOP_wtbz && thisMop != MOP_wtbnz && thisMop != MOP_xtbz && thisMop != MOP_xtbnz) {
+                    continue;
+                }
+                LabelOperand &label = static_cast<LabelOperand &>(insn->GetOperand(kInsnThirdOpnd));
+                /* should not be commented out after bug fix */
+                if (aarch64CGFunc->DistanceCheck(*bb, label.GetLabelIndex(), insn->GetId())) {
+                    continue;
+                }
+                auto &reg = static_cast<RegOperand &>(insn->GetOperand(kInsnFirstOpnd));
+                ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false);
+                auto &bitPos = static_cast<ImmOperand &>(insn->GetOperand(kInsnSecondOpnd));
+                MOperator ubfxOp = MOP_undef;
+                MOperator cbOp = MOP_undef;
+                switch (thisMop) {
+                    case MOP_wtbz:
+                        ubfxOp = MOP_wubfxrri5i5;
+                        cbOp = MOP_wcbz;
+                        break;
+                    case MOP_wtbnz:
+                        ubfxOp = MOP_wubfxrri5i5;
+                        cbOp = MOP_wcbnz;
+                        break;
+                    case MOP_xtbz:
+                        ubfxOp = MOP_xubfxrri6i6;
+                        cbOp = MOP_xcbz;
+                        break;
+                    case MOP_xtbnz:
+                        ubfxOp = MOP_xubfxrri6i6;
+                        cbOp = MOP_xcbnz;
+                        break;
+                    default:
+                        break;
+                }
+                RegOperand &tmp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+                    R16, (ubfxOp == MOP_wubfxrri5i5) ? k32BitSize : k64BitSize, kRegTyInt);
+                (void)bb->InsertInsnAfter(*insn, cgFunc->GetInsnBuilder()->BuildInsn(cbOp, tmp, label));
+                (void)bb->InsertInsnAfter(*insn,
+                                          cgFunc->GetInsnBuilder()->BuildInsn(ubfxOp, tmp, reg, bitPos, bitSize));
+                bb->RemoveInsn(*insn);
+                change = true;
+            }
+        }
+    } while (change);
+}
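+
+/*
+ * The rewrite performed above, for example (illustrative):
+ *     tbnz w0, #3, .L_far          // tbz/tbnz reach: +/-32KB
+ * becomes
+ *     ubfx w16, w0, #3, #1         // extract the tested bit into R16
+ *     cbnz w16, .L_far             // cbz/cbnz reach: +/-1MB
+ */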
+ */ + +#include "aarch64_global.h" +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" + +namespace maplebe { +using namespace maple; +#define GLOBAL_DUMP CG_DEBUG_FUNC(cgFunc) + +constexpr uint32 kExMOpTypeSize = 9; +constexpr uint32 kLsMOpTypeSize = 15; + +MOperator exMOpTable[kExMOpTypeSize] = {MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre, + MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre}; +MOperator lsMOpTable[kLsMOpTypeSize] = {MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs, + MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs, + MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs}; + +/* Optimize ExtendShiftOptPattern: + * ========================================================== + * nosuffix LSL LSR ASR extrn (def) + * nosuffix | F | LSL | LSR | ASR | extrn | + * LSL | F | LSL | F | F | extrn | + * LSR | F | F | LSR | F | F | + * ASR | F | F | F | ASR | F | + * exten | F | F | F | F |exten(self)| + * (use) + * =========================================================== + */ +constexpr uint32 kExtenAddShift = 5; +ExtendShiftOptPattern::SuffixType doOptimize[kExtenAddShift][kExtenAddShift] = { + {ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSL, ExtendShiftOptPattern::kLSR, + ExtendShiftOptPattern::kASR, ExtendShiftOptPattern::kExten}, + {ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSL, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kExten}, + {ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kLSR, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix}, + {ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kASR, ExtendShiftOptPattern::kNoSuffix}, + {ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kNoSuffix, + ExtendShiftOptPattern::kNoSuffix, ExtendShiftOptPattern::kExten}}; + +static bool IsZeroRegister(const Operand &opnd) +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +void AArch64GlobalOpt::Run() +{ + OptimizeManager optManager(cgFunc); + bool hasSpillBarrier = (cgFunc.NumBBs() > kMaxBBNum) || (cgFunc.GetRD()->GetMaxInsnNO() > kMaxInsnNum); + if (cgFunc.IsAfterRegAlloc()) { + optManager.Optimize(); + optManager.Optimize(); + return; + } + if (!hasSpillBarrier) { + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + } + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); +} + +/* if used Operand in insn is defined by zero in all define insn, return true */ +bool OptimizePattern::OpndDefByZero(Insn &insn, int32 useIdx) const +{ + DEBUG_ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); + /* Zero Register don't need be defined */ + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { + return true; + } + + InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); + if (defInsns.empty()) { + return false; + } + for (auto &defInsn : defInsns) { + if (!InsnDefZero(*defInsn)) { + return false; + } + } + return true; +} + +/* if used Operand in insn is defined by one in all define insn, return true */ +bool 
+
+/* if the used Operand in insn is defined by one in all define insns, return true */
+bool OptimizePattern::OpndDefByOne(Insn &insn, int32 useIdx) const
+{
+    DEBUG_ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register");
+    /* the zero register does not need to be defined */
+    if (IsZeroRegister(insn.GetOperand(static_cast<uint32>(useIdx)))) {
+        return false;
+    }
+    InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx);
+    if (defInsns.empty()) {
+        return false;
+    }
+    for (auto &defInsn : defInsns) {
+        if (!InsnDefOne(*defInsn)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* if the used Operand in insn is defined with at most one valid bit in all define insns, return true */
+bool OptimizePattern::OpndDefByOneOrZero(Insn &insn, int32 useIdx) const
+{
+    if (IsZeroRegister(insn.GetOperand(static_cast<uint32>(useIdx)))) {
+        return true;
+    }
+
+    InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx);
+    if (defInsnSet.empty()) {
+        return false;
+    }
+
+    for (auto &defInsn : defInsnSet) {
+        if (!InsnDefOneOrZero(*defInsn)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* if the defined operand (must be the first operand currently) in insn is const one, return true */
+bool OptimizePattern::InsnDefOne(const Insn &insn)
+{
+    MOperator defMop = insn.GetMachineOpcode();
+    switch (defMop) {
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            Operand &srcOpnd = insn.GetOperand(1);
+            DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand");
+            ImmOperand &srcConst = static_cast<ImmOperand &>(srcOpnd);
+            int64 srcConstValue = srcConst.GetValue();
+            if (srcConstValue == 1) {
+                return true;
+            }
+            return false;
+        }
+        default:
+            return false;
+    }
+}
+
+/* if the defined operand (must be the first operand currently) in insn is const zero, return true */
+bool OptimizePattern::InsnDefZero(const Insn &insn)
+{
+    MOperator defMop = insn.GetMachineOpcode();
+    switch (defMop) {
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd);
+            DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand");
+            ImmOperand &srcConst = static_cast<ImmOperand &>(srcOpnd);
+            int64 srcConstValue = srcConst.GetValue();
+            if (srcConstValue == 0) {
+                return true;
+            }
+            return false;
+        }
+        case MOP_xmovrr:
+        case MOP_wmovrr:
+            return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd));
+        default:
+            return false;
+    }
+}
+
+/* if the defined operand (must be the first operand currently) in insn has only one valid bit, return true */
+bool OptimizePattern::InsnDefOneOrZero(const Insn &insn)
+{
+    MOperator defMop = insn.GetMachineOpcode();
+    switch (defMop) {
+        case MOP_wcsetrc:
+        case MOP_xcsetrc:
+            return true;
+        case MOP_wmovri32:
+        case MOP_xmovri64: {
+            Operand &defOpnd = insn.GetOperand(kInsnSecondOpnd);
+            DEBUG_ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand");
+            auto &defConst = static_cast<ImmOperand &>(defOpnd);
+            int64 defConstValue = defConst.GetValue();
+            if (defConstValue != 0 && defConstValue != 1) {
+                return false;
+            } else {
+                return true;
+            }
+        }
+        case MOP_xmovrr:
+        case MOP_wmovrr: {
+            return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd));
+        }
+        case MOP_wlsrrri5:
+        case MOP_xlsrrri6: {
+            Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd);
+            DEBUG_ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand");
+            ImmOperand &opndImm = static_cast<ImmOperand &>(opnd2);
+            int64 shiftBits = opndImm.GetValue();
+            if (((defMop == MOP_wlsrrri5) && (shiftBits == k32BitSize - 1)) ||
+                ((defMop == MOP_xlsrrri6) && (shiftBits == k64BitSize - 1))) {
+                return true;
+            } else {
+                return false;
+            }
+        }
+        default:
+            return false;
+    }
+}
&static_cast(insn->GetOperand(index)).GetOperands(); + int32 size = static_cast(list->size()); + for (int i = 0; i < size; ++i) { + RegOperand *opnd = static_cast(*(list->begin())); + list->pop_front(); + if (opnd->GetRegisterNumber() == regNO) { + list->push_back(static_cast(newOpnd)); + } else { + list->push_back(opnd); + } + } +} + +void OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO, Operand &newOpnd, + bool updateInfo) const +{ + for (auto useInsn : useInsnSet) { + if (useInsn->GetMachineOpcode() == MOP_asm) { + ReplaceAsmListReg(useInsn, kAsmInputListOpnd, regNO, &newOpnd); + } + const InsnDesc *md = useInsn->GetDesc(); + uint32 opndNum = useInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = useInsn->GetOperand(i); + auto *regProp = md->opndMD[i]; + if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + useInsn->SetOperand(i, newOpnd); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + MemOperand *newMem = nullptr; + if (base != nullptr && (base->GetRegisterNumber() == regNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetBaseRegister(*static_cast(&newOpnd)); + useInsn->SetOperand(i, *newMem); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + if (index != nullptr && (index->GetRegisterNumber() == regNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetIndexRegister(*static_cast(&newOpnd)); + if (static_cast(newOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { + newMem->UpdateExtend(MemOperand::kSignExtend); + } + useInsn->SetOperand(i, *newMem); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + } + } + } +} + +bool ForwardPropPattern::CheckCondition(Insn &insn) +{ + if (!insn.IsMachineInstruction()) { + return false; + } + if ((insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) && + (insn.GetMachineOpcode() != MOP_xmovrr_uxtw)) { + return false; + } + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + if (firstOpnd.GetSize() != secondOpnd.GetSize() && insn.GetMachineOpcode() != MOP_xmovrr_uxtw) { + return false; + } + RegOperand &firstRegOpnd = static_cast(firstOpnd); + RegOperand &secondRegOpnd = static_cast(secondOpnd); + uint32 firstRegNO = firstRegOpnd.GetRegisterNumber(); + uint32 secondRegNO = secondRegOpnd.GetRegisterNumber(); + if (IsZeroRegister(firstRegOpnd) || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) { + return false; + } + firstRegUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, firstRegNO, true); + if (firstRegUseInsnSet.empty()) { + return false; + } + InsnSet secondRegDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true); + if (secondRegDefInsnSet.size() != 1 || RegOperand::IsSameReg(firstOpnd, secondOpnd)) { + return false; + } + bool toDoOpt = true; + for (auto useInsn : firstRegUseInsnSet) { + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn)) { + toDoOpt = false; + break; + } + 
+
+bool ForwardPropPattern::CheckCondition(Insn &insn)
+{
+    if (!insn.IsMachineInstruction()) {
+        return false;
+    }
+    if ((insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) &&
+        (insn.GetMachineOpcode() != MOP_xmovrr_uxtw)) {
+        return false;
+    }
+    Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd);
+    if (firstOpnd.GetSize() != secondOpnd.GetSize() && insn.GetMachineOpcode() != MOP_xmovrr_uxtw) {
+        return false;
+    }
+    RegOperand &firstRegOpnd = static_cast<RegOperand &>(firstOpnd);
+    RegOperand &secondRegOpnd = static_cast<RegOperand &>(secondOpnd);
+    uint32 firstRegNO = firstRegOpnd.GetRegisterNumber();
+    uint32 secondRegNO = secondRegOpnd.GetRegisterNumber();
+    if (IsZeroRegister(firstRegOpnd) || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) {
+        return false;
+    }
+    firstRegUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, firstRegNO, true);
+    if (firstRegUseInsnSet.empty()) {
+        return false;
+    }
+    InsnSet secondRegDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true);
+    if (secondRegDefInsnSet.size() != 1 || RegOperand::IsSameReg(firstOpnd, secondOpnd)) {
+        return false;
+    }
+    bool toDoOpt = true;
+    for (auto useInsn : firstRegUseInsnSet) {
+        if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn)) {
+            toDoOpt = false;
+            break;
+        }
+        /* part defined */
+        if ((useInsn->GetMachineOpcode() == MOP_xmovkri16) || (useInsn->GetMachineOpcode() == MOP_wmovkri16)) {
+            toDoOpt = false;
+            break;
+        }
+        if (useInsn->GetMachineOpcode() == MOP_asm) {
+            toDoOpt = false;
+            break;
+        }
+        InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true);
+        if (defInsnSet.size() > 1) {
+            toDoOpt = false;
+            break;
+        } else if (defInsnSet.size() == 1 && *defInsnSet.begin() != &insn) {
+            toDoOpt = false;
+            break;
+        }
+    }
+    return toDoOpt;
+}
+
+void ForwardPropPattern::Optimize(Insn &insn)
+{
+    Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd);
+    RegOperand &firstRegOpnd = static_cast<RegOperand &>(firstOpnd);
+    uint32 firstRegNO = firstRegOpnd.GetRegisterNumber();
+    for (auto *useInsn : firstRegUseInsnSet) {
+        if (useInsn->GetMachineOpcode() == MOP_asm) {
+            ReplaceAsmListReg(useInsn, kAsmInputListOpnd, firstRegNO, &secondOpnd);
+            cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+            continue;
+        }
+        const InsnDesc *md = useInsn->GetDesc();
+        uint32 opndNum = useInsn->GetOperandSize();
+        for (uint32 i = 0; i < opndNum; ++i) {
+            Operand &opnd = useInsn->GetOperand(i);
+            const OpndDesc *regProp = md->GetOpndDes(i);
+            if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) {
+                continue;
+            }
+
+            if (opnd.IsRegister() && (static_cast<RegOperand &>(opnd).GetRegisterNumber() == firstRegNO)) {
+                useInsn->SetOperand(i, secondOpnd);
+                if (((useInsn->GetMachineOpcode() == MOP_xmovrr) || (useInsn->GetMachineOpcode() == MOP_wmovrr)) &&
+                    (static_cast<RegOperand &>(useInsn->GetOperand(kInsnSecondOpnd)).IsVirtualRegister()) &&
+                    (static_cast<RegOperand &>(useInsn->GetOperand(kInsnFirstOpnd)).IsVirtualRegister())) {
+                    (void)modifiedBB.insert(useInsn->GetBB());
+                }
+                cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+            } else if (opnd.IsMemoryAccessOperand()) {
+                MemOperand &memOpnd = static_cast<MemOperand &>(opnd);
+                RegOperand *base = memOpnd.GetBaseRegister();
+                RegOperand *index = memOpnd.GetIndexRegister();
+                MemOperand *newMem = nullptr;
+                if (base != nullptr && (base->GetRegisterNumber() == firstRegNO)) {
+                    newMem = static_cast<MemOperand *>(opnd.Clone(*cgFunc.GetMemoryPool()));
+                    CHECK_FATAL(newMem != nullptr, "null ptr check");
+                    newMem->SetBaseRegister(static_cast<RegOperand &>(secondOpnd));
+                    useInsn->SetOperand(i, *newMem);
+                    cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+                }
+                if ((index != nullptr) && (index->GetRegisterNumber() == firstRegNO)) {
+                    newMem = static_cast<MemOperand *>(opnd.Clone(*cgFunc.GetMemoryPool()));
+                    CHECK_FATAL(newMem != nullptr, "null ptr check");
+                    newMem->SetIndexRegister(static_cast<RegOperand &>(secondOpnd));
+                    if (static_cast<RegOperand &>(secondOpnd).GetValidBitsNum() != index->GetValidBitsNum()) {
+                        newMem->UpdateExtend(MemOperand::kSignExtend);
+                    }
+                    useInsn->SetOperand(i, *newMem);
+                    cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+                }
+            }
+        }
+    }
+    insn.SetOperand(0, secondOpnd);
+    cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true);
+}
+
+void ForwardPropPattern::RemoveMopUxtwToMov(Insn &insn)
+{
+    if (CGOptions::DoCGSSA()) {
+        CHECK_FATAL(false, "check case in ssa");
+    }
+    auto &secondOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd));
+    auto &destOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
+    uint32 destRegNo = destOpnd.GetRegisterNumber();
+    destOpnd.SetRegisterNumber(secondOpnd.GetRegisterNumber());
+    auto *newOpnd = static_cast<RegOperand *>(destOpnd.Clone(*cgFunc.GetMemoryPool()));
+    cgFunc.InsertExtendSet(secondOpnd.GetRegisterNumber());
+    InsnSet regUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, destRegNo, true);
+    if (regUseInsnSet.size() >= 1) {
+        for (auto useInsn : regUseInsnSet) {
+            uint32 optSize = useInsn->GetOperandSize();
+            for (uint32 i = 0; i < optSize; i++) {
+                DEBUG_ASSERT(useInsn->GetOperand(i).IsRegister(), "only designed for register");
+                if (destRegNo == static_cast<RegOperand &>(useInsn->GetOperand(i)).GetRegisterNumber()) {
+                    useInsn->SetOperand(i, *newOpnd);
+                }
+            }
+            cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false);
+        }
+    }
+    insn.GetBB()->RemoveInsn(insn);
+}
+
+void ForwardPropPattern::Init()
+{
+    firstRegUseInsnSet.clear();
+}
+
+void ForwardPropPattern::Run()
+{
+    bool secondTime = false;
+    do {
+        FOR_ALL_BB(bb, &cgFunc) {
+            if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) {
+                continue;
+            }
+
+            if (secondTime) {
+                modifiedBB.erase(bb);
+            }
+
+            FOR_BB_INSNS(insn, bb) {
+                Init();
+                if (!CheckCondition(*insn)) {
+                    if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) {
+                        insn->SetMOP(AArch64CG::kMd[MOP_xuxtw64]);
+                    }
+                    continue;
+                }
+                if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) {
+                    RemoveMopUxtwToMov(*insn);
+                    continue;
+                }
+                Optimize(*insn);
+            }
+        }
+        secondTime = true;
+    } while (!modifiedBB.empty());
+}
+
+bool BackPropPattern::CheckAndGetOpnd(const Insn &insn)
+{
+    if (!insn.IsMachineInstruction()) {
+        return false;
+    }
+    if (!cgFunc.IsAfterRegAlloc() && (insn.GetMachineOpcode() != MOP_xmovrr) &&
+        (insn.GetMachineOpcode() != MOP_wmovrr)) {
+        return false;
+    }
+    if (cgFunc.IsAfterRegAlloc() && (insn.GetMachineOpcode() != MOP_xmovrr) &&
+        (insn.GetMachineOpcode() != MOP_wmovrr) && (insn.GetMachineOpcode() != MOP_xvmovs) &&
+        (insn.GetMachineOpcode() != MOP_xvmovd)) {
+        return false;
+    }
+    Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd);
+    if (RegOperand::IsSameReg(firstOpnd, secondOpnd)) {
+        return false;
+    }
+    if (firstOpnd.GetSize() != secondOpnd.GetSize()) {
+        return false;
+    }
+    firstRegOpnd = &static_cast<RegOperand &>(firstOpnd);
+    secondRegOpnd = &static_cast<RegOperand &>(secondOpnd);
+    if (IsZeroRegister(*firstRegOpnd)) {
+        return false;
+    }
+    if (!cgFunc.IsAfterRegAlloc() && (!secondRegOpnd->IsVirtualRegister() || !firstRegOpnd->IsVirtualRegister())) {
+        return false;
+    }
+    firstRegNO = firstRegOpnd->GetRegisterNumber();
+    secondRegNO = secondRegOpnd->GetRegisterNumber();
+    return true;
+}
+
+bool BackPropPattern::DestOpndHasUseInsns(Insn &insn)
+{
+    BB &bb = *insn.GetBB();
+    InsnSet useInsnSetOfFirstOpnd;
+    bool findRes =
+        cgFunc.GetRD()->FindRegUseBetweenInsn(firstRegNO, insn.GetNext(), bb.GetLastInsn(), useInsnSetOfFirstOpnd);
+    if ((findRes && useInsnSetOfFirstOpnd.empty()) ||
+        (!findRes && useInsnSetOfFirstOpnd.empty() && !bb.GetLiveOut()->TestBit(firstRegNO))) {
+        return false;
+    }
+    return true;
+}
+
+bool BackPropPattern::DestOpndLiveOutToEHSuccs(Insn &insn) const
+{
+    BB &bb = *insn.GetBB();
+    for (auto ehSucc : bb.GetEhSuccs()) {
+        if (ehSucc->GetLiveIn()->TestBit(firstRegNO)) {
+            return true;
+        }
+    }
+    return false;
+}
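+
+/*
+ * Illustrative effect of BackPropPattern (our example): the copy is removed by
+ * rewriting the source's defining instruction to define the destination
+ * directly:
+ *     add w2, w3, #4    // defInsnForSecondOpnd: defines w2
+ *     mov w1, w2        // insn: w1 <- w2
+ * becomes
+ *     add w1, w3, #4    // defines w1 directly; the mov is removed
+ */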
+
+bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn)
+{
+    BB &bb = *insn.GetBB();
+    /* secondOpnd is defined in another BB */
+    std::vector<Insn *> defInsnVec =
+        cgFunc.GetRD()->FindRegDefBetweenInsn(secondRegNO, bb.GetFirstInsn(), insn.GetPrev());
+    if (defInsnVec.size() != 1) {
+        return false;
+    }
+    defInsnForSecondOpnd = defInsnVec.back();
+    /* part defined */
+    if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) ||
+        (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) ||
+        (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) {
+        return false;
+    }
+    if (AArch64isa::IsPseudoInstruction(defInsnForSecondOpnd->GetMachineOpcode()) || defInsnForSecondOpnd->IsCall()) {
+        return false;
+    }
+    /* unconcerned regs. */
+    if ((secondRegNO >= RLR && secondRegNO <= RZR) || secondRegNO == RFP) {
+        return false;
+    }
+    if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) {
+        auto *memOpnd = static_cast<MemOperand *>(defInsnForSecondOpnd->GetMemOpnd());
+        if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) {
+            return false;
+        }
+    }
+
+    bool findFinish = cgFunc.GetRD()->FindRegUseBetweenInsn(secondRegNO, defInsnForSecondOpnd->GetNext(),
+                                                            bb.GetLastInsn(), srcOpndUseInsnSet);
+    if (!findFinish && bb.GetLiveOut()->TestBit(secondRegNO)) {
+        return false;
+    }
+    if (cgFunc.IsAfterRegAlloc() && findFinish && srcOpndUseInsnSet.size() > 1) {
+        /* used later before being killed. */
+        return false;
+    }
+    if (cgFunc.IsAfterRegAlloc()) {
+        for (auto *usePoint : srcOpndUseInsnSet) {
+            if (usePoint->IsCall()) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+bool BackPropPattern::CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn)
+{
+    /* secondOpnd may be defined in another BB */
+    InsnSet defInsnVec = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true);
+    if (defInsnVec.size() != 1) {
+        return false;
+    }
+    defInsnForSecondOpnd = *(defInsnVec.begin());
+
+    /* ensure that there is no first RegNO def/use between insn and defInsnForSecondOpnd */
+    std::vector<Insn *> defInsnVecFirst;
+
+    if (insn.GetBB() != defInsnForSecondOpnd->GetBB()) {
+        defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, &insn);
+    } else {
+        defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsn(firstRegNO, defInsnForSecondOpnd, insn.GetPrev());
+    }
+    if (!defInsnVecFirst.empty()) {
+        return false;
+    }
+    /* part defined */
+    if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) ||
+        (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) ||
+        (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) {
+        return false;
+    }
+
+    if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) {
+        auto *memOpnd = static_cast<MemOperand *>(defInsnForSecondOpnd->GetMemOpnd());
+        if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) {
+            return false;
+        }
+    }
+
+    srcOpndUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsnForSecondOpnd, secondRegNO, true);
+    /*
+     * a useInsn is not expected to have multiple definitions,
+     * and the replaced opnd is not expected to have a definition already
+     */
+    return CheckReplacedUseInsn(insn);
+}
+
+bool BackPropPattern::CheckPredefineInsn(Insn &insn)
+{
+    if (insn.GetPrev() == defInsnForSecondOpnd) {
+        return true;
+    }
+    std::vector<Insn *> preDefInsnForFirstOpndVec;
+    /* there is no predefine insn in the current bb */
+    if (!cgFunc.GetRD()->RegIsUsedOrDefBetweenInsn(firstRegNO, *defInsnForSecondOpnd, insn)) {
+        return false;
+    }
+    return true;
+}
+
+bool BackPropPattern::CheckReplacedUseInsn(Insn &insn)
+{
+    for (auto *useInsn : srcOpndUseInsnSet) {
+        if (useInsn->GetMemOpnd() != nullptr) {
+            auto *a64MemOpnd = static_cast<MemOperand *>(useInsn->GetMemOpnd());
+            if (!a64MemOpnd->IsIntactIndexed()) {
+                if (a64MemOpnd->GetBaseRegister() != nullptr &&
+                    a64MemOpnd->GetBaseRegister()->GetRegisterNumber() == secondRegNO) {
+                    return false;
+                }
+            }
+        }
+        /* insn has been checked def */
+        if (useInsn == &insn) {
+            if (defInsnForSecondOpnd != useInsn->GetPrev() &&
+                cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) {
+                return false;
+            }
+            continue;
+        }
+        auto checkOneDefOnly = [](const InsnSet &defSet, const Insn &oneDef, bool checkHasDef = false) -> bool {
+            if (defSet.size() > 1) {
+                return false;
+            } else if (defSet.size() == 1) {
+                if (&oneDef != *(defSet.begin())) {
+                    return false;
+                }
+            } else {
+                if (checkHasDef) {
+                    CHECK_FATAL(false, "find def insn failed");
+                }
+            }
+            return true;
+        };
+        /* ensure that the use insns to be replaced are defined by defInsnForSecondOpnd only */
+        if (useInsn->IsMemAccess() &&
+            static_cast<MemOperand *>(useInsn->GetMemOpnd())->GetIndexOpt() != MemOperand::kIntact) {
+            return false;
+        }
+        InsnSet defInsnVecOfSrcOpnd = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, secondRegNO, true);
+        if (!checkOneDefOnly(defInsnVecOfSrcOpnd, *defInsnForSecondOpnd, true)) {
+            return false;
+        }
+
+        InsnSet defInsnVecOfFirstReg = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true);
+        if (!checkOneDefOnly(defInsnVecOfFirstReg, insn)) {
+            return false;
+        }
+
+        if (defInsnForSecondOpnd != useInsn->GetPrev() &&
+            cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool BackPropPattern::CheckRedefineInsn(Insn &insn)
+{
+    for (auto useInsn : srcOpndUseInsnSet) {
+        Insn *startInsn = &insn;
+        Insn *endInsn = useInsn;
+        if (endInsn == startInsn) {
+            if (cgFunc.GetRD()->RegIsUsedIncaller(firstRegNO, insn, *useInsn)) {
+                return false;
+            } else {
+                continue;
+            }
+        }
+
+        if (useInsn->GetBB() == insn.GetBB()) {
+            if (useInsn->GetId() < insn.GetId()) {
+                startInsn = useInsn;
+                endInsn = &insn;
+            }
+        }
+        if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(firstRegNO, *startInsn, *endInsn, true, true)) {
+            return false;
+        }
+        if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, *startInsn, *endInsn, true)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool BackPropPattern::CheckCondition(Insn &insn)
+{
+    if (!CheckAndGetOpnd(insn)) {
+        return false;
+    }
+    /* unless there is a reason, dest must not live out of the current BB */
+    if (cgFunc.HasAsm() && !DestOpndHasUseInsns(insn)) {
+        return false;
+    }
+    /* the first register must not be live out to eh_succs */
+    if (DestOpndLiveOutToEHSuccs(insn)) {
+        return false;
+    }
+    if (globalProp) {
+        if (!CheckSrcOpndDefAndUseInsnsGlobal(insn)) {
+            return false;
+        }
+    } else {
+        if (!CheckSrcOpndDefAndUseInsns(insn)) {
+            return false;
+        }
+        if (!CheckPredefineInsn(insn)) {
+            return false;
+        }
+        if (!CheckRedefineInsn(insn)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void BackPropPattern::Optimize(Insn &insn)
+{
+    Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    ReplaceAllUsedOpndWithNewOpnd(srcOpndUseInsnSet, secondRegNO, firstOpnd, true);
+    /* replace define insn */
+    const InsnDesc *md = defInsnForSecondOpnd->GetDesc();
+    uint32 opndNum = defInsnForSecondOpnd->GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = defInsnForSecondOpnd->GetOperand(i);
+        if (!md->opndMD[i]->IsRegDef() && !opnd.IsMemoryAccessOperand()) {
+            continue;
+        }
+
+        if (opnd.IsRegister() && (static_cast<RegOperand &>(opnd).GetRegisterNumber() == secondRegNO)) {
+            /* remove remat info */
+            Operand &defOp = defInsnForSecondOpnd->GetOperand(i);
+            CHECK_FATAL(defOp.IsRegister(), "unexpected def opnd type");
+            auto &defRegOp = static_cast<RegOperand &>(defOp);
+            MIRPreg *preg = static_cast<AArch64CGFunc &>(cgFunc).GetPseudoRegFromVirtualRegNO(
+                defRegOp.GetRegisterNumber(), CGOptions::DoCGSSA());
+            if (preg != nullptr) {
+                preg->SetOp(OP_undef);
+            }
+            defInsnForSecondOpnd->SetOperand(i, firstOpnd);
+            cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB());
+        } else if (opnd.IsMemoryAccessOperand()) {
+            MemOperand &memOpnd = static_cast<MemOperand &>(opnd);
+            RegOperand *base = memOpnd.GetBaseRegister();
+            if (base != nullptr && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
+                (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && base->GetRegisterNumber() == secondRegNO) {
+                MemOperand *newMem = static_cast<MemOperand *>(opnd.Clone(*cgFunc.GetMemoryPool()));
+                CHECK_FATAL(newMem != nullptr, "null ptr check");
+                newMem->SetBaseRegister(static_cast<RegOperand &>(firstOpnd));
+                defInsnForSecondOpnd->SetOperand(i, *newMem);
+                cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB());
+            }
+        }
+    }
+    /* There is a special implication when backward propagation is allowed for physical register R0.
+     * This is the case where the calling func foo directly returns the result from the callee bar:
+     *
+     *   foo:
+     *     bl               // bar()
+     *     mov vreg, X0     // res = bar(), naive bkprop
+     *     ....             // X0 is not redefined
+     *     mov X0, vreg     // return res
+     *     ret
+     *
+     * ====> after naive bkprop, X0 may be reused, as RA sees "X0 has not been used" after the bl.
+     * In fact, X0 is implicitly used by foo, so we need to tell RA that X0 is live.
+     * To keep RA simple, we tell it not to use X0 by keeping "mov X0, X0". That is:
+     *
+     *   foo:
+     *     bl               // bar(); perform backward prop of X0 and force X0 not to be reused
+     *     ....
+     *     mov X0, X0       // this can easily be removed later in the peephole phase
+     *     ret
+     */
+    if (cgFunc.HasCall() && !(cgFunc.GetFunction().IsReturnVoid()) && (firstRegNO == R0) &&
+        (static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R0)) {
+        /* keep this instruction: mov R0, R0 */
+        cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true);
+        return;
+    } else {
+        insn.GetBB()->RemoveInsn(insn);
+        cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true);
+    }
+}
+
+void BackPropPattern::Init()
+{
+    firstRegOpnd = nullptr;
+    secondRegOpnd = nullptr;
+    firstRegNO = 0;
+    secondRegNO = 0;
+    srcOpndUseInsnSet.clear();
+    defInsnForSecondOpnd = nullptr;
+}
+
+void BackPropPattern::Run()
+{
+    bool secondTime = false;
+    std::set<BB *> modifiedBB;
+    do {
+        FOR_ALL_BB(bb, &cgFunc) {
+            if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) {
+                continue;
+            }
+
+            if (secondTime) {
+                modifiedBB.erase(bb);
+            }
+
+            FOR_BB_INSNS_REV(insn, bb) {
+                Init();
+                if (!CheckCondition(*insn)) {
+                    continue;
+                }
+                (void)modifiedBB.insert(bb);
+                Optimize(*insn);
+            }
+        }
+        secondTime = true;
+    } while (!modifiedBB.empty());
+}
+
+bool CmpCsetPattern::CheckCondition(Insn &insn)
+{
+    nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr || !insn.IsMachineInstruction()) {
+        return false;
+    }
+
+    MOperator firstMop = insn.GetMachineOpcode();
+    MOperator secondMop = nextInsn->GetMachineOpcode();
+    if (!(((firstMop == MOP_wcmpri) || (firstMop == MOP_xcmpri)) &&
+          ((secondMop == MOP_wcsetrc) || (secondMop == MOP_xcsetrc)))) {
+        return false;
+    }
+
+    /* get the cmp first operand */
+    cmpFirstOpnd = &(insn.GetOperand(kInsnSecondOpnd));
+    /* get the cmp second operand; its ImmOperand must be 0 or 1 */
+    cmpSecondOpnd = &(insn.GetOperand(kInsnThirdOpnd));
+    DEBUG_ASSERT(cmpSecondOpnd->IsIntImmediate(), "expects ImmOperand");
+    ImmOperand *cmpConstOpnd = static_cast<ImmOperand *>(cmpSecondOpnd);
+    cmpConstVal = cmpConstOpnd->GetValue();
+    /* get the cset first operand */
+    csetFirstOpnd = &(nextInsn->GetOperand(kInsnFirstOpnd));
+    if (((cmpConstVal != 0) && (cmpConstVal != 1)) || (cmpFirstOpnd->GetSize() != csetFirstOpnd->GetSize()) ||
+        !OpndDefByOneOrZero(insn, 1)) {
+        return false;
+    }
+
+    InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false);
+    if (useInsnSet.size() > 1) {
+        return false;
+    }
+    return true;
+}
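+
+/*
+ * Illustrative effect of CmpCsetPattern, as performed by Optimize below (our
+ * example), when the compared register is already known to hold 0 or 1:
+ *     cmp  w0, #0
+ *     cset w1, ne      =>   mov w1, w0
+ *     cmp  w0, #1
+ *     cset w1, ne      =>   eor w1, w0, #1
+ */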
+
+void CmpCsetPattern::Optimize(Insn &insn)
+{
+    Insn *csetInsn = nextInsn;
+    BB &bb = *insn.GetBB();
+    nextInsn = nextInsn->GetNextMachineInsn();
+    /* get the condition operand */
+    CondOperand &cond = static_cast<CondOperand &>(csetInsn->GetOperand(kInsnSecondOpnd));
+    if (((cmpConstVal == 0) && (cond.GetCode() == CC_NE)) || ((cmpConstVal == 1) && (cond.GetCode() == CC_EQ))) {
+        if (RegOperand::IsSameReg(*cmpFirstOpnd, *csetFirstOpnd)) {
+            bb.RemoveInsn(insn);
+            bb.RemoveInsn(*csetInsn);
+        } else {
+            MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr;
+            Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd);
+            newInsn.SetId(insn.GetId());
+            bb.ReplaceInsn(insn, newInsn);
+            bb.RemoveInsn(*csetInsn);
+        }
+    } else if (((cmpConstVal == 1) && (cond.GetCode() == CC_NE)) || ((cmpConstVal == 0) && (cond.GetCode() == CC_EQ))) {
+        MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12;
+        constexpr int64 eorImm = 1;
+        auto &aarch64CGFunc = static_cast<AArch64CGFunc &>(cgFunc);
+        ImmOperand &one = aarch64CGFunc.CreateImmOperand(eorImm, k8BitSize, false);
+        Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd, one);
+        newInsn.SetId(insn.GetId());
+        bb.ReplaceInsn(insn, newInsn);
+        bb.RemoveInsn(*csetInsn);
+    }
+    cgFunc.GetRD()->UpdateInOut(bb, true);
+}
+
+void CmpCsetPattern::Init()
+{
+    cmpConstVal = 0;
+    cmpFirstOpnd = nullptr;
+    cmpSecondOpnd = nullptr;
+    csetFirstOpnd = nullptr;
+}
+
+void CmpCsetPattern::Run()
+{
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            Init();
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+bool CselPattern::CheckCondition(Insn &insn)
+{
+    MOperator mopCode = insn.GetMachineOpcode();
+    if ((mopCode != MOP_xcselrrrc) && (mopCode != MOP_wcselrrrc)) {
+        return false;
+    }
+    return true;
+}
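+
+/*
+ * Illustrative effect of CselPattern, as performed by Optimize below (our
+ * example), when the selected inputs are proven constants 1 and 0:
+ *     mov  w1, #1 ; mov w2, #0
+ *     csel w0, w1, w2, GE   =>   cset w0, GE
+ *     csel w0, w2, w1, GE   =>   cset w0, LT   // inverted condition
+ */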
MOP_xcsetrc : MOP_wcsetrc); + Operand &rflag = cgFunc.GetOrCreateRflag(); + if (OpndDefByOne(insn, kInsnSecondOpnd) && OpndDefByZero(insn, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, cond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } else if (OpndDefByZero(insn, kInsnSecondOpnd) && OpndDefByOne(insn, kInsnThirdOpnd)) { + auto &originCond = static_cast(cond); + ConditionCode inverseCondCode = GetReverseBasicCC(originCond.GetCode()); + if (inverseCondCode == kCcLast) { + return; + } + auto &aarchCGFunc = static_cast(cgFunc); + CondOperand &inverseCond = aarchCGFunc.GetCondOperand(inverseCondCode); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, inverseCond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } +} + +void CselPattern::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +uint32 RedundantUxtPattern::GetInsnValidBit(const Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + uint32 nRet; + switch (mOp) { + case MOP_wcsetrc: + case MOP_xcsetrc: + nRet = 1; + break; + case MOP_wldrb: + case MOP_wldarb: + case MOP_wldxrb: + case MOP_wldaxrb: + nRet = k8BitSize; + break; + case MOP_wldrh: + case MOP_wldarh: + case MOP_wldxrh: + case MOP_wldaxrh: + nRet = k16BitSize; + break; + case MOP_wmovrr: + case MOP_wmovri32: + case MOP_wldrsb: + case MOP_wldrsh: + case MOP_wldli: + case MOP_wldr: + case MOP_wldp: + case MOP_wldar: + case MOP_wmovkri16: + case MOP_wmovzri16: + case MOP_wmovnri16: + case MOP_wldxr: + case MOP_wldaxr: + case MOP_wldaxp: + case MOP_wcsincrrrc: + case MOP_wcselrrrc: + case MOP_wcsinvrrrc: + nRet = k32BitSize; + break; + default: + nRet = k64BitSize; + break; + } + return nRet; +} + +uint32 RedundantUxtPattern::GetMaximumValidBit(Insn &insn, uint8 index, InsnSet &visitedInsn) const +{ + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, index); + if (defInsnSet.empty()) { + /* disable opt when there is no def point. */ + return k64BitSize; + } + + uint32 validBit = 0; + uint32 nMaxValidBit = 0; + for (auto &defInsn : defInsnSet) { + if (visitedInsn.find(defInsn) != visitedInsn.end()) { + continue; + } + + (void)visitedInsn.insert(defInsn); + MOperator mOp = defInsn->GetMachineOpcode(); + if ((mOp == MOP_wmovrr) || (mOp == MOP_xmovrr)) { + validBit = GetMaximumValidBit(*defInsn, 1, visitedInsn); + } else { + validBit = GetInsnValidBit(*defInsn); + } + + nMaxValidBit = nMaxValidBit < validBit ? 
validBit : nMaxValidBit;
+    }
+    return nMaxValidBit;
+}
+
+bool RedundantUxtPattern::CheckCondition(Insn &insn)
+{
+    BB &bb = *insn.GetBB();
+    InsnSet visitedInsn1;
+    InsnSet visitedInsn2;
+    if (!((insn.GetMachineOpcode() == MOP_xuxth32 &&
+           GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn1) <= k16BitSize) ||
+          (insn.GetMachineOpcode() == MOP_xuxtb32 &&
+           GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn2) <= k8BitSize))) {
+        return false;
+    }
+
+    Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    secondOpnd = &(insn.GetOperand(kInsnSecondOpnd));
+    if (RegOperand::IsSameReg(firstOpnd, *secondOpnd)) {
+        bb.RemoveInsn(insn);
+        /* update in/out */
+        cgFunc.GetRD()->UpdateInOut(bb, true);
+        return false;
+    }
+    useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false);
+    RegOperand &firstRegOpnd = static_cast<RegOperand&>(firstOpnd);
+    firstRegNO = firstRegOpnd.GetRegisterNumber();
+    /* for "uxth R1, V501": R1 is a parameter register, so this can't be optimized */
+    if (firstRegOpnd.IsPhysicalRegister()) {
+        return false;
+    }
+
+    if (useInsnSet.empty()) {
+        bb.RemoveInsn(insn);
+        /* update in/out */
+        cgFunc.GetRD()->UpdateInOut(bb, true);
+        return false;
+    }
+
+    RegOperand *secondRegOpnd = static_cast<RegOperand*>(secondOpnd);
+    DEBUG_ASSERT(secondRegOpnd != nullptr, "secondRegOpnd should not be nullptr");
+    uint32 secondRegNO = secondRegOpnd->GetRegisterNumber();
+    for (auto useInsn : useInsnSet) {
+        InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true);
+        if ((defInsnSet.size() > 1) || !(cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn))) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void RedundantUxtPattern::Optimize(Insn &insn)
+{
+    BB &bb = *insn.GetBB();
+    ReplaceAllUsedOpndWithNewOpnd(useInsnSet, firstRegNO, *secondOpnd, true);
+    bb.RemoveInsn(insn);
+    cgFunc.GetRD()->UpdateInOut(bb, true);
+}
+
+void RedundantUxtPattern::Init()
+{
+    useInsnSet.clear();
+    secondOpnd = nullptr;
+}
+
+void RedundantUxtPattern::Run()
+{
+    FOR_ALL_BB(bb, &cgFunc) {
+        if (bb->IsUnreachable()) {
+            continue;
+        }
+        FOR_BB_INSNS_SAFE(insn, bb, nextInsn) {
+            Init();
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+bool LocalVarSaveInsnPattern::CheckFirstInsn(const Insn &firstInsn)
+{
+    MOperator mOp = firstInsn.GetMachineOpcode();
+    if (mOp != MOP_xmovrr && mOp != MOP_wmovrr) {
+        return false;
+    }
+    firstInsnSrcOpnd = &(firstInsn.GetOperand(kInsnSecondOpnd));
+    RegOperand *firstInsnSrcReg = static_cast<RegOperand*>(firstInsnSrcOpnd);
+    if (firstInsnSrcReg->GetRegisterNumber() != R0) {
+        return false;
+    }
+    firstInsnDestOpnd = &(firstInsn.GetOperand(kInsnFirstOpnd));
+    RegOperand *firstInsnDestReg = static_cast<RegOperand*>(firstInsnDestOpnd);
+    if (firstInsnDestReg->IsPhysicalRegister()) {
+        return false;
+    }
+    return true;
+}
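+
+/*
+ * Editor's note (not from the original patch): the matched store must be a plain spill into
+ * the current stack frame (the base register is the frame register and there is no index
+ * register); stores through arbitrary pointers could alias other memory and cannot be
+ * tracked by the mem def/use queries used below.
+ */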
+bool LocalVarSaveInsnPattern::CheckSecondInsn()
+{
+    MOperator mOp = secondInsn->GetMachineOpcode();
+    if (mOp != MOP_wstr && mOp != MOP_xstr) {
+        return false;
+    }
+    secondInsnSrcOpnd = &(secondInsn->GetOperand(kInsnFirstOpnd));
+    if (!RegOperand::IsSameReg(*firstInsnDestOpnd, *secondInsnSrcOpnd)) {
+        return false;
+    }
+    /* check that the memOperand is a stack memOperand, and x0 is stored in the localref var region */
+    secondInsnDestOpnd = &(secondInsn->GetOperand(kInsnSecondOpnd));
+    MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
+    RegOperand *baseReg = secondInsnDestMem->GetBaseRegister();
+    RegOperand *indexReg = secondInsnDestMem->GetIndexRegister();
+    if ((baseReg == nullptr) || !(cgFunc.IsFrameReg(*baseReg)) || (indexReg != nullptr)) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckAndGetUseInsn(Insn &firstInsn)
+{
+    InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(firstInsn, kInsnFirstOpnd, false);
+    if (useInsnSet.size() != 2) { /* secondInsn and another useInsn */
+        return false;
+    }
+
+    /* useInsnSet includes secondInsn and another useInsn */
+    for (auto tmpUseInsn : useInsnSet) {
+        if (tmpUseInsn->GetId() != secondInsn->GetId()) {
+            useInsn = tmpUseInsn;
+            break;
+        }
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckLiveRange(const Insn &firstInsn)
+{
+    uint32 maxInsnNO = cgFunc.GetRD()->GetMaxInsnNO();
+    uint32 useInsnID = useInsn->GetId();
+    uint32 defInsnID = firstInsn.GetId();
+    uint32 distance = useInsnID > defInsnID ? useInsnID - defInsnID : defInsnID - useInsnID;
+    float liveRangeProportion = static_cast<float>(distance) / maxInsnNO;
+    /* 0.3 is an empirical balance point: only live ranges long enough are worth the extra ldr */
+    if (liveRangeProportion < 0.3) {
+        return false;
+    }
+    return true;
+}
+
+bool LocalVarSaveInsnPattern::CheckCondition(Insn &firstInsn)
+{
+    secondInsn = firstInsn.GetNext();
+    if (secondInsn == nullptr) {
+        return false;
+    }
+    /* check that firstInsn is: mov vreg, R0 */
+    if (!CheckFirstInsn(firstInsn)) {
+        return false;
+    }
+    /* check that secondInsn is: str vreg, stackMem */
+    if (!CheckSecondInsn()) {
+        return false;
+    }
+    /* find the uses of the vreg */
+    if (!CheckAndGetUseInsn(firstInsn)) {
+        return false;
+    }
+    /* approximate the live range using insn distance */
+    if (!CheckLiveRange(firstInsn)) {
+        return false;
+    }
+    RegOperand *firstInsnDestReg = static_cast<RegOperand*>(firstInsnDestOpnd);
+    regno_t firstInsnDestRegNO = firstInsnDestReg->GetRegisterNumber();
+    InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstInsnDestRegNO, true);
+    if (defInsnSet.size() != 1) {
+        return false;
+    }
+    DEBUG_ASSERT((*(defInsnSet.begin()))->GetId() == firstInsn.GetId(), "useInsn should have only one def insn: firstInsn");
+    /* check whether the stack mem is changed or not */
+    MemOperand *secondInsnDestMem = static_cast<MemOperand*>(secondInsnDestOpnd);
+    int64 memOffset = secondInsnDestMem->GetOffsetImmediate()->GetOffsetValue();
+    InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*useInsn, memOffset, true);
+    if (memDefInsnSet.size() != 1) {
+        return false;
+    }
+    if ((*(memDefInsnSet.begin()))->GetId() != secondInsn->GetId()) {
+        return false;
+    }
+    /* check whether there is a call between def and use */
+    if (!cgFunc.GetRD()->HasCallBetweenDefUse(firstInsn, *useInsn)) {
+        return false;
+    }
+    return true;
+}
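+
+/*
+ * Editor's illustrative sketch (not from the original patch; x29 as frame register is an
+ * assumption): the callee result stays on the stack across the long live range, and the
+ * local var is reloaded just before its use, so no register is pinned for the whole range:
+ *     bl   callee                bl   callee
+ *     mov  vreg, x0              str  x0, [x29, #off]
+ *     str  vreg, [x29, #off]     ....
+ *     ....                ====>  ....
+ *     use  vreg                  ldr  vreg, [x29, #off]
+ *                                use  vreg
+ */
+void LocalVarSaveInsnPattern::Optimize(Insn &insn)
+{
+    /* insert the ldr insn before useInsn */
+    MOperator ldrOpCode = secondInsnSrcOpnd->GetSize() == k64BitSize ? 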
MOP_xldr : MOP_wldr; + Insn &ldrInsn = cgFunc.GetInsnBuilder()->BuildInsn(ldrOpCode, *secondInsnSrcOpnd, *secondInsnDestOpnd); + ldrInsn.SetId(useInsn->GetId() - 1); + useInsn->GetBB()->InsertInsnBefore(*useInsn, ldrInsn); + cgFunc.GetRD()->UpdateInOut(*useInsn->GetBB(), true); + secondInsn->SetOperand(kInsnFirstOpnd, *firstInsnSrcOpnd); + BB *saveInsnBB = insn.GetBB(); + saveInsnBB->RemoveInsn(insn); + cgFunc.GetRD()->UpdateInOut(*saveInsnBB, true); +} + +void LocalVarSaveInsnPattern::Init() +{ + firstInsnSrcOpnd = nullptr; + firstInsnDestOpnd = nullptr; + secondInsnSrcOpnd = nullptr; + secondInsnDestOpnd = nullptr; + useInsn = nullptr; + secondInsn = nullptr; +} + +void LocalVarSaveInsnPattern::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsCleanup()) { + continue; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!insn->IsCall()) { + continue; + } + Insn *firstInsn = insn->GetNextMachineInsn(); + if (firstInsn == nullptr) { + continue; + } + Init(); + if (!CheckCondition(*firstInsn)) { + continue; + } + Optimize(*firstInsn); + } + } +} + +void ExtendShiftOptPattern::SetExMOpType(const Insn &use) +{ + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: { + exMOpType = kExAdd; + break; + } + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: { + exMOpType = kEwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: { + exMOpType = kExSub; + break; + } + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: { + exMOpType = kEwSub; + break; + } + case MOP_xcmnrr: + case MOP_xwcmnrre: + case MOP_xcmnrrs: { + exMOpType = kExCmn; + break; + } + case MOP_wcmnrr: + case MOP_wwcmnrre: + case MOP_wcmnrrs: { + exMOpType = kEwCmn; + break; + } + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: { + exMOpType = kExCmp; + break; + } + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: { + exMOpType = kEwCmp; + break; + } + default: { + exMOpType = kExUndef; + } + } +} + +void ExtendShiftOptPattern::SetLsMOpType(const Insn &use) +{ + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xaddrrrs: { + lsMOpType = kLxAdd; + break; + } + case MOP_waddrrr: + case MOP_waddrrrs: { + lsMOpType = kLwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xsubrrrs: { + lsMOpType = kLxSub; + break; + } + case MOP_wsubrrr: + case MOP_wsubrrrs: { + lsMOpType = kLwSub; + break; + } + case MOP_xcmnrr: + case MOP_xcmnrrs: { + lsMOpType = kLxCmn; + break; + } + case MOP_wcmnrr: + case MOP_wcmnrrs: { + lsMOpType = kLwCmn; + break; + } + case MOP_xcmprr: + case MOP_xcmprrs: { + lsMOpType = kLxCmp; + break; + } + case MOP_wcmprr: + case MOP_wcmprrs: { + lsMOpType = kLwCmp; + break; + } + case MOP_xeorrrr: + case MOP_xeorrrrs: { + lsMOpType = kLxEor; + break; + } + case MOP_weorrrr: + case MOP_weorrrrs: { + lsMOpType = kLwEor; + break; + } + case MOP_xinegrr: + case MOP_xinegrrs: { + lsMOpType = kLxNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_winegrr: + case MOP_winegrrs: { + lsMOpType = kLwNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_xiorrrr: + case MOP_xiorrrrs: { + lsMOpType = kLxIor; + break; + } + case MOP_wiorrrr: + case MOP_wiorrrrs: { + lsMOpType = kLwIor; + break; + } + default: { + lsMOpType = kLsUndef; + } + } +} + +void ExtendShiftOptPattern::SelectExtendOrShift(const Insn &def) +{ + MOperator op = def.GetMachineOpcode(); + switch (op) { + case MOP_xsxtb32: + case MOP_xsxtb64: + 
extendOp = ExtendShiftOperand::kSXTB;
+            break;
+        case MOP_xsxth32:
+        case MOP_xsxth64:
+            extendOp = ExtendShiftOperand::kSXTH;
+            break;
+        case MOP_xsxtw64:
+            extendOp = ExtendShiftOperand::kSXTW;
+            break;
+        case MOP_xuxtb32:
+            extendOp = ExtendShiftOperand::kUXTB;
+            break;
+        case MOP_xuxth32:
+            extendOp = ExtendShiftOperand::kUXTH;
+            break;
+        case MOP_xuxtw64:
+            extendOp = ExtendShiftOperand::kUXTW;
+            break;
+        case MOP_wlslrri5:
+        case MOP_xlslrri6:
+            shiftOp = BitShiftOperand::kLSL;
+            break;
+        case MOP_xlsrrri6:
+        case MOP_wlsrrri5:
+            shiftOp = BitShiftOperand::kLSR;
+            break;
+        case MOP_xasrrri6:
+        case MOP_wasrrri5:
+            shiftOp = BitShiftOperand::kASR;
+            break;
+        default: {
+            extendOp = ExtendShiftOperand::kUndef;
+            shiftOp = BitShiftOperand::kUndef;
+        }
+    }
+}
+
+/* first, the use must match SelectExtendOrShift */
+bool ExtendShiftOptPattern::CheckDefUseInfo(Insn &use, uint32 size)
+{
+    auto &regOperand = static_cast<RegOperand&>(defInsn->GetOperand(kInsnFirstOpnd));
+    Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd);
+    CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!");
+    auto &regDefSrc = static_cast<RegOperand&>(defSrcOpnd);
+    if (regDefSrc.IsPhysicalRegister()) {
+        return false;
+    }
+    /*
+     * avoid the case where there is an implicit cvt in between:
+     *     lsr  x2, x2, #8
+     *     ubfx w2, x2, #0, #32
+     *     eor  w0, w0, w2
+     *   ===>
+     *     lsr  x2, x2, #8
+     *     eor  w0, w0, x2     // the ubfx truncation is dropped
+     *   ==\=>
+     *     eor  w0, w0, w2, LSR #8
+     *
+     * the truncation makes a right shift produce the wrong value;
+     * a left shift does not matter
+     */
+    auto &useDefOpnd = static_cast<RegOperand&>(use.GetOperand(kInsnFirstOpnd));
+    if ((shiftOp != BitShiftOperand::kUndef || extendOp != ExtendShiftOperand::kUndef) &&
+        (regDefSrc.GetSize() > regOperand.GetSize() || useDefOpnd.GetSize() != size)) {
+        return false;
+    }
+    if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && (defSrcOpnd.GetSize() > size)) {
+        return false;
+    }
+    regno_t defSrcRegNo = regDefSrc.GetRegisterNumber();
+    /* check regDefSrc */
+    InsnSet defSrcSet = cgFunc.GetRD()->FindDefForRegOpnd(use, defSrcRegNo, true);
+    /* the first defSrcInsn must be the closest to useInsn */
+    if (defSrcSet.empty()) {
+        return false;
+    }
+    Insn *defSrcInsn = *defSrcSet.begin();
+    const InsnDesc *md = defSrcInsn->GetDesc();
+    if ((size != regOperand.GetSize()) && md->IsMove()) {
+        return false;
+    }
+    if (defInsn->GetBB() == use.GetBB()) {
+        /* check that the replaced reg is not redefined between defInsn and currInsn */
+        Insn *tmpInsn = defInsn->GetNext();
+        while (tmpInsn != &use) {
+            if (tmpInsn == defSrcInsn || tmpInsn == nullptr) {
+                return false;
+            }
+            tmpInsn = tmpInsn->GetNext();
+        }
+    } else { /* def and use are not in the same BB */
+        if (defSrcInsn->GetBB() != defInsn->GetBB()) {
+            return false;
+        }
+        if (defSrcInsn->GetId() > defInsn->GetId()) {
+            return false;
+        }
+    }
+    /* case:
+     *     lsl w0, w0, #5
+     *     eor w0, w2, w0
+     *   --->
+     *     eor w0, w2, w0, lsl 5
+     */
+    if (defSrcInsn == defInsn) {
+        InsnSet replaceRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsn, defSrcRegNo, true);
+        if (replaceRegUseSet.size() != k1BitSize) {
+            return false;
+        }
+        removeDefInsn = true;
+    }
+    return true;
+}
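+
+/*
+ * Editor's illustrative sketch (not from the original patch): for a one-source use such as
+ * neg, the shifted source replaces the second operand (replaceIdx = kInsnSecondOpnd):
+ *     lsl w1, w2, #3
+ *     neg w0, w1
+ *   ====>
+ *     neg w0, w2, LSL #3
+ */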
+/* Check whether the ExtendShiftOptPattern optimization can be performed. */
+ExtendShiftOptPattern::SuffixType ExtendShiftOptPattern::CheckOpType(const Operand &lastOpnd) const
+{
+    /* assign values to useType and defType */
+    uint32 useType = ExtendShiftOptPattern::kNoSuffix;
+    uint32 defType = shiftOp;
+    if (extendOp != ExtendShiftOperand::kUndef) {
+        defType = ExtendShiftOptPattern::kExten;
+    }
+    if (lastOpnd.IsOpdShift()) {
+        BitShiftOperand lastShiftOpnd = static_cast<const BitShiftOperand&>(lastOpnd);
+        useType = lastShiftOpnd.GetShiftOp();
+    } else if (lastOpnd.IsOpdExtend()) {
+        ExtendShiftOperand lastExtendOpnd = static_cast<const ExtendShiftOperand&>(lastOpnd);
+        useType = ExtendShiftOptPattern::kExten;
+        /* when both def and use are extends, they must use the same extend op */
+        if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) {
+            return ExtendShiftOptPattern::kNoSuffix;
+        }
+    }
+    return doOptimize[useType][defType];
+}
+
+/* suffix type of the new insn, per (useMop, defMop):
+ * ================================
+ * | useMop   | defMop | newMop   |
+ * | nosuffix | all    | all      |
+ * | exten    | ex     | ex       |
+ * | ls       | ex     | ls       |
+ * | asr      | !asr   | F        |
+ * | !asr     | asr    | F        |
+ * ================================
+ */
+void ExtendShiftOptPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount)
+{
+    AArch64CGFunc &a64CGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    uint32 lastIdx = use.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = use.GetOperand(lastIdx);
+    ExtendShiftOptPattern::SuffixType optType = CheckOpType(lastOpnd);
+    Operand *shiftOpnd = nullptr;
+    if (optType == ExtendShiftOptPattern::kNoSuffix) {
+        return;
+    } else if (optType == ExtendShiftOptPattern::kExten) {
+        replaceOp = exMOpTable[exMOpType];
+        if (amount > k4BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast<int32>(k64BitSize));
+    } else {
+        replaceOp = lsMOpTable[lsMOpType];
+        if (amount >= k32BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast<int32>(k64BitSize));
+    }
+    if (replaceOp == MOP_undef) {
+        return;
+    }
+
+    Insn *replaceUseInsn = nullptr;
+    Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd);
+    Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd);
+    if (replaceIdx == kInsnSecondOpnd) { /* replace a neg insn */
+        secondOpnd = &def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd);
+    } else {
+        Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd);
+    }
+    use.GetBB()->ReplaceInsn(use, *replaceUseInsn);
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "======= ReplaceInsn :\n";
+        use.Dump();
+        LogInfo::MapleLogger() << "======= NewInsn :\n";
+        replaceUseInsn->Dump();
+    }
+    if (removeDefInsn) {
+        if (GLOBAL_DUMP) {
+            LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n";
+            LogInfo::MapleLogger() << "======= RemoveDefInsn :\n";
+            defInsn->Dump();
+        }
+        defInsn->GetBB()->RemoveInsn(*defInsn);
+    }
+    cgFunc.GetRD()->InitGenUse(*defInsn->GetBB(), false);
+    cgFunc.GetRD()->UpdateInOut(*use.GetBB(), true);
+    newInsn = replaceUseInsn;
+    optSuccess = true;
+}
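+
+/*
+ * Editor's worked example for the table above (not from the original patch): the use insn has
+ * no suffix and the def is a shift, so the "nosuffix | all" row allows the fold:
+ *     lsl x1, x1, #3         // def: shiftOp = LSL, offset = 3
+ *     add x0, x2, x1         // use: no suffix, amount = 0
+ *   ======>
+ *     add x0, x2, x1, LSL #3
+ * By the "asr | !asr -> F" rows, mixing ASR with any other shift kind rejects the fold.
+ */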
+/*
+ * pattern1:
+ *     UXTB/UXTW X0, W1           <---- def x0
+ *     ....                       <---- (X0 not used)
+ *     AND/SUB/EOR X0, X1, X0     <---- use x0
+ *   ======>
+ *     AND/SUB/EOR X0, X1, W1 UXTB/UXTW
+ *
+ * pattern2:
+ *     LSL/LSR X0, X1, #8
+ *     ....                       (X0 not used)
+ *     AND/SUB/EOR X0, X1, X0
+ *   ======>
+ *     AND/SUB/EOR X0, X1, X1 LSL/LSR #8
+ */
+void ExtendShiftOptPattern::Optimize(Insn &insn)
+{
+    uint32 amount = 0;
+    uint32 offset = 0;
+    uint32 lastIdx = insn.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = insn.GetOperand(lastIdx);
+    if (lastOpnd.IsOpdShift()) {
+        BitShiftOperand &lastShiftOpnd = static_cast<BitShiftOperand&>(lastOpnd);
+        amount = lastShiftOpnd.GetShiftAmount();
+    } else if (lastOpnd.IsOpdExtend()) {
+        ExtendShiftOperand &lastExtendOpnd = static_cast<ExtendShiftOperand&>(lastOpnd);
+        amount = lastExtendOpnd.GetShiftAmount();
+    }
+    if (shiftOp != BitShiftOperand::kUndef) {
+        ImmOperand &immOpnd = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+        offset = static_cast<uint32>(immOpnd.GetValue());
+    }
+    amount += offset;
+
+    ReplaceUseInsn(insn, *defInsn, amount);
+}
+
+void ExtendShiftOptPattern::DoExtendShiftOpt(Insn &insn)
+{
+    Init();
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    Optimize(insn);
+    if (optSuccess) {
+        DoExtendShiftOpt(*newInsn);
+    }
+}
+
+/* check and set:
+ * exMOpType, lsMOpType, extendOp, shiftOp, defInsn
+ */
+bool ExtendShiftOptPattern::CheckCondition(Insn &insn)
+{
+    SetLsMOpType(insn);
+    SetExMOpType(insn);
+    if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) {
+        return false;
+    }
+    RegOperand &regOperand = static_cast<RegOperand&>(insn.GetOperand(replaceIdx));
+    if (regOperand.IsPhysicalRegister()) {
+        return false;
+    }
+    regno_t regNo = regOperand.GetRegisterNumber();
+    InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true);
+    if (regDefInsnSet.size() != k1BitSize) {
+        return false;
+    }
+    defInsn = *regDefInsnSet.begin();
+    CHECK_FATAL((defInsn != nullptr), "defInsn is null!");
+
+    SelectExtendOrShift(*defInsn);
+    /* defInsn must be a shift or an extend */
+    if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) {
+        return false;
+    }
+    return CheckDefUseInfo(insn, regOperand.GetSize());
+}
+
+void ExtendShiftOptPattern::Init()
+{
+    replaceOp = MOP_undef;
+    extendOp = ExtendShiftOperand::kUndef;
+    shiftOp = BitShiftOperand::kUndef;
+    defInsn = nullptr;
+    replaceIdx = kInsnThirdOpnd;
+    newInsn = nullptr;
+    optSuccess = false;
+    removeDefInsn = false;
+    exMOpType = kExUndef;
+    lsMOpType = kLsUndef;
+}
+
+void ExtendShiftOptPattern::Run()
+{
+    if (!cgFunc.GetMirModule().IsCModule()) {
+        return;
+    }
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            DoExtendShiftOpt(*insn);
+        }
+    }
+}
+
+void ExtenToMovPattern::Run()
+{
+    if (!cgFunc.GetMirModule().IsCModule()) {
+        return;
+    }
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+/* check for an implicit uxtw */
+bool ExtenToMovPattern::CheckHideUxtw(const Insn &insn, regno_t regno) const
+{
+    const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()];
+    if (md->IsMove()) {
+        return false;
+    }
+    uint32 optSize = insn.GetOperandSize();
+    for (uint32 i = 0; i < optSize; ++i) {
+        if (regno == static_cast<RegOperand&>(insn.GetOperand(i)).GetRegisterNumber()) {
+            auto *curOpndDescription = md->GetOpndDes(i);
+            if (curOpndDescription->IsDef() && curOpndDescription->GetSize() == k32BitSize) {
+                return true;
+            }
+            break;
+        }
+    }
+    return false;
+}
+bool ExtenToMovPattern::CheckUxtw(Insn &insn)
+{
+    if (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize &&
+        insn.GetOperand(kInsnSecondOpnd).GetSize() == k32BitSize) {
+        DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "is not Register");
+        regno_t regno = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber();
+        InsnSet preDef = cgFunc.GetRD()->FindDefForRegOpnd(insn, kInsnSecondOpnd, false);
+        if (preDef.empty()) {
+            return false;
+        }
+        for (auto defInsn : preDef) {
+            if (!CheckHideUxtw(*defInsn, regno)) {
+                return false;
+            }
+        }
+        replaceMop = MOP_xmovrr_uxtw;
+        return true;
+    }
+    return false;
+}
+
+bool ExtenToMovPattern::CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum)
+{
+    InsnSet srcDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, srcRegNo, true);
+    for (auto defInsn : srcDefSet) {
+        CHECK_FATAL((defInsn != nullptr), "defInsn is null!");
+        MOperator mOp = defInsn->GetMachineOpcode();
+        switch (mOp) {
+            case MOP_wiorrri12:
+            case MOP_weorrri12: {
+                /* check the immVal if the mop is OR/EOR */
+                ImmOperand &imm = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                auto bitNum = static_cast<uint32>(imm.GetValue());
+                if ((bitNum >> validNum) != 0) {
+                    return false;
+                }
+            }
+            /* fall through: the OR/EOR src reg must also stay within validNum bits */
+            case MOP_wandrri12: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wandrrr: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd1 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                RegOperand &defSrcRegOpnd2 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber();
+                regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum) && !CheckSrcReg(*defInsn, defSrcRegNo2, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wiorrrr:
+            case MOP_weorrrr: {
+                /* check defSrcReg */
+                RegOperand &defSrcRegOpnd1 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnSecondOpnd));
+                RegOperand &defSrcRegOpnd2 = static_cast<RegOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+                regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber();
+                regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber();
+                if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum) || !CheckSrcReg(*defInsn, defSrcRegNo2, validNum)) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wldrb: {
+                if (validNum != k8BitSize) {
+                    return false;
+                }
+                break;
+            }
+            case MOP_wldrh: {
+                if (validNum != k16BitSize) {
+                    return false;
+                }
+                break;
+            }
+            default:
+                return false;
+        }
+    }
+    return true;
+}
+
+bool ExtenToMovPattern::BitNotAffected(Insn &insn, uint32 validNum)
+{
+    RegOperand &firstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    RegOperand &secondOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    regno_t desRegNo = firstOpnd.GetRegisterNumber();
+    regno_t srcRegNo = secondOpnd.GetRegisterNumber();
+    InsnSet desDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, desRegNo, true);
+    /* desReg must not be redefined */
+    if (!desDefSet.empty()) {
+        return false;
+    }
+    if (!CheckSrcReg(insn, srcRegNo, validNum)) {
+        return false;
+    }
+    replaceMop = MOP_wmovrr;
+    return true;
+}
+
+bool ExtenToMovPattern::CheckCondition(Insn &insn)
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    switch (mOp) {
+        case MOP_xuxtw64:
+            return CheckUxtw(insn);
+        case MOP_xuxtb32:
+            return BitNotAffected(insn, k8BitSize);
+        case MOP_xuxth32:
+            return BitNotAffected(insn, k16BitSize);
+        default:
+            return false;
+    }
+}
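+
+/*
+ * Editor's illustrative sketch (not from the original patch): when every def of the source
+ * already leaves only the low bits set, the zero-extend degenerates to a plain move:
+ *     ldrb w1, [x2]        // only bits [7:0] can be set
+ *     uxtb w0, w1          // redundant extension
+ *   ====>
+ *     ldrb w1, [x2]
+ *     mov  w0, w1
+ */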
+/* No initialization required */
+void ExtenToMovPattern::Init()
+{
+    replaceMop = MOP_undef;
+}
+
+void ExtenToMovPattern::Optimize(Insn &insn)
+{
+    insn.SetMOP(AArch64CG::kMd[replaceMop]);
+}
+
+void SameDefPattern::Run()
+{
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!CheckCondition(*insn) || !bb->GetEhPreds().empty()) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+
+void SameDefPattern::Init()
+{
+    currInsn = nullptr;
+    sameInsn = nullptr;
+}
+
+bool SameDefPattern::CheckCondition(Insn &insn)
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    if (insn.GetBB()->GetPreds().size() > k1BitSize) {
+        return false;
+    }
+    if (insn.GetBB()->HasCall()) {
+        return false;
+    }
+    return (mOp == MOP_wcmprr) || (mOp == MOP_xcmprr) || (mOp == MOP_xwcmprre) || (mOp == MOP_xcmprrs);
+}
+
+void SameDefPattern::Optimize(Insn &insn)
+{
+    InsnSet sameDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, 0, false);
+    if (sameDefSet.size() != k1BitSize) {
+        return;
+    }
+    Insn *sameDefInsn = *sameDefSet.begin();
+    if (sameDefInsn == nullptr) {
+        return;
+    }
+    currInsn = &insn;
+    sameInsn = sameDefInsn;
+    if (!IsSameDef()) {
+        return;
+    }
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In SameDefPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "======= remove insn: \n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "======= sameDef insn: \n";
+        sameDefInsn->Dump();
+    }
+    insn.GetBB()->RemoveInsn(insn);
+}
+
+bool SameDefPattern::IsSameDef()
+{
+    if (!CheckCondition(*sameInsn)) {
+        return false;
+    }
+    if (currInsn == sameInsn) {
+        return false;
+    }
+    if (currInsn->GetMachineOpcode() != sameInsn->GetMachineOpcode()) {
+        return false;
+    }
+    for (uint32 i = k1BitSize; i < currInsn->GetOperandSize(); ++i) {
+        Operand &opnd0 = currInsn->GetOperand(i);
+        Operand &opnd1 = sameInsn->GetOperand(i);
+        if (!IsSameOperand(opnd0, opnd1)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+bool SameDefPattern::IsSameOperand(Operand &opnd0, Operand &opnd1)
+{
+    if (opnd0.IsRegister()) {
+        CHECK_FATAL(opnd1.IsRegister(), "must be RegOperand!");
+        RegOperand &regOpnd0 = static_cast<RegOperand&>(opnd0);
+        RegOperand &regOpnd1 = static_cast<RegOperand&>(opnd1);
+        if (!RegOperand::IsSameReg(regOpnd0, regOpnd1)) {
+            return false;
+        }
+        regno_t regNo = regOpnd0.GetRegisterNumber();
+        /* the src reg must not be redefined between sameInsn and currInsn */
+        if (SrcRegIsRedefined(regNo)) {
+            return false;
+        }
+    } else if (opnd0.IsOpdShift()) {
+        CHECK_FATAL(opnd1.IsOpdShift(), "must be ShiftOperand!");
+        BitShiftOperand &shiftOpnd0 = static_cast<BitShiftOperand&>(opnd0);
+        BitShiftOperand &shiftOpnd1 = static_cast<BitShiftOperand&>(opnd1);
+        if (shiftOpnd0.GetShiftAmount() != shiftOpnd1.GetShiftAmount()) {
+            return false;
+        }
+    } else if (opnd0.IsOpdExtend()) {
+        CHECK_FATAL(opnd1.IsOpdExtend(), "must be ExtendOperand!");
+        ExtendShiftOperand &extendOpnd0 = static_cast<ExtendShiftOperand&>(opnd0);
+        ExtendShiftOperand &extendOpnd1 = static_cast<ExtendShiftOperand&>(opnd1);
+        if (extendOpnd0.GetShiftAmount() != extendOpnd1.GetShiftAmount()) {
+            return false;
+        }
+    } else {
+        return false;
+    }
+    return true;
+}
+
+bool SameDefPattern::SrcRegIsRedefined(regno_t regNo)
+{
+    AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
+    if (currInsn->GetBB() == sameInsn->GetBB()) {
+        FOR_BB_INSNS(insn, currInsn->GetBB()) {
+            if (insn->GetMachineOpcode() == MOP_xbl) {
+                return true;
+            }
+        }
+        if (!a64RD->FindRegDefBetweenInsn(regNo, sameInsn, currInsn).empty()) {
+            return true;
+        }
+    } else if (a64RD->HasRegDefBetweenInsnGlobal(regNo, *sameInsn, *currInsn)) {
+        return true;
+    }
+    return false;
+}
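+
+/*
+ * Editor's illustrative sketch (not from the original patch): a single-bit and-mask feeding
+ * cbz/cbnz becomes a test-bit branch; CalculateLogValue() below extracts the bit index:
+ *     and w3, w1, #8       // 8 = 1 << 3
+ *     cbz w3, .Ltarget
+ *   ====>
+ *     tbz w1, #3, .Ltarget
+ */
+void AndCbzPattern::Init()
+{
+    prevInsn = 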
nullptr; +} + +bool AndCbzPattern::IsAdjacentArea(Insn &prev, Insn &curr) const +{ + if (prev.GetBB() == curr.GetBB()) { + return true; + } + for (auto *succ : prev.GetBB()->GetSuccs()) { + if (succ == curr.GetBB()) { + return true; + } + } + return false; +} + +bool AndCbzPattern::CheckCondition(Insn &insn) +{ + auto *aarch64RD = static_cast(cgFunc.GetRD()); + MOperator mOp = insn.GetMachineOpcode(); + if ((mOp != MOP_wcbz) && (mOp != MOP_xcbz) && (mOp != MOP_wcbnz) && (mOp != MOP_xcbnz)) { + return false; + } + regno_t regNo = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + InsnSet defSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true); + if (defSet.size() != k1BitSize) { + return false; + } + prevInsn = *defSet.begin(); + if (prevInsn->GetMachineOpcode() != MOP_wandrri12 && prevInsn->GetMachineOpcode() != MOP_xandrri13) { + return false; + } + if (!IsAdjacentArea(*prevInsn, insn)) { + return false; + } + regno_t propRegNo = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + if (prevInsn->GetBB() == insn.GetBB() && !(aarch64RD->FindRegDefBetweenInsn(propRegNo, prevInsn, &insn).empty())) { + return false; + } + if (prevInsn->GetBB() != insn.GetBB() && aarch64RD->HasRegDefBetweenInsnGlobal(propRegNo, *prevInsn, insn)) { + return false; + } + if (!(cgFunc.GetRD()->FindUseForRegOpnd(insn, regNo, true).empty())) { + return false; + } + return true; +} + +int64 AndCbzPattern::CalculateLogValue(int64 val) const +{ + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; +} + +void AndCbzPattern::Optimize(Insn &insn) +{ + BB *bb = insn.GetBB(); + auto &aarchFunc = static_cast(cgFunc); + auto &andImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 tbzVal = CalculateLogValue(andImm.GetValue()); + if (tbzVal < 0) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (mOp) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "must be cbz/cbnz"); + break; + } + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImm = aarchFunc.CreateImmOperand(tbzVal, k8BitSize, false); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd), tbzImm, label); + newInsn.SetId(insn.GetId()); + bb->ReplaceInsn(insn, newInsn); + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In AndCbzPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======PrevInsn :\n"; + LogInfo::MapleLogger() << "=======ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "=======NewInsn :\n"; + newInsn.Dump(); + } + cgFunc.GetRD()->UpdateInOut(*bb, true); +} + +void AndCbzPattern::Run() +{ + Init(); + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction() || !CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +void SameRHSPropPattern::Init() +{ + prevInsn = nullptr; + candidates = {MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12, + MOP_wmovri32, MOP_xmovri64, MOP_wmovrr, MOP_xmovrr}; +} + +bool SameRHSPropPattern::IsSameOperand(Operand *opnd1, Operand *opnd2) const +{ + if (opnd1 == nullptr && opnd2 == nullptr) { + return true; + } else if (opnd1 == nullptr || opnd2 == nullptr) { + return false; + } + if (opnd1->IsRegister() && opnd2->IsRegister()) { + return 
RegOperand::IsSameReg(*opnd1, *opnd2);
+    } else if (opnd1->IsImmediate() && opnd2->IsImmediate()) {
+        auto *immOpnd1 = static_cast<ImmOperand*>(opnd1);
+        auto *immOpnd2 = static_cast<ImmOperand*>(opnd2);
+        return (immOpnd1->GetSize() == immOpnd2->GetSize()) && (immOpnd1->GetValue() == immOpnd2->GetValue());
+    }
+    return false;
+}
+
+bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn)
+{
+    uint32 opndNum = insn.GetOperandSize();
+    Operand *curRegOpnd = nullptr;
+    Operand *curImmOpnd = nullptr;
+    for (uint32 i = 0; i < opndNum; ++i) {
+        if (insn.OpndIsDef(i)) {
+            continue;
+        }
+        Operand &opnd = insn.GetOperand(i);
+        if (opnd.IsRegister()) {
+            curRegOpnd = &opnd;
+        } else if (opnd.IsImmediate()) {
+            auto &immOpnd = static_cast<ImmOperand&>(opnd);
+            if (immOpnd.GetVary() == kUnAdjustVary) {
+                return false;
+            }
+            curImmOpnd = &opnd;
+        }
+    }
+    if (curRegOpnd == nullptr && curImmOpnd != nullptr && static_cast<ImmOperand*>(curImmOpnd)->IsZero()) {
+        return false;
+    }
+    BB *bb = insn.GetBB();
+    for (auto *cursor = insn.GetPrev(); cursor != nullptr && cursor != bb->GetFirstInsn(); cursor = cursor->GetPrev()) {
+        if (!cursor->IsMachineInstruction()) {
+            continue;
+        }
+        if (cursor->IsCall() && !cgFunc.IsAfterRegAlloc()) {
+            return false;
+        }
+        if (cursor->GetMachineOpcode() != insn.GetMachineOpcode()) {
+            continue;
+        }
+        uint32 candOpndNum = cursor->GetOperandSize();
+        Operand *candRegOpnd = nullptr;
+        Operand *candImmOpnd = nullptr;
+        for (uint32 i = 0; i < candOpndNum; ++i) {
+            Operand &opnd = cursor->GetOperand(i);
+            if (cursor->OpndIsDef(i)) {
+                continue;
+            }
+            if (opnd.IsRegister()) {
+                candRegOpnd = &opnd;
+            } else if (opnd.IsImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                if (immOpnd.GetVary() == kUnAdjustVary) {
+                    return false;
+                }
+                candImmOpnd = &opnd;
+            }
+        }
+        if (IsSameOperand(curRegOpnd, candRegOpnd) && IsSameOperand(curImmOpnd, candImmOpnd)) {
+            prevInsn = cursor;
+            return true;
+        }
+    }
+    return false;
+}
+
+bool SameRHSPropPattern::CheckCondition(Insn &insn)
+{
+    if (!insn.IsMachineInstruction()) {
+        return false;
+    }
+    MOperator mOp = insn.GetMachineOpcode();
+    if (std::find(candidates.begin(), candidates.end(), mOp) == candidates.end()) {
+        return false;
+    }
+    if (!FindSameRHSInsnInBB(insn)) {
+        return false;
+    }
+    CHECK_FATAL(prevInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "prevInsn first operand must be register");
+    if (prevInsn->GetOperand(kInsnSecondOpnd).IsRegister() &&
+        RegOperand::IsSameReg(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd))) {
+        return false;
+    }
+    uint32 opndNum = prevInsn->GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = prevInsn->GetOperand(i);
+        if (!opnd.IsRegister()) {
+            continue;
+        }
+        regno_t regNO = static_cast<RegOperand&>(opnd).GetRegisterNumber();
+        if (!(cgFunc.GetRD()->FindRegDefBetweenInsn(regNO, prevInsn->GetNext(), insn.GetPrev()).empty())) {
+            return false;
+        }
+    }
+    return true;
+}
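+
+/*
+ * Editor's illustrative sketch (not from the original patch): when two insns in one BB compute
+ * the same right-hand side and no involved register is redefined in between, the second
+ * computation is replaced by a copy of the first result:
+ *     add w1, w2, #4
+ *     ....              // w1, w2 not redefined
+ *     add w3, w2, #4
+ *   ====>
+ *     add w1, w2, #4
+ *     ....
+ *     mov w3, w1
+ */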
+void SameRHSPropPattern::Optimize(Insn &insn)
+{
+    BB *bb = insn.GetBB();
+    Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd);
+    uint32 bitSize = static_cast<RegOperand&>(destOpnd).GetSize();
+    MOperator mOp = (bitSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr);
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, destOpnd, prevInsn->GetOperand(kInsnFirstOpnd));
+    newInsn.SetId(insn.GetId());
+    bb->ReplaceInsn(insn, newInsn);
+    if (GLOBAL_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In SameRHSPropPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "======= PrevInsn :\n";
+        prevInsn->Dump();
+        LogInfo::MapleLogger() << "======= ReplaceInsn :\n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "======= NewInsn :\n";
+        newInsn.Dump();
+    }
+    cgFunc.GetRD()->UpdateInOut(*bb, true);
+}
+
+void SameRHSPropPattern::Run()
+{
+    Init();
+    FOR_ALL_BB_REV(bb, &cgFunc) {
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ico.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ico.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7b91aedea6cf80016c45f0d01d7062b14c1f9dc2
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ico.cpp
@@ -0,0 +1,966 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_ico.h"
+#include "ico.h"
+#include "cg.h"
+#include "cg_option.h"
+#include "aarch64_isa.h"
+#include "aarch64_insn.h"
+#include "aarch64_cgfunc.h"
+
+/*
+ * This phase implements if-conversion optimization,
+ * which tries to convert conditional branches into cset/csel instructions.
+ */
+namespace maplebe {
+void AArch64IfConversionOptimizer::InitOptimizePatterns()
+{
+    /* editor's note: the template arguments of the three New<> calls were lost in transit;
+     * they are assumed to be the single-pass patterns defined in this file */
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOIfThenElsePattern>(*cgFunc));
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOSameCondPattern>(*cgFunc));
+    singlePassPatterns.emplace_back(memPool->New<AArch64ICOMorePredsPattern>(*cgFunc));
+}
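+
+/*
+ * Editor's illustrative sketch (not from the original patch): ccmp chains two compares so a
+ * short-circuit condition needs no second branch. For "if (a == 0 || b == 0)":
+ *     cmp  w0, #0
+ *     ccmp w1, #0, #4, NE   // if a != 0, flags = (cmp w1, #0); else NZCV = 0b0100 (Z set)
+ *     beq  .Lthen
+ * The #4 immediate is the value GetNZCV(CC_NE, false) returns below.
+ */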
+/* build the ccmp insn */
+Insn *AArch64ICOPattern::BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const
+{
+    Operand &opnd0 = cmpInsn->GetOperand(kInsnFirstOpnd);
+    Operand &opnd1 = cmpInsn->GetOperand(kInsnSecondOpnd);
+    Operand &opnd2 = cmpInsn->GetOperand(kInsnThirdOpnd);
+    /* ccmp only handles int operands */
+    if (!static_cast<RegOperand&>(opnd1).IsOfIntClass()) {
+        return nullptr;
+    }
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    uint32 nzcv = GetNZCV(ccCode, false);
+    if (nzcv == k16BitSize) {
+        return nullptr;
+    }
+    ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv);
+    CondOperand &cond = static_cast<AArch64CGFunc*>(cgFunc)->GetCondOperand(ccCode);
+    uint32 dSize = opnd1.GetSize();
+    bool isIntTy = opnd2.IsIntImmediate();
+    MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xccmpriic : MOP_wccmpriic)
+                                : (dSize == k64BitSize ? MOP_xccmprric : MOP_wccmprric);
+    /* cmp takes opnd2 in the range 0-4095, but ccmp only takes 0-31 */
+    if (isIntTy && static_cast<RegOperand&>(opnd2).GetRegisterNumber() >= k32BitSize) {
+        return nullptr;
+    }
+    return &cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd2, opnd3, cond);
+}
+
+/* NZCV flag constant for ccCode (optionally inverted); k16BitSize means unsupported */
+uint32 AArch64ICOPattern::GetNZCV(ConditionCode ccCode, bool inverse)
+{
+    switch (ccCode) {
+        case CC_EQ:
+            return inverse ? k4BitSize : k0BitSize;
+        case CC_HS:
+            return inverse ? k2BitSize : k0BitSize;
+        case CC_MI:
+            return inverse ? k8BitSize : k0BitSize;
+        case CC_VS:
+            return inverse ? k1BitSize : k0BitSize;
+        case CC_VC:
+            return inverse ? k0BitSize : k1BitSize;
+        case CC_LS:
+            return inverse ? k4BitSize : k2BitSize;
+        case CC_LO:
+            return inverse ? k0BitSize : k2BitSize;
+        case CC_NE:
+            return inverse ? k0BitSize : k4BitSize;
+        case CC_HI:
+            return inverse ? k2BitSize : k4BitSize;
+        case CC_PL:
+            return inverse ? k0BitSize : k8BitSize;
+        default:
+            return k16BitSize;
+    }
+}
+
+Insn *AArch64ICOPattern::BuildCmpInsn(const Insn &condBr) const
+{
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    RegOperand &reg = static_cast<RegOperand&>(condBr.GetOperand(0));
+    PrimType ptyp = (reg.GetSize() == k64BitSize) ? PTY_u64 : PTY_u32;
+    ImmOperand &numZero = func->CreateImmOperand(ptyp, 0);
+    Operand &rflag = func->GetOrCreateRflag();
+    MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
+    Insn &cmpInsn = func->GetInsnBuilder()->BuildInsn(mopCode, rflag, reg, numZero);
+    return &cmpInsn;
+}
+
+bool AArch64ICOPattern::IsSetInsn(const Insn &insn, Operand *&dest, std::vector<Operand*> &src) const
+{
+    MOperator mOpCode = insn.GetMachineOpcode();
+    if ((mOpCode >= MOP_xmovrr && mOpCode <= MOP_xvmovd) || cgFunc->GetTheCFG()->IsAddOrSubInsn(insn)) {
+        dest = &(insn.GetOperand(0));
+        for (uint32 i = 1; i < insn.GetOperandSize(); ++i) {
+            (void)src.emplace_back(&(insn.GetOperand(i)));
+        }
+        return true;
+    }
+    dest = nullptr;
+    src.clear();
+    return false;
+}
+
+ConditionCode AArch64ICOPattern::Encode(MOperator mOp, bool inverse) const
+{
+    switch (mOp) {
+        case MOP_bmi:
+            return inverse ? CC_PL : CC_MI;
+        case MOP_bvc:
+            return inverse ? CC_VS : CC_VC;
+        case MOP_bls:
+            return inverse ? CC_HI : CC_LS;
+        case MOP_blt:
+            return inverse ? CC_GE : CC_LT;
+        case MOP_ble:
+            return inverse ? CC_GT : CC_LE;
+        case MOP_beq:
+            return inverse ? CC_NE : CC_EQ;
+        case MOP_bne:
+            return inverse ? CC_EQ : CC_NE;
+        case MOP_blo:
+            return inverse ? CC_HS : CC_LO;
+        case MOP_bpl:
+            return inverse ? CC_MI : CC_PL;
+        case MOP_bhs:
+            return inverse ? CC_LO : CC_HS;
+        case MOP_bvs:
+            return inverse ? CC_VC : CC_VS;
+        case MOP_bhi:
+            return inverse ? CC_LS : CC_HI;
+        case MOP_bgt:
+            return inverse ? CC_LE : CC_GT;
+        case MOP_bge:
+            return inverse ? CC_LT : CC_GE;
+        case MOP_wcbnz:
+        case MOP_xcbnz:
+            return inverse ? CC_EQ : CC_NE;
+        case MOP_wcbz:
+        case MOP_xcbz:
+            return inverse ? CC_NE : CC_EQ;
+        default:
+            return kCcLast;
+    }
+}
+
+Insn *AArch64ICOPattern::BuildCondSet(const Insn &branch, RegOperand &reg, bool inverse) const
+{
+    ConditionCode ccCode = Encode(branch.GetMachineOpcode(), inverse);
+    DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+    AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+    CondOperand &cond = func->GetCondOperand(ccCode);
+    Operand &rflag = func->GetOrCreateRflag();
+    MOperator mopCode = (reg.GetSize() == k64BitSize) ? 
MOP_xcsetrc : MOP_wcsetrc; + return &func->GetInsnBuilder()->BuildInsn(mopCode, reg, cond, rflag); +} + +Insn *AArch64ICOPattern::BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, + RegOperand &src2) const +{ + ConditionCode ccCode = Encode(branch.GetMachineOpcode(), false); + DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); + CondOperand &cond = static_cast(cgFunc)->GetCondOperand(ccCode); + Operand &rflag = static_cast(cgFunc)->GetOrCreateRflag(); + return &cgFunc->GetInsnBuilder()->BuildInsn(mOp, dst, src1, src2, cond, rflag); +} + +void AArch64ICOIfThenElsePattern::GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, + RegOperand &destReg, std::vector &generateInsn) +{ + ImmOperand &imm1 = static_cast(ifDest); + ImmOperand &imm2 = static_cast(elseDest); + bool inverse = imm1.IsZero() && imm2.IsOne(); + if (inverse || (imm2.IsZero() && imm1.IsOne())) { + Insn *csetInsn = BuildCondSet(branchInsn, destReg, inverse); + DEBUG_ASSERT(csetInsn != nullptr, "build a insn failed"); + generateInsn.emplace_back(csetInsn); + } else if (imm1.GetValue() == imm2.GetValue()) { + bool destIsIntTy = destReg.IsOfIntClass(); + MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) + : ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri)); + Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, imm1); + generateInsn.emplace_back(&tempInsn); + } else { + bool destIsIntTy = destReg.IsOfIntClass(); + uint32 dSize = destReg.GetSize(); + bool isD64 = dSize == k64BitSize; + MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) + : ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri)); + RegOperand *tempTarIf = nullptr; + if (imm1.IsZero()) { + tempTarIf = &cgFunc->GetZeroOpnd(dSize); + } else { + tempTarIf = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarIf, imm1); + generateInsn.emplace_back(&tempInsnIf); + } + + RegOperand *tempTarElse = nullptr; + if (imm2.IsZero()) { + tempTarElse = &cgFunc->GetZeroOpnd(dSize); + } else { + tempTarElse = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + Insn &tempInsnElse = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarElse, imm2); + generateInsn.emplace_back(&tempInsnElse); + } + + bool isIntTy = destReg.IsOfIntClass(); + MOperator mOpCode = isIntTy ? (isD64 ? MOP_xcselrrrc : MOP_wcselrrrc) + : (isD64 ? MOP_dcselrrrc : (dSize == k32BitSize ? MOP_scselrrrc : MOP_hcselrrrc)); + Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tempTarIf, *tempTarElse); + CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); + generateInsn.emplace_back(cselInsn); + } +} + +RegOperand *AArch64ICOIfThenElsePattern::GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg, + std::vector &generateInsn) const +{ + RegOperand *reg = nullptr; + if (!dest.IsRegister()) { + bool destIsIntTy = destReg.IsOfIntClass(); + bool isDest64 = destReg.GetSize() == k64BitSize; + MOperator mOp = + destIsIntTy ? (isDest64 ? MOP_xmovri64 : MOP_wmovri32) : (isDest64 ? 
MOP_xdfmovri : MOP_wsfmovri); + reg = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + ImmOperand &tempSrcElse = static_cast(dest); + if (tempSrcElse.IsZero()) { + return &cgFunc->GetZeroOpnd(destReg.GetSize()); + } + Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *reg, tempSrcElse); + generateInsn.emplace_back(&tempInsn); + return reg; + } else { + return (static_cast(&dest)); + } +} + +void AArch64ICOIfThenElsePattern::GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, + RegOperand &destReg, std::vector &generateInsn) +{ + RegOperand *tReg = static_cast(&ifDest); + RegOperand *eReg = static_cast(&elseDest); + + /* mov w0, w1 mov w0, w1 --> mov w0, w1 */ + if (eReg->GetRegisterNumber() == tReg->GetRegisterNumber()) { + uint32 dSize = destReg.GetSize(); + bool srcIsIntTy = tReg->IsOfIntClass(); + bool destIsIntTy = destReg.IsOfIntClass(); + MOperator mOp; + if (dSize == k64BitSize) { + mOp = srcIsIntTy ? (destIsIntTy ? MOP_xmovrr : MOP_xvmovdr) : (destIsIntTy ? MOP_xvmovrd : MOP_xvmovd); + } else { + mOp = srcIsIntTy ? (destIsIntTy ? MOP_wmovrr : MOP_xvmovsr) : (destIsIntTy ? MOP_xvmovrs : MOP_xvmovs); + } + Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, *tReg); + generateInsn.emplace_back(&tempInsnIf); + } else { + uint32 dSize = destReg.GetSize(); + bool isIntTy = destReg.IsOfIntClass(); + MOperator mOpCode = + isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc) + : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ? MOP_scselrrrc : MOP_hcselrrrc)); + Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tReg, *eReg); + CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); + generateInsn.emplace_back(cselInsn); + } +} + +Operand *AArch64ICOIfThenElsePattern::GetDestReg(const std::map> &destSrcMap, + const RegOperand &destReg) const +{ + Operand *dest = nullptr; + for (const auto &destSrcPair : destSrcMap) { + DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register"); + RegOperand *destRegInMap = static_cast(destSrcPair.first); + DEBUG_ASSERT(destRegInMap != nullptr, "nullptr check"); + if (destRegInMap->GetRegisterNumber() == destReg.GetRegisterNumber()) { + if (destSrcPair.second.size() > 1) { + dest = destSrcPair.first; + } else { + dest = destSrcPair.second[0]; + } + break; + } + } + return dest; +} + +bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(BB &cmpBB, const BB &bb, + const std::map> &ifDestSrcMap, + const std::map> &elseDestSrcMap, + bool elseBBIsProcessed, std::vector &generateInsn) +{ + Insn *branchInsn = cgFunc->GetTheCFG()->FindLastCondBrInsn(cmpBB); + FOR_BB_INSNS_CONST(insn, (&bb)) { + if (!insn->IsMachineInstruction() || insn->IsBranch()) { + continue; + } + Operand *dest = nullptr; + std::vector src; + + if (!IsSetInsn(*insn, dest, src)) { + DEBUG_ASSERT(false, "insn check"); + } + DEBUG_ASSERT(dest->IsRegister(), "register check"); + RegOperand *destReg = static_cast(dest); + + Operand *elseDest = GetDestReg(elseDestSrcMap, *destReg); + Operand *ifDest = GetDestReg(ifDestSrcMap, *destReg); + + if (elseBBIsProcessed) { + if (elseDest != nullptr) { + continue; + } + elseDest = dest; + DEBUG_ASSERT(ifDest != nullptr, "null ptr check"); + if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + continue; + } + } else { + DEBUG_ASSERT(elseDest != nullptr, "null ptr check"); + if (ifDest == nullptr) { + if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + continue; + } + ifDest = dest; + } + } + + /* generate cset or csel instruction */ + 
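+        /* Editor's note (not from the original patch): three shapes are produced here:
+         *   ifDest/elseDest are the imms 1 and 0  -> cset dest, cond (or the inverted cond)
+         *   ifDest/elseDest are equal imms        -> mov  dest, #imm
+         *   otherwise                             -> csel dest, tReg, eReg, cond
+         */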
DEBUG_ASSERT(ifDest != nullptr, "null ptr check");
+        if (ifDest->IsIntImmediate() && elseDest->IsIntImmediate()) {
+            GenerateInsnForImm(*branchInsn, *ifDest, *elseDest, *destReg, generateInsn);
+        } else {
+            RegOperand *tReg = GenerateRegAndTempInsn(*ifDest, *destReg, generateInsn);
+            RegOperand *eReg = GenerateRegAndTempInsn(*elseDest, *destReg, generateInsn);
+            if ((tReg->GetRegisterType() != eReg->GetRegisterType()) ||
+                (tReg->GetRegisterType() != destReg->GetRegisterType())) {
+                return false;
+            }
+            GenerateInsnForReg(*branchInsn, *tReg, *eReg, *destReg, generateInsn);
+        }
+    }
+
+    return true;
+}
+
+bool AArch64ICOIfThenElsePattern::CheckHasSameDest(std::vector<Insn*> &lInsn, std::vector<Insn*> &rInsn) const
+{
+    for (size_t i = 0; i < lInsn.size(); ++i) {
+        if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*lInsn[i])) {
+            bool hasSameDest = false;
+            for (size_t j = 0; j < rInsn.size(); ++j) {
+                RegOperand *rDestReg = static_cast<RegOperand*>(&rInsn[j]->GetOperand(0));
+                RegOperand *lDestReg = static_cast<RegOperand*>(&lInsn[i]->GetOperand(0));
+                if (lDestReg->GetRegisterNumber() == rDestReg->GetRegisterNumber()) {
+                    hasSameDest = true;
+                    break;
+                }
+            }
+            if (!hasSameDest) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+bool AArch64ICOIfThenElsePattern::CheckModifiedRegister(Insn &insn,
+                                                        std::map<Operand*, std::vector<Operand*>> &destSrcMap,
+                                                        std::vector<Operand*> &src, Operand &dest,
+                                                        const Insn *cmpInsn, const Operand *flagOpnd) const
+{
+    /* the src was modified earlier in this block */
+    for (auto srcOpnd : src) {
+        if (srcOpnd->IsRegister()) {
+            RegOperand &srcReg = static_cast<RegOperand&>(*srcOpnd);
+            for (const auto &destSrcPair : destSrcMap) {
+                DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register");
+                RegOperand *mapSrcReg = static_cast<RegOperand*>(destSrcPair.first);
+                if (mapSrcReg->GetRegisterNumber() == srcReg.GetRegisterNumber()) {
+                    return false;
+                }
+            }
+        }
+    }
+
+    /* the dest register was modified earlier in this block */
+    DEBUG_ASSERT(dest.IsRegister(), "opnd must be register");
+    RegOperand &destReg = static_cast<RegOperand&>(dest);
+    for (const auto &destSrcPair : destSrcMap) {
+        DEBUG_ASSERT(destSrcPair.first->IsRegister(), "opnd must be register");
+        RegOperand *mapSrcReg = static_cast<RegOperand*>(destSrcPair.first);
+        if (mapSrcReg->GetRegisterNumber() == destReg.GetRegisterNumber()) {
+            return false;
+        }
+    }
+
+    /* if a src register is modified later in this block, it will not be processed */
+    for (auto srcOpnd : src) {
+        if (srcOpnd->IsRegister()) {
+            RegOperand &srcReg = static_cast<RegOperand&>(*srcOpnd);
+            if (destReg.IsOfFloatOrSIMDClass() && srcReg.GetRegisterNumber() == RZR) {
+                return false;
+            }
+            for (Insn *tmpInsn = &insn; tmpInsn != nullptr; tmpInsn = tmpInsn->GetNext()) {
+                Operand *tmpDest = nullptr;
+                std::vector<Operand*> tmpSrc;
+                if (IsSetInsn(*tmpInsn, tmpDest, tmpSrc) && tmpDest->Equals(*srcOpnd)) {
+                    DEBUG_ASSERT(tmpDest->IsRegister(), "opnd must be register");
+                    RegOperand *tmpDestReg = static_cast<RegOperand*>(tmpDest);
+                    if (srcReg.GetRegisterNumber() == tmpDestReg->GetRegisterNumber()) {
+                        return false;
+                    }
+                }
+            }
+        }
+    }
+
+    /* the add/sub insn's dest register must not appear in the cmp insn. 
*/ + if (cgFunc->GetTheCFG()->IsAddOrSubInsn(insn)) { + RegOperand &insnDestReg = static_cast(insn.GetOperand(0)); + if (flagOpnd) { + RegOperand &cmpReg = static_cast(cmpInsn->GetOperand(0)); + if (insnDestReg.GetRegisterNumber() == cmpReg.GetRegisterNumber()) { + return false; + } + } else { + RegOperand &cmpReg1 = static_cast(cmpInsn->GetOperand(1)); + if (cmpInsn->GetOperand(2).IsRegister()) { + RegOperand &cmpReg2 = static_cast(cmpInsn->GetOperand(2)); + if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber() || + insnDestReg.GetRegisterNumber() == cmpReg2.GetRegisterNumber()) { + return false; + } + } else { + if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber()) { + return false; + } + } + } + } + return true; +} + +bool AArch64ICOIfThenElsePattern::CheckCondMoveBB(BB *bb, std::map> &destSrcMap, + std::vector &destRegs, std::vector &setInsn, + Operand *flagOpnd, Insn *cmpInsn) const +{ + if (bb == nullptr) { + return false; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() || insn->IsBranch()) { + continue; + } + Operand *dest = nullptr; + std::vector src; + + if (!IsSetInsn(*insn, dest, src)) { + return false; + } + DEBUG_ASSERT(dest != nullptr, "null ptr check"); + DEBUG_ASSERT(src.size() != 0, "null ptr check"); + + if (!dest->IsRegister()) { + return false; + } + + for (auto srcOpnd : src) { + if (!(srcOpnd->IsConstImmediate()) && !srcOpnd->IsRegister()) { + return false; + } + } + + if (flagOpnd != nullptr) { + RegOperand *flagReg = static_cast(flagOpnd); + regno_t flagRegNO = flagReg->GetRegisterNumber(); + if (bb->GetLiveOut()->TestBit(flagRegNO)) { + return false; + } + } + + if (!CheckModifiedRegister(*insn, destSrcMap, src, *dest, cmpInsn, flagOpnd)) { + return false; + } + + (void)destSrcMap.insert(std::make_pair(dest, src)); + destRegs.emplace_back(dest); + (void)setInsn.emplace_back(insn); + } + return true; +} + +/* Convert conditional branches into cset/csel instructions */ +bool AArch64ICOIfThenElsePattern::DoOpt(BB &cmpBB, BB *ifBB, BB *elseBB, BB &joinBB) +{ + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(cmpBB); + DEBUG_ASSERT(condBr != nullptr, "nullptr check"); + Insn *cmpInsn = FindLastCmpInsn(cmpBB); + Operand *flagOpnd = nullptr; + /* for cbnz and cbz institution */ + if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) { + Operand &opnd0 = condBr->GetOperand(0); + if (opnd0.IsRegister() && static_cast(opnd0).GetRegisterNumber() == RZR) { + return false; + } + cmpInsn = condBr; + flagOpnd = &(opnd0); + } + + /* tbz will not be optimized */ + MOperator mOperator = condBr->GetMachineOpcode(); + if (mOperator == MOP_xtbz || mOperator == MOP_wtbz || mOperator == MOP_xtbnz || mOperator == MOP_wtbnz) { + return false; + } + if (cmpInsn == nullptr) { + return false; + } + + std::vector ifDestRegs; + std::vector ifSetInsn; + std::vector elseDestRegs; + std::vector elseSetInsn; + + std::map> ifDestSrcMap; + std::map> elseDestSrcMap; + + if (!CheckCondMoveBB(elseBB, elseDestSrcMap, elseDestRegs, elseSetInsn, flagOpnd, cmpInsn) || + (ifBB != nullptr && !CheckCondMoveBB(ifBB, ifDestSrcMap, ifDestRegs, ifSetInsn, flagOpnd, cmpInsn))) { + return false; + } + + if (!CheckHasSameDest(ifSetInsn, elseSetInsn) || !CheckHasSameDest(elseSetInsn, ifSetInsn)) { + return false; + } + + size_t count = elseDestRegs.size(); + + for (size_t i = 0; i < ifDestRegs.size(); ++i) { + bool foundInElse = false; + for (size_t j = 0; j < elseDestRegs.size(); ++j) { + RegOperand *elseDestReg = static_cast(elseDestRegs[j]); + RegOperand 
*ifDestReg = static_cast(ifDestRegs[i]); + if (ifDestReg->GetRegisterNumber() == elseDestReg->GetRegisterNumber()) { + if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*ifSetInsn[i]) && + cgFunc->GetTheCFG()->IsAddOrSubInsn(*elseSetInsn[j])) { + return false; + } + foundInElse = true; + break; + } + } + if (foundInElse) { + continue; + } else { + ++count; + } + } + if (count > kThreshold) { + return false; + } + + /* generate insns */ + std::vector elseGenerateInsn; + std::vector ifGenerateInsn; + bool elseBBProcessResult = false; + if (elseBB != nullptr) { + elseBBProcessResult = BuildCondMovInsn(cmpBB, *elseBB, ifDestSrcMap, elseDestSrcMap, false, elseGenerateInsn); + } + bool ifBBProcessResult = false; + if (ifBB != nullptr) { + ifBBProcessResult = BuildCondMovInsn(cmpBB, *ifBB, ifDestSrcMap, elseDestSrcMap, true, ifGenerateInsn); + } + if (!elseBBProcessResult || (ifBB != nullptr && !ifBBProcessResult)) { + return false; + } + + /* insert insn */ + if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) { + Insn *innerCmpInsn = BuildCmpInsn(*condBr); + cmpBB.InsertInsnBefore(*condBr, *innerCmpInsn); + cmpInsn = innerCmpInsn; + } + + if (elseBB != nullptr) { + cmpBB.SetKind(elseBB->GetKind()); + } else { + DEBUG_ASSERT(ifBB != nullptr, "ifBB should not be nullptr"); + cmpBB.SetKind(ifBB->GetKind()); + } + + for (auto setInsn : ifSetInsn) { + if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*setInsn)) { + (void)cmpBB.InsertInsnBefore(*cmpInsn, *setInsn); + } + } + + for (auto setInsn : elseSetInsn) { + if (cgFunc->GetTheCFG()->IsAddOrSubInsn(*setInsn)) { + (void)cmpBB.InsertInsnBefore(*cmpInsn, *setInsn); + } + } + + /* delete condBr */ + cmpBB.RemoveInsn(*condBr); + /* Insert goto insn after csel insn. */ + if (cmpBB.GetKind() == BB::kBBGoto || cmpBB.GetKind() == BB::kBBIf) { + if (elseBB != nullptr) { + (void)cmpBB.InsertInsnAfter(*cmpBB.GetLastInsn(), *elseBB->GetLastInsn()); + } else { + DEBUG_ASSERT(ifBB != nullptr, "ifBB should not be nullptr"); + (void)cmpBB.InsertInsnAfter(*cmpBB.GetLastInsn(), *ifBB->GetLastInsn()); + } + } + + /* Insert instructions in branches after cmpInsn */ + for (auto itr = elseGenerateInsn.rbegin(); itr != elseGenerateInsn.rend(); ++itr) { + (void)cmpBB.InsertInsnAfter(*cmpInsn, **itr); + } + for (auto itr = ifGenerateInsn.rbegin(); itr != ifGenerateInsn.rend(); ++itr) { + (void)cmpBB.InsertInsnAfter(*cmpInsn, **itr); + } + + /* Remove branches and merge join */ + if (ifBB != nullptr) { + cgFunc->GetTheCFG()->RemoveBB(*ifBB); + } + if (elseBB != nullptr) { + cgFunc->GetTheCFG()->RemoveBB(*elseBB); + } + + if (cmpBB.GetKind() != BB::kBBIf && cmpBB.GetNext() == &joinBB && + !maplebe::CGCFG::InLSDA(joinBB.GetLabIdx(), *cgFunc->GetEHFunc()) && + cgFunc->GetTheCFG()->CanMerge(cmpBB, joinBB)) { + maplebe::CGCFG::MergeBB(cmpBB, joinBB, *cgFunc); + keepPosition = true; + } + return true; +} + +/* + * Find IF-THEN-ELSE or IF-THEN basic block pattern, + * and then invoke DoOpt(...) to finish optimize. 
+ */
+bool AArch64ICOIfThenElsePattern::Optimize(BB &curBB)
+{
+    if (curBB.GetKind() != BB::kBBIf) {
+        return false;
+    }
+    BB *ifBB = nullptr;
+    BB *elseBB = nullptr;
+    BB *joinBB = nullptr;
+
+    BB *thenDest = CGCFG::GetTargetSuc(curBB);
+    BB *elseDest = curBB.GetNext();
+    CHECK_FATAL(thenDest != nullptr, "then_dest is null in ITEPattern::Optimize");
+    CHECK_FATAL(elseDest != nullptr, "else_dest is null in ITEPattern::Optimize");
+    /* IF-THEN-ELSE */
+    if (thenDest->NumPreds() == 1 && thenDest->NumSuccs() == 1 && elseDest->NumSuccs() == 1 &&
+        elseDest->NumPreds() == 1 && thenDest->GetSuccs().front() == elseDest->GetSuccs().front()) {
+        ifBB = thenDest;
+        elseBB = elseDest;
+        joinBB = thenDest->GetSuccs().front();
+    } else if (elseDest->NumPreds() == 1 && elseDest->NumSuccs() == 1 && elseDest->GetSuccs().front() == thenDest) {
+        /* IF-THEN */
+        ifBB = nullptr;
+        elseBB = elseDest;
+        joinBB = thenDest;
+    } else {
+        /* not a form we can handle */
+        return false;
+    }
+    DEBUG_ASSERT(elseBB != nullptr, "elseBB should not be nullptr");
+    if (CGCFG::InLSDA(elseBB->GetLabIdx(), *cgFunc->GetEHFunc()) ||
+        CGCFG::InSwitchTable(elseBB->GetLabIdx(), *cgFunc)) {
+        return false;
+    }
+
+    if (ifBB != nullptr &&
+        (CGCFG::InLSDA(ifBB->GetLabIdx(), *cgFunc->GetEHFunc()) || CGCFG::InSwitchTable(ifBB->GetLabIdx(), *cgFunc))) {
+        return false;
+    }
+    return DoOpt(curBB, ifBB, elseBB, *joinBB);
+}
+
+/* If( cmp || cmp ) then
+ * or
+ * If( cmp && cmp ) then */
+bool AArch64ICOSameCondPattern::Optimize(BB &secondIfBB)
+{
+    if (secondIfBB.GetKind() != BB::kBBIf || secondIfBB.NumPreds() != 1) {
+        return false;
+    }
+    BB *firstIfBB = secondIfBB.GetPrev();
+    /* check firstIfBB before dereferencing it; its next BB must be secondIfBB */
+    if (firstIfBB == nullptr || firstIfBB->GetKind() != BB::kBBIf) {
+        return false;
+    }
+    BB *nextBB = firstIfBB->GetNext();
+    CHECK_FATAL(nextBB != nullptr, "nextBB is null in AArch64ICOSameCondPattern::Optimize");
+    if (nextBB->GetId() != secondIfBB.GetId()) {
+        return false;
+    }
+    return DoOpt(firstIfBB, secondIfBB);
+}
+
+bool AArch64ICOPattern::CheckMop(MOperator mOperator) const
+{
+    switch (mOperator) {
+        case MOP_beq:
+        case MOP_bne:
+        case MOP_blt:
+        case MOP_ble:
+        case MOP_bgt:
+        case MOP_bge:
+        case MOP_blo:
+        case MOP_bls:
+        case MOP_bhs:
+        case MOP_bhi:
+        case MOP_bpl:
+        case MOP_bmi:
+        case MOP_bvc:
+        case MOP_bvs:
+            return true;
+        default:
+            return false;
+    }
+}
+
+/* branchInsn1 is firstIfBB's last conditional branch
+ * branchInsn2 is secondIfBB's last conditional branch
+ *
+ * Limitation: branchInsn1 must be the same kind of branch as branchInsn2, with the same target
+ */
+bool AArch64ICOSameCondPattern::DoOpt(BB *firstIfBB, BB &secondIfBB)
+{
+    Insn *branchInsn1 = cgFunc->GetTheCFG()->FindLastCondBrInsn(*firstIfBB);
+    DEBUG_ASSERT(branchInsn1 != nullptr, "nullptr check");
+    Insn *cmpInsn1 = FindLastCmpInsn(*firstIfBB);
+    MOperator mOperator1 = branchInsn1->GetMachineOpcode();
+    Insn *branchInsn2 = cgFunc->GetTheCFG()->FindLastCondBrInsn(secondIfBB);
+    DEBUG_ASSERT(branchInsn2 != nullptr, "nullptr check");
+    Insn *cmpInsn2 = FindLastCmpInsn(secondIfBB);
+    MOperator mOperator2 = branchInsn2->GetMachineOpcode();
+    if (cmpInsn1 == nullptr || cmpInsn2 == nullptr) {
+        return false;
+    }
+
+    /* tbz and cbz will not be optimized */
+    if (mOperator1 != mOperator2 || !CheckMop(mOperator1)) {
+        return false;
+    }
+
+    /* the two BBs must branch to the same label */
+    std::vector<LabelOperand*> labelOpnd1 = GetLabelOpnds(*branchInsn1);
+    std::vector<LabelOperand*> labelOpnd2 = GetLabelOpnds(*branchInsn2);
+    if (labelOpnd1.size() != 1 || labelOpnd2.size() != 1 ||
+        labelOpnd1[0]->GetLabelIndex() != labelOpnd2[0]->GetLabelIndex()) {
+        return false;
+    }
+
+    /* secondIfBB must contain only branchInsn2 and cmpInsn2 */
+    FOR_BB_INSNS_REV(insn, &secondIfBB) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn != branchInsn2 && insn != cmpInsn2) {
+            return false;
+        }
+    }
+
+    /* build the ccmp insn */
+    ConditionCode ccCode = Encode(branchInsn1->GetMachineOpcode(), true);
+    DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+    Insn *ccmpInsn = BuildCcmpInsn(ccCode, cmpInsn2);
+    if (ccmpInsn == nullptr) {
+        return false;
+    }
+
+    /* insert the ccmp insn */
+    firstIfBB->InsertInsnBefore(*branchInsn1, *ccmpInsn);
+
+    /* remove secondIfBB */
+    BB *nextBB = secondIfBB.GetNext();
+    cgFunc->GetTheCFG()->RemoveBB(secondIfBB);
+    firstIfBB->PushFrontSuccs(*nextBB);
+    nextBB->PushFrontPreds(*firstIfBB);
+    return true;
+}
+
+/*
+ * check that every predecessor of the goto BB is an if BB
+ */
+bool AArch64ICOMorePredsPattern::Optimize(BB &curBB)
+{
+    if (curBB.GetKind() != BB::kBBGoto) {
+        return false;
+    }
+    for (BB *preBB : curBB.GetPreds()) {
+        if (preBB->GetKind() != BB::kBBIf) {
+            return false;
+        }
+    }
+    for (BB *succsBB : curBB.GetSuccs()) {
+        if (succsBB->GetKind() != BB::kBBFallthru) {
+            return false;
+        }
+        if (succsBB->NumPreds() > 2) {
+            return false;
+        }
+    }
+    Insn *gotoBr = curBB.GetLastMachineInsn();
+    DEBUG_ASSERT(gotoBr != nullptr, "gotoBr should not be nullptr");
+    auto &gotoLabel = static_cast<LabelOperand&>(gotoBr->GetOperand(gotoBr->GetOperandSize() - 1));
+    for (BB *preBB : curBB.GetPreds()) {
+        Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB);
+        DEBUG_ASSERT(condBr != nullptr, "nullptr check");
+        Operand &condBrLastOpnd = condBr->GetOperand(condBr->GetOperandSize() - 1);
+        DEBUG_ASSERT(condBrLastOpnd.IsLabelOpnd(), "a label operand must exist in a branch insn");
+        auto &labelOpnd = static_cast<LabelOperand&>(condBrLastOpnd);
+        if (labelOpnd.GetLabelIndex() != curBB.GetLabIdx()) {
+            return false;
+        }
+        if (gotoLabel.GetLabelIndex() != preBB->GetNext()->GetLabIdx()) {
+            /* do not if-convert if an 'else' clause is present */
+            return false;
+        }
+    }
+    return DoOpt(curBB);
+}
+
+/* the goto BB may contain only mov insns followed by a branch */
+bool AArch64ICOMorePredsPattern::CheckGotoBB(BB &gotoBB, std::vector<Insn*> &movInsn) const
+{
+    FOR_BB_INSNS(insn, &gotoBB) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn->IsMove()) {
+            movInsn.push_back(insn);
+            continue;
+        }
+        if (insn->GetId() != gotoBB.GetLastInsn()->GetId()) {
+            return false;
+        } else if (!insn->IsBranch()) { /* the last insn must be a branch */
+            return false;
+        }
+    }
+    return true;
+}
+
+/* build csel insns from the goto BB's mov insns */
+bool AArch64ICOMorePredsPattern::MovToCsel(std::vector<Insn*> &movInsn, std::vector<Insn*> &cselInsn,
+                                           const Insn &branchInsn) const
+{
+    Operand &branchOpnd0 = branchInsn.GetOperand(kInsnFirstOpnd);
+    regno_t branchRegNo = 0;
+    if (branchOpnd0.IsRegister()) {
+        branchRegNo = static_cast<RegOperand&>(branchOpnd0).GetRegisterNumber();
+    }
+    for (Insn *insn : movInsn) {
+        /* use the mov to build a csel */
+        Operand &opnd0 = insn->GetOperand(kInsnFirstOpnd);
+        Operand &opnd1 = insn->GetOperand(kInsnSecondOpnd);
+        /* the mov's opnd1 must not be an immediate */
+        if (opnd1.IsImmediate()) {
+            return false;
+        }
+        ConditionCode ccCode = AArch64ICOPattern::Encode(branchInsn.GetMachineOpcode(), false);
+        DEBUG_ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast");
+        CondOperand &cond = static_cast<AArch64CGFunc*>(cgFunc)->GetCondOperand(ccCode);
+        Operand &rflag = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateRflag();
+        RegOperand &regOpnd0 = static_cast<RegOperand&>(opnd0);
+        RegOperand &regOpnd1 = static_cast<RegOperand&>(opnd1);
+        /* opnd0 and opnd1 must have the same type and size */
+        if (regOpnd0.GetSize() !=
regOpnd1.GetSize() || (regOpnd0.IsOfIntClass() != regOpnd1.IsOfIntClass())) { + return false; + } + /* The branchOpnd0 cannot be modified for csel. */ + regno_t movRegNo0 = static_cast(opnd0).GetRegisterNumber(); + if (branchOpnd0.IsRegister() && branchRegNo == movRegNo0) { + return false; + } + uint32 dSize = regOpnd0.GetSize(); + bool isIntTy = regOpnd0.IsOfIntClass(); + MOperator mOpCode = + isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc) + : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ? MOP_scselrrrc : MOP_hcselrrrc)); + cselInsn.emplace_back(&cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd0, cond, rflag)); + } + if (cselInsn.size() < 1) { + return false; + } + return true; +} + +bool AArch64ICOMorePredsPattern::DoOpt(BB &gotoBB) +{ + std::vector movInsn; + std::vector> presCselInsn; + std::vector presBB; + Insn *branchInsn = gotoBB.GetLastMachineInsn(); + if (branchInsn == nullptr || !branchInsn->IsUnCondBranch()) { + return false; + } + /* get preds's new label */ + std::vector labelOpnd = GetLabelOpnds(*branchInsn); + if (labelOpnd.size() != 1) { + return false; + } + if (!CheckGotoBB(gotoBB, movInsn)) { + return false; + } + /* Check all preBB, Exclude gotoBBs that cannot be optimized. */ + for (BB *preBB : gotoBB.GetPreds()) { + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + DEBUG_ASSERT(condBr != nullptr, "nullptr check"); + + /* tbz/cbz will not be optimized */ + MOperator mOperator = condBr->GetMachineOpcode(); + if (!CheckMop(mOperator)) { + return false; + } + std::vector cselInsn; + if (!MovToCsel(movInsn, cselInsn, *condBr)) { + return false; + } + if (cselInsn.size() < 1) { + return false; + } + presCselInsn.emplace_back(cselInsn); + presBB.emplace_back(preBB); + } + /* modifies presBB */ + for (size_t i = 0; i < presCselInsn.size(); ++i) { + BB *preBB = presBB[i]; + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + std::vector cselInsn = presCselInsn[i]; + /* insert csel insn */ + for (Insn *csel : cselInsn) { + preBB->InsertInsnBefore(*condBr, *csel); + } + /* new condBr */ + condBr->SetOperand(condBr->GetOperandSize() - 1, *labelOpnd[0]); + } + /* Remove branches and merge gotoBB */ + cgFunc->GetTheCFG()->RemoveBB(gotoBB); + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_insn.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d96ce60196b71dbd763fd8a3cb512c581b9bb841 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_insn.cpp @@ -0,0 +1,589 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "aarch64_insn.h"
+#include "aarch64_cg.h"
+#include "common_utils.h"
+#include "insn.h"
+#include "metadata_layout.h"
+#include <cmath>
+#include <iomanip>
+#include <sstream>
+
+namespace maplebe {
+
+void A64OpndEmitVisitor::EmitIntReg(const RegOperand &v, uint8 opndSz)
+{
+    CHECK_FATAL(v.GetRegisterType() == kRegTyInt, "wrong Type");
+    uint8 opndSize = (opndSz == kMaxSimm32) ? v.GetSize() : opndSz;
+    DEBUG_ASSERT((opndSize == k32BitSize || opndSize == k64BitSize), "illegal register size");
+#ifdef USE_32BIT_REF
+    bool r32 = (opndSize == k32BitSize) || isRefField;
+#else
+    bool r32 = (opndSize == k32BitSize);
+#endif /* USE_32BIT_REF */
+    (void)emitter.Emit(
+        AArch64CG::intRegNames[(r32 ? AArch64CG::kR32List : AArch64CG::kR64List)][v.GetRegisterNumber()]);
+}
+
+void A64OpndEmitVisitor::Visit(maplebe::RegOperand *v)
+{
+    DEBUG_ASSERT(opndProp == nullptr || opndProp->IsRegister(), "operand type doesn't match");
+    uint32 size = v->GetSize();
+    regno_t regNO = v->GetRegisterNumber();
+    uint8 opndSize = (opndProp != nullptr) ? opndProp->GetSize() : size;
+    switch (v->GetRegisterType()) {
+        case kRegTyInt: {
+            EmitIntReg(*v, opndSize);
+            break;
+        }
+        case kRegTyFloat: {
+            DEBUG_ASSERT((opndSize == k8BitSize || opndSize == k16BitSize || opndSize == k32BitSize ||
+                          opndSize == k64BitSize || opndSize == k128BitSize),
+                         "illegal register size");
+            if (opndProp->IsVectorOperand() && v->GetVecLaneSize() != 0) {
+                EmitVectorOperand(*v);
+            } else {
+                /* An FP reg cannot be a ref field. Map size to the register-name table
+                 * index: 8->0, 16->1, 32->2, 64->3 (ctz(8) == 3, hence the -3). */
+                uint32 regSet = __builtin_ctz(static_cast<uint32>(opndSize)) - 3;
+                (void)emitter.Emit(AArch64CG::intRegNames[regSet][regNO]);
+            }
+            break;
+        }
+        default:
+            DEBUG_ASSERT(false, "NYI");
+            break;
+    }
+}
+
+void A64OpndEmitVisitor::Visit(maplebe::ImmOperand *v)
+{
+    if (v->IsOfstImmediate()) {
+        return Visit(static_cast<OfstOperand*>(v));
+    }
+
+    int64 value = v->GetValue();
+    if (!v->IsFmov()) {
+        (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#")
+            .Emit((v->GetSize() == k64BitSize) ? value : static_cast<int64>(static_cast<int32>(value)));
+        return;
+    }
+    if (v->GetKind() == Operand::kOpdFPImmediate) {
+        CHECK_FATAL(value == 0, "NIY");
+        emitter.Emit("#0.0");
+        return; /* the zero FP immediate is fully emitted above */
+    }
+    /*
+     * Compute the float value from the 8-bit FMOV immediate encoding:
+     * bit 7 is the sign, bits 6..4 hold the biased exponent, and
+     * bits 3..0 hold the fraction.
+     */
+    int32 exp = static_cast<int32>((((static_cast<uint32>(value) & 0x70) >> 4) ^ 0x4) - 3);
+    /* use the lower four bits of value in this expression */
+    const float mantissa = 1.0 + (static_cast<float>(static_cast<uint32>(value) & 0xf) / 16.0);
+    float result = std::pow(2, exp) * mantissa;
+
+    std::stringstream ss;
+    ss << std::setprecision(10) << result;
+    std::string res;
+    ss >> res;
+    size_t dot = res.find('.');
+    if (dot == std::string::npos) {
+        res += ".0";
+        dot = res.find('.');
+        CHECK_FATAL(dot != std::string::npos, "cannot find '.' in string");
+    }
+    (void)res.erase(dot, 1);
+    std::string integer(res, 0, 1);
+    std::string fraction(res, 1);
+    while (fraction.size() != 1 && fraction[fraction.size() - 1] == '0') {
+        fraction.pop_back();
+    }
+    /* fetch the sign bit of this value */
+    std::string sign = static_cast<uint32>(value) & 0x80 ? "-" : "";
+    (void)emitter.Emit(sign + integer + "."
+ fraction + "e+").Emit(static_cast(dot) - 1); +} + +void A64OpndEmitVisitor::Visit(maplebe::MemOperand *v) +{ + auto a64v = static_cast(v); + MemOperand::AArch64AddressingMode addressMode = a64v->GetAddrMode(); +#if DEBUG + const InsnDesc *md = &AArch64CG::kMd[emitter.GetCurrentMOP()]; + bool isLDSTpair = md->IsLoadStorePair(); + DEBUG_ASSERT(md->Is64Bit() || md->GetOperandSize() <= k32BitSize || md->GetOperandSize() == k128BitSize, + "unexpected opnd size"); +#endif + if (addressMode == MemOperand::kAddrModeBOi) { + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + DEBUG_ASSERT(baseReg != nullptr, "expect an RegOperand here"); + uint32 baseSize = baseReg->GetSize(); + if (baseSize != k64BitSize) { + baseReg->SetSize(k64BitSize); + } + EmitIntReg(*baseReg); + baseReg->SetSize(baseSize); + OfstOperand *offset = a64v->GetOffsetImmediate(); + if (offset != nullptr) { +#ifndef USE_32BIT_REF /* can be load a ref here */ + /* + * Cortex-A57 Software Optimization Guide: + * The ARMv8-A architecture allows many types of load and store accesses to be arbitrarily aligned. + * The Cortex- A57 processor handles most unaligned accesses without performance penalties. + */ +#if DEBUG + if (a64v->IsOffsetMisaligned(md->GetOperandSize())) { + INFO(kLncInfo, "The Memory operand's offset is misaligned:", ""); + } +#endif +#endif /* USE_32BIT_REF */ + if (a64v->IsPostIndexed()) { + DEBUG_ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + (void)emitter.Emit("]"); + if (!offset->IsZero()) { + (void)emitter.Emit(", "); + Visit(offset); + } + } else if (a64v->IsPreIndexed()) { + DEBUG_ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + (void)emitter.Emit("]!"); + } else { + if (CGOptions::IsPIC() && (offset->IsSymOffset() || offset->IsSymAndImmOffset()) && + (offset->GetSymbol()->NeedPIC() || offset->GetSymbol()->IsThreadLocal())) { + std::string gotEntry = offset->GetSymbol()->IsThreadLocal() ? 
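                        /* pick the lo12 page-offset relocation for the GOT slot: thread-local
                         * symbols go through a TLS descriptor, other PIC accesses use the
                         * ordinary GOT entry */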
", #:tlsdesc_lo12:" : ", #:got_lo12:"; + (void)emitter.Emit(gotEntry + offset->GetSymbolName()); + } else { + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + } + (void)emitter.Emit("]"); + } + } else { + (void)emitter.Emit("]"); + } + } else if (addressMode == MemOperand::kAddrModeBOrX) { + /* + * Base plus offset | [base{, #imm}] [base, Xm{, LSL #imm}] [base, Wm, (S|U)XTW {#imm}] + * offset_opnds=nullptr + * offset_opnds=64 offset_opnds=32 + * imm=0 or 3 imm=0 or 2, s/u + */ + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + // After ssa version support different size, the value is changed back + baseReg->SetSize(k64BitSize); + + EmitIntReg(*baseReg); + (void)emitter.Emit(","); + EmitIntReg(*a64v->GetIndexRegister()); + if (a64v->ShouldEmitExtend() || v->GetBaseRegister()->GetSize() > a64v->GetIndexRegister()->GetSize()) { + (void)emitter.Emit(","); + /* extend, #0, of #3/#2 */ + (void)emitter.Emit(a64v->GetExtendAsString()); + if (a64v->GetExtendAsString() == "LSL" || a64v->ShiftAmount() != 0) { + (void)emitter.Emit(" #"); + (void)emitter.Emit(a64v->ShiftAmount()); + } + } + (void)emitter.Emit("]"); + } else if (addressMode == MemOperand::kAddrModeLiteral) { + CHECK_FATAL(opndProp != nullptr, "prop is nullptr in MemOperand::Emit"); + if (opndProp->IsMemLow12()) { + (void)emitter.Emit("#:lo12:"); + } + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)emitter.Emit(v->GetSymbol()->GetName() + std::to_string(pIdx)); + } else if (addressMode == MemOperand::kAddrModeLo12Li) { + (void)emitter.Emit("["); + EmitIntReg(*v->GetBaseRegister()); + + OfstOperand *offset = a64v->GetOffsetImmediate(); + DEBUG_ASSERT(offset != nullptr, "nullptr check"); + + (void)emitter.Emit(", #:lo12:"); + if (v->GetSymbol()->GetAsmAttr() != UStrIdx(0) && + (v->GetSymbol()->GetStorageClass() == kScPstatic || v->GetSymbol()->GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(v->GetSymbol()->GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (v->GetSymbol()->GetStorageClass() == kScPstatic && v->GetSymbol()->IsLocal()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)emitter.Emit(a64v->GetSymbolName() + std::to_string(pIdx)); + } else { + (void)emitter.Emit(a64v->GetSymbolName()); + } + } + if (!offset->IsZero()) { + (void)emitter.Emit("+"); + (void)emitter.Emit(std::to_string(offset->GetOffsetValue())); + } + (void)emitter.Emit("]"); + } else { + DEBUG_ASSERT(false, "nyi"); + } +} + +void A64OpndEmitVisitor::Visit(LabelOperand *v) +{ + emitter.EmitLabelRef(v->GetLabelIndex()); +} + +void A64OpndEmitVisitor::Visit(CondOperand *v) +{ + (void)emitter.Emit(CondOperand::ccStrs[v->GetCode()]); +} + +void A64OpndEmitVisitor::Visit(ExtendShiftOperand *v) +{ + DEBUG_ASSERT(v->GetShiftAmount() <= k4BitSize && v->GetShiftAmount() >= 0, + "shift amount out of range in ExtendShiftOperand"); + auto emitExtendShift = [this, v](const std::string &extendKind) -> void { + (void)emitter.Emit(extendKind); + if (v->GetShiftAmount() != 0) { + (void)emitter.Emit(" #").Emit(v->GetShiftAmount()); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + emitExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + emitExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + emitExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + emitExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + 
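            /* the SXT* cases below sign-extend; like the UXT* cases above, a
             * non-zero shift amount is appended as " #<amount>" */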
emitExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + emitExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + emitExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + emitExtendShift("SXTX"); + break; + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } +} + +void A64OpndEmitVisitor::Visit(BitShiftOperand *v) +{ + (void)emitter + .Emit((v->GetShiftOp() == BitShiftOperand::kLSL) + ? "LSL #" + : ((v->GetShiftOp() == BitShiftOperand::kLSR) ? "LSR #" : "ASR #")) + .Emit(v->GetShiftAmount()); +} + +void A64OpndEmitVisitor::Visit(StImmOperand *v) +{ + CHECK_FATAL(opndProp != nullptr, "opndProp is nullptr in StImmOperand::Emit"); + const MIRSymbol *symbol = v->GetSymbol(); + const bool isThreadLocal = symbol->IsThreadLocal(); + const bool isLiteralLow12 = opndProp->IsLiteralLow12(); + const bool hasGotEntry = CGOptions::IsPIC() && symbol->NeedPIC(); + bool hasPrefix = false; + if (isThreadLocal) { + (void)emitter.Emit(":tlsdesc"); + hasPrefix = true; + } + if (!hasPrefix && hasGotEntry) { + (void)emitter.Emit(":got"); + hasPrefix = true; + } + if (isLiteralLow12) { + std::string lo12String = hasPrefix ? "_lo12" : ":lo12"; + (void)emitter.Emit(lo12String); + hasPrefix = true; + } + if (hasPrefix) { + (void)emitter.Emit(":"); + } + if (symbol->GetAsmAttr() != UStrIdx(0) && + (symbol->GetStorageClass() == kScPstatic || symbol->GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + (void)emitter.Emit(symbol->GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(v->GetName()); + } + } + if (!hasGotEntry && v->GetOffset() != 0) { + (void)emitter.Emit("+" + std::to_string(v->GetOffset())); + } +} + +void A64OpndEmitVisitor::Visit(FuncNameOperand *v) +{ + (void)emitter.Emit(v->GetName()); +} + +void A64OpndEmitVisitor::Visit(CommentOperand *v) +{ + (void)emitter.Emit(v->GetComment()); +} + +void A64OpndEmitVisitor::Visit(ListOperand *v) +{ + (void)opndProp; + size_t nLeft = v->GetOperands().size(); + if (nLeft == 0) { + return; + } + + for (auto it = v->GetOperands().begin(); it != v->GetOperands().end(); ++it) { + Visit(*it); + if (--nLeft >= 1) { + (void)emitter.Emit(", "); + } + } +} + +void A64OpndEmitVisitor::Visit(OfstOperand *v) +{ + int64 value = v->GetValue(); + if (v->IsImmOffset()) { + (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? 
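                      /* 64-bit offsets are printed as-is; narrower ones are first
                       * truncated to their signed 32-bit value */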
value : static_cast(static_cast(value))); + return; + } + const MIRSymbol *symbol = v->GetSymbol(); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + (void)emitter.Emit(":got:" + symbol->GetName()); + } else if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + (void)emitter.Emit(symbol->GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(symbol->GetName()); + } + if (value != 0) { + (void)emitter.Emit("+" + std::to_string(value)); + } +} + +void A64OpndEmitVisitor::EmitVectorOperand(const RegOperand &v) +{ + std::string width; + switch (v.GetVecElementSize()) { + case k8BitSize: + width = "b"; + break; + case k16BitSize: + width = "h"; + break; + case k32BitSize: + width = "s"; + break; + case k64BitSize: + width = "d"; + break; + default: + CHECK_FATAL(false, "unexpected value size for vector element"); + break; + } + (void)emitter.Emit(AArch64CG::vectorRegNames[v.GetRegisterNumber()]); + int32 lanePos = v.GetVecLanePosition(); + if (lanePos == -1) { + (void)emitter.Emit("." + std::to_string(v.GetVecLaneSize()) + width); + } else { + (void)emitter.Emit("." + width + "[" + std::to_string(lanePos) + "]"); + } +} + +void A64OpndDumpVisitor::Visit(RegOperand *v) +{ + std::array prims = {"U", "R", "V", "C", "X", "Vra"}; + std::array classes = {"[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]"}; + uint32 regType = v->GetRegisterType(); + DEBUG_ASSERT(regType < kRegTyLast, "unexpected regType"); + + regno_t reg = v->GetRegisterNumber(); + reg = v->IsVirtualRegister() ? reg : (reg - 1); + uint32 vb = v->GetValidBitsNum(); + LogInfo::MapleLogger() << (v->IsVirtualRegister() ? "vreg:" : " reg:") << prims[regType] << reg << " " + << classes[regType]; + if (v->GetValidBitsNum() != v->GetSize()) { + LogInfo::MapleLogger() << " Vb: [" << vb << "]"; + } + LogInfo::MapleLogger() << " Sz: [" << v->GetSize() << "]"; +} + +void A64OpndDumpVisitor::Visit(ImmOperand *v) +{ + LogInfo::MapleLogger() << "imm:" << v->GetValue(); +} + +void A64OpndDumpVisitor::Visit(MemOperand *a64v) +{ + LogInfo::MapleLogger() << "Mem:"; + LogInfo::MapleLogger() << " size:" << a64v->GetSize() << " "; + LogInfo::MapleLogger() << " isStack:" << a64v->IsStackMem() << "-" << a64v->IsStackArgMem() << " "; + switch (a64v->GetAddrMode()) { + case MemOperand::kAddrModeBOi: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetOffsetOperand()); + switch (a64v->GetIndexOpt()) { + case MemOperand::kIntact: + LogInfo::MapleLogger() << " intact"; + break; + case MemOperand::kPreIndex: + LogInfo::MapleLogger() << " pre-index"; + break; + case MemOperand::kPostIndex: + LogInfo::MapleLogger() << " post-index"; + break; + default: + break; + } + break; + } + case MemOperand::kAddrModeBOrX: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetIndexRegister()); + LogInfo::MapleLogger() << " " << a64v->GetExtendAsString(); + LogInfo::MapleLogger() << " shift: " << a64v->ShiftAmount(); + LogInfo::MapleLogger() << " extend: " << a64v->GetExtendAsString(); + break; + } + case MemOperand::kAddrModeLiteral: + LogInfo::MapleLogger() << "literal: " << a64v->GetSymbolName(); + break; + case MemOperand::kAddrModeLo12Li: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + OfstOperand *offOpnd = a64v->GetOffsetImmediate(); + 
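            /* dump in the same "#:lo12:symbol+offset" shape the emitter prints, so the
             * dump can be compared against the emitted assembly */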
LogInfo::MapleLogger() << "#:lo12:"; + if (a64v->GetSymbol()->GetStorageClass() == kScPstatic && a64v->GetSymbol()->IsLocal()) { + PUIdx pIdx = CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetPuidx(); + LogInfo::MapleLogger() << a64v->GetSymbolName() << std::to_string(pIdx); + } else { + LogInfo::MapleLogger() << a64v->GetSymbolName(); + } + LogInfo::MapleLogger() << "+" << std::to_string(offOpnd->GetOffsetValue()); + break; + } + default: + DEBUG_ASSERT(false, "error memoperand dump"); + break; + } +} + +void A64OpndDumpVisitor::Visit(CondOperand *v) +{ + LogInfo::MapleLogger() << "CC: " << CondOperand::ccStrs[v->GetCode()]; +} +void A64OpndDumpVisitor::Visit(StImmOperand *v) +{ + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetOffset(); +} +void A64OpndDumpVisitor::Visit(BitShiftOperand *v) +{ + BitShiftOperand::ShiftOp shiftOp = v->GetShiftOp(); + uint32 shiftAmount = v->GetShiftAmount(); + LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kLSL) + ? "LSL: " + : ((shiftOp == BitShiftOperand::kLSR) ? "LSR: " : "ASR: ")); + LogInfo::MapleLogger() << shiftAmount; +} +void A64OpndDumpVisitor::Visit(ExtendShiftOperand *v) +{ + auto dumpExtendShift = [v](const std::string &extendKind) -> void { + LogInfo::MapleLogger() << extendKind; + if (v->GetShiftAmount() != 0) { + LogInfo::MapleLogger() << " : " << v->GetShiftAmount(); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + dumpExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + dumpExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + dumpExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + dumpExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + dumpExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + dumpExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + dumpExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + dumpExtendShift("SXTX"); + break; + default: + DEBUG_ASSERT(false, "should not be here"); + break; + } +} +void A64OpndDumpVisitor::Visit(LabelOperand *v) +{ + LogInfo::MapleLogger() << "label:" << v->GetLabelIndex(); +} +void A64OpndDumpVisitor::Visit(FuncNameOperand *v) +{ + LogInfo::MapleLogger() << "func :" << v->GetName(); +} +void A64OpndDumpVisitor::Visit(CommentOperand *v) +{ + LogInfo::MapleLogger() << " #" << v->GetComment(); +} +void A64OpndDumpVisitor::Visit(PhiOperand *v) +{ + auto &phiList = v->GetOperands(); + for (auto it = phiList.begin(); it != phiList.end();) { + Visit(it->second); + LogInfo::MapleLogger() << " fBB<" << it->first << ">"; + LogInfo::MapleLogger() << (++it == phiList.end() ? "" : " ,"); + } +} +void A64OpndDumpVisitor::Visit(ListOperand *v) +{ + auto &opndList = v->GetOperands(); + for (auto it = opndList.begin(); it != opndList.end();) { + Visit(*it); + LogInfo::MapleLogger() << (++it == opndList.end() ? "" : " ,"); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_isa.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c599f61bfee5da1537ab86fc126b65358652d14d --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_isa.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_isa.h" +#include "insn.h" + +namespace maplebe { +/* + * Get the ldp/stp corresponding to ldr/str + * mop : a ldr or str machine operator + */ +MOperator GetMopPair(MOperator mop) +{ + switch (mop) { + case MOP_xldr: + return MOP_xldp; + case MOP_wldr: + return MOP_wldp; + case MOP_xstr: + return MOP_xstp; + case MOP_wstr: + return MOP_wstp; + case MOP_dldr: + return MOP_dldp; + case MOP_qldr: + return MOP_qldp; + case MOP_sldr: + return MOP_sldp; + case MOP_dstr: + return MOP_dstp; + case MOP_sstr: + return MOP_sstp; + case MOP_qstr: + return MOP_qstp; + default: + DEBUG_ASSERT(false, "should not run here"); + return MOP_undef; + } +} +namespace AArch64isa { +MOperator FlipConditionOp(MOperator flippedOp) +{ + switch (flippedOp) { + case AArch64MopT::MOP_beq: + return AArch64MopT::MOP_bne; + case AArch64MopT::MOP_bge: + return AArch64MopT::MOP_blt; + case AArch64MopT::MOP_bgt: + return AArch64MopT::MOP_ble; + case AArch64MopT::MOP_bhi: + return AArch64MopT::MOP_bls; + case AArch64MopT::MOP_bhs: + return AArch64MopT::MOP_blo; + case AArch64MopT::MOP_ble: + return AArch64MopT::MOP_bgt; + case AArch64MopT::MOP_blo: + return AArch64MopT::MOP_bhs; + case AArch64MopT::MOP_bls: + return AArch64MopT::MOP_bhi; + case AArch64MopT::MOP_blt: + return AArch64MopT::MOP_bge; + case AArch64MopT::MOP_bne: + return AArch64MopT::MOP_beq; + case AArch64MopT::MOP_bpl: + return AArch64MopT::MOP_bmi; + case AArch64MopT::MOP_xcbnz: + return AArch64MopT::MOP_xcbz; + case AArch64MopT::MOP_wcbnz: + return AArch64MopT::MOP_wcbz; + case AArch64MopT::MOP_xcbz: + return AArch64MopT::MOP_xcbnz; + case AArch64MopT::MOP_wcbz: + return AArch64MopT::MOP_wcbnz; + case AArch64MopT::MOP_wtbnz: + return AArch64MopT::MOP_wtbz; + case AArch64MopT::MOP_wtbz: + return AArch64MopT::MOP_wtbnz; + case AArch64MopT::MOP_xtbnz: + return AArch64MopT::MOP_xtbz; + case AArch64MopT::MOP_xtbz: + return AArch64MopT::MOP_xtbnz; + default: + break; + } + return AArch64MopT::MOP_undef; +} + +uint32 GetJumpTargetIdx(const Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + /* unconditional jump */ + case MOP_xuncond: { + return kInsnFirstOpnd; + } + case MOP_xbr: { + DEBUG_ASSERT(insn.GetOperandSize() == 2, "ERR"); + return kInsnSecondOpnd; + } + /* conditional jump */ + case MOP_bmi: + case MOP_bvc: + case MOP_bls: + case MOP_blt: + case MOP_ble: + case MOP_blo: + case MOP_beq: + case MOP_bpl: + case MOP_bhs: + case MOP_bvs: + case MOP_bhi: + case MOP_bgt: + case MOP_bge: + case MOP_bne: + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + return kInsnSecondOpnd; + } + case MOP_wtbz: + case MOP_xtbz: + case MOP_wtbnz: + case MOP_xtbnz: { + return kInsnThirdOpnd; + } + default: + CHECK_FATAL(false, "Not a jump insn"); + } + return kInsnFirstOpnd; +} +} /* namespace AArch64isa */ +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_live.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_live.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..e470c231ca88889f6294cdad29798f5f4318f986 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_live.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_live.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64LiveAnalysis::GenerateReturnBBDefUse(BB &bb) const +{ + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + auto *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64LiveAnalysis::InitEhDefine(BB &bb) +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand ®R1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR1); + bb.InsertInsnBegin(pseudoInsn1); + + /* Insert MOP_pseudo_eh_def_x R0. */ + RegOperand ®R0 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &pseudoInsn2 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR0); + bb.InsertInsnBegin(pseudoInsn2); +} + +bool AArch64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) +{ + regno_t regNO = reg + R0; + if (regNO < R8 || (RLR <= regNO && regNO <= RZR)) { + return true; + } + return false; +} + +void AArch64LiveAnalysis::ProcessCallInsnParam(BB &bb, const Insn &insn) const +{ + /* R0 ~ R7(R0 + 0 ~ R0 + 7) and V0 ~ V7 (V0 + 0 ~ V0 + 7) is parameter register */ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? 
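                    /* pick the register class matching the physical register the callee
                     * is known (via its IPA-collected refered regs) to touch */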
kRegTyFloat : kRegTyInt); + CollectLiveInfo(bb, *opnd, true, false); + } + return; + } + } + for (uint32 i = 0; i < 8; ++i) { + Operand &phyOpndR = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0 + i), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpndR, true, false); + Operand &phyOpndV = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0 + i), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpndV, true, false); + } +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..56e8748036cdd9c5647d8264fa92401c5d70e117 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" + +namespace maplebe { +using namespace maple; + +/* + * Returns stack space required for a call + * which is used to pass arguments that cannot be + * passed through registers + */ +uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) +{ + /* instantiate a parm locator */ + CCImpl &parmLocator = *static_cast(cgFunc)->GetOrCreateLocator(CCImpl::GetCallConvKind(stmt)); + uint32 sizeOfArgsToStkPass = 0; + size_t i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + if (std::strcmp(stmt.GetOpName(), "call") == 0) { + CallNode *callNode = static_cast(&stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + CHECK_FATAL(fn != nullptr, "get MIRFunction failed"); + MIRSymbol *symbol = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if (symbol->GetName() == "MCC_CallFastNative" || symbol->GetName() == "MCC_CallFastNativeExt" || + symbol->GetName() == "MCC_CallSlowNative0" || symbol->GetName() == "MCC_CallSlowNative1" || + symbol->GetName() == "MCC_CallSlowNative2" || symbol->GetName() == "MCC_CallSlowNative3" || + symbol->GetName() == "MCC_CallSlowNative4" || symbol->GetName() == "MCC_CallSlowNative5" || + symbol->GetName() == "MCC_CallSlowNative6" || symbol->GetName() == "MCC_CallSlowNative7" || + symbol->GetName() == "MCC_CallSlowNative8" || symbol->GetName() == "MCC_CallSlowNativeExt") { + ++i; + } + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or 
OP_iread"); + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, + "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else { + /* OP_iread */ + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, + "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + return sizeOfArgsToStkPass; +} + +void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const +{ + if (be.GetTypeSize(typeIdx) > k16ByteSize) { + /* size > 16 is passed on stack, the formal is just a pointer to the copy on stack. 
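         * This mirrors the AAPCS64 rule for large composites: the caller copies the
         * aggregate to memory it allocates and passes a pointer to that copy, so the
         * formal's slot here only needs pointer size and alignment.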
*/
+        if (CGOptions::IsArm64ilp32()) {
+            align = k8ByteSize;
+            size = k8ByteSize;
+        } else {
+            align = GetPointerSize();
+            size = GetPointerSize();
+        }
+    } else {
+        align = be.GetTypeAlign(typeIdx);
+        size = static_cast<uint32>(be.GetTypeSize(typeIdx));
+    }
+}
+
+void AArch64MemLayout::SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const
+{
+    uint32 size;
+    uint32 align;
+    SetSizeAlignForTypeIdx(typeIdx, size, align);
+    segment.SetSize(static_cast<uint32>(RoundUp(static_cast<uint64>(segment.GetSize()), align)));
+    symbolAlloc.SetOffset(segment.GetSize());
+    segment.SetSize(segment.GetSize() + size);
+    segment.SetSize(static_cast<uint32>(RoundUp(static_cast<uint64>(segment.GetSize()), GetPointerSize())));
+}
+
+void AArch64MemLayout::LayoutVarargParams()
+{
+    uint32 nIntRegs = 0;
+    uint32 nFpRegs = 0;
+    AArch64CallConvImpl parmlocator(be);
+    CCLocInfo ploc;
+    MIRFunction *func = mirFunction;
+    if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) {
+        for (uint32 i = 0; i < func->GetFormalCount(); i++) {
+            if (i == 0) {
+                if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) {
+                    TyIdx tyIdx = func->GetFuncRetStructTyIdx();
+                    if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) {
+                        continue;
+                    }
+                }
+            }
+            MIRType *ty = func->GetNthParamType(i);
+            CHECK_FATAL(mirFunction->GetAttr(FUNCATTR_ccall), "only the C calling convention is supported here");
+            parmlocator.LocateNextParm(*ty, ploc, i == 0, func);
+            if (ploc.reg0 != kRinvalid) {
+                if (ploc.reg0 >= R0 && ploc.reg0 <= R7) {
+                    nIntRegs++;
+                } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg1 != kRinvalid) {
+                if (ploc.reg1 >= R0 && ploc.reg1 <= R7) {
+                    nIntRegs++;
+                } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg2 != kRinvalid) {
+                if (ploc.reg2 >= R0 && ploc.reg2 <= R7) {
+                    nIntRegs++;
+                } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) {
+                    nFpRegs++;
+                }
+            }
+            if (ploc.reg3 != kRinvalid) {
+                if (ploc.reg3 >= R0 && ploc.reg3 <= R7) {
+                    nIntRegs++;
+                } else if (ploc.reg3 >= V0 && ploc.reg3 <= V7) {
+                    nFpRegs++;
+                }
+            }
+        }
+        if (CGOptions::IsArm64ilp32()) {
+            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * k8ByteSize);
+        } else {
+            SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * GetPointerSize());
+        }
+        if (CGOptions::UseGeneralRegOnly()) {
+            SetSizeOfVRSaveArea(0);
+        } else {
+            if (CGOptions::IsArm64ilp32()) {
+                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * k8ByteSize * k2ByteSize);
+            } else {
+                SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * GetPointerSize() * k2ByteSize);
+            }
+        }
+    }
+}
+
+void AArch64MemLayout::LayoutFormalParams()
+{
+    bool isLmbc = (be.GetMIRModule().GetFlavor() == kFlavorLmbc);
+    if (isLmbc && mirFunction->GetFormalCount() == 0) {
+        /*
+         * lmbc : upformalsize - size of formals passed from caller's frame into current function
+         *        framesize - total frame size of current function used by Maple IR
+         *        outparmsize - portion of frame size of current function used by call parameters
+         */
+        segArgsStkPassed.SetSize(mirFunction->GetOutParmSize());
+        segArgsRegPassed.SetSize(mirFunction->GetOutParmSize());
+        return;
+    }
+
+    CCImpl &parmLocator = *static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateLocator(cgFunc->GetCurCallConvKind());
+    CCLocInfo ploc;
+    for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
+        MIRSymbol *sym = mirFunction->GetFormal(i);
+        uint32 stIndex = sym->GetStIndex();
+        AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<AArch64SymbolAlloc>();
+        SetSymAllocInfo(stIndex, *symLoc);
+        if (i == 0) {
+            if (mirFunction->IsReturnStruct() &&
mirFunction->IsFirstArgReturn()) { + symLoc->SetMemSegment(GetSegArgsRegPassed()); + symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); + TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) { + if (CGOptions::IsArm64ilp32()) { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); + } else { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize()); + } + } + continue; + } + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); + uint32 ptyIdx = ty->GetTypeIndex(); + parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + if (ploc.reg0 != kRinvalid) { /* register */ + symLoc->SetRegisters(static_cast(ploc.reg0), static_cast(ploc.reg1), + static_cast(ploc.reg2), static_cast(ploc.reg3)); + if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + symLoc->SetMemSegment(segRefLocals); + SetSegmentSize(*symLoc, segRefLocals, ptyIdx); + } else if (!sym->IsPreg()) { + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsRegPassed()); + /* the type's alignment requirement may be smaller than a registser's byte size */ + if (ty->GetPrimType() == PTY_agg) { + /* struct param aligned on 8 byte boundary unless it is small enough */ + if (CGOptions::IsArm64ilp32()) { + align = k8ByteSize; + } else { + align = GetPointerSize(); + } + } + uint32 tSize = 0; + if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || + AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { + align = k16ByteSize; + } + segArgsRegPassed.SetSize(static_cast(RoundUp(segArgsRegPassed.GetSize(), align))); + symLoc->SetOffset(segArgsRegPassed.GetSize()); + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } else if (isLmbc) { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); + } + } else { /* stack */ + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + /* We need it as dictated by the AArch64 ABI $5.4.2 C12 */ + if (CGOptions::IsArm64ilp32()) { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize))); + } else { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + SetLocalRegLocInfo(sym->GetStIdx(), *symLoc); + AArch64SymbolAlloc *symLoc1 = memAllocator->GetMemPool()->New(); + symLoc1->SetMemSegment(segRefLocals); + SetSegmentSize(*symLoc1, segRefLocals, ptyIdx); + SetSymAllocInfo(stIndex, *symLoc1); + } + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf() && ploc.reg0 == kRinvalid) { + cgFunc->AddDIESymbolLocation(sym, symLoc); + } + } +} + +void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) +{ + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize()); + return; + } + + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = 
mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + if (sym->IsRefType()) { + if (mirFunction->GetRetRefSym().find(sym) != mirFunction->GetRetRefSym().end()) { + /* try to put ret_ref at the end of segRefLocals */ + returnDelays.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } else { + if (sym->GetName() == "__EARetTemp__" || sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") { + tempVar.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + uint32 tSize = 0; + if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || + AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { + align = k16ByteSize; + } + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) { + cgFunc->AddDIESymbolLocation(sym, symLoc); + } + } +} + +void AArch64MemLayout::LayoutEAVariales(std::vector &tempVar) +{ + for (auto sym : tempVar) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register"); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, int32 &structCopySize, + int32 &maxParmStackSize) +{ + for (auto sym : returnDelays) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + DEBUG_ASSERT(!symLoc->IsRegister(), "expect not register"); + + DEBUG_ASSERT(sym->IsRefType(), "expect reftype "); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segArgsToStkPass.SetSize(mirFunction->GetOutParmSize() + kDivide2 * k8ByteSize); + } else { + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + } + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } else { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + /* 8-VirtualRegNode occupy byte number 
*/ + aarchCGFunc->SetCatchRegno(cgFunc->NewVReg(kRegTyInt, 8)); + } + segRefLocals.SetSize(static_cast(RoundUp(segRefLocals.GetSize(), GetPointerSize()))); + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8ByteSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), GetPointerSize()))); + } +} + +void AArch64MemLayout::LayoutActualParams() +{ + for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { + if (i == 0) { + if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) { + continue; + } + } + MIRSymbol *sym = mirFunction->GetFormal(i); + if (sym->IsPreg()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + AArch64SymbolAlloc *symLoc = static_cast(GetSymAllocInfo(stIndex)); + if (symLoc->GetMemSegment() == &GetSegArgsRegPassed()) { /* register */ + /* + * In O0, we store parameters passed via registers into memory. + * So, each of such parameter needs to get assigned storage in stack. + * If a function parameter is never accessed in the function body, + * and if we don't create its memory operand here, its offset gets + * computed when the instruction to store its value into stack + * is generated in the prologue when its memory operand is created. + * But, the parameter would see a different StackFrameSize than + * the parameters that are accessed in the body, because + * the size of the storage for FP/LR is added to the stack frame + * size in between. + * To make offset assignment easier, we create a memory operand + * for each of function parameters in advance. + * This has to be done after all of formal parameters and local + * variables get assigned their respecitve storage, i.e. + * CallFrameSize (discounting callee-saved and FP/LR) is known. + */ + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); + uint32 ptyIdx = ty->GetTypeIndex(); + static_cast(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte); + } + } +} + +void AArch64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) +{ + LayoutVarargParams(); + LayoutFormalParams(); + /* + * We do need this as LDR/STR with immediate + * requires imm be aligned at a 8/4-byte boundary, + * and local varirables may need 8-byte alignment. 
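     *
     * For orientation, the segments summed into this frame (see StackFrameSize()
     * below) are: the register-passed-arg save area, callee-saved registers,
     * ref locals, locals, spill slots, the varargs GR/VR save areas, and the
     * outgoing stack-arg area, which sits at the bottom so that SP-relative
     * stores can pass call arguments.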
+ */ + if (CGOptions::IsArm64ilp32()) { + segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), k8ByteSize)); + /* we do need this as SP has to be aligned at a 16-bytes bounardy */ + segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize + k8ByteSize)); + } else { + segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize())); + segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize())); + } + /* allocate the local variables in the stack */ + std::vector EATempVar; + std::vector retDelays; + LayoutLocalVariables(EATempVar, retDelays); + LayoutEAVariales(EATempVar); + + /* handle ret_ref sym now */ + LayoutReturnRef(retDelays, structCopySize, maxParmStackSize); + + /* + * for the actual arguments that cannot be pass through registers + * need to allocate space for caller-save registers + */ + LayoutActualParams(); + + fixStackSize = static_cast(RealStackFrameSize()); + cgFunc->SetUseFP(cgFunc->UseFP() || fixStackSize > kMaxPimm32); +} + +void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() +{ + MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab(); + + /* BUG: n_regs include index 0 which is not a valid preg index. */ + size_t nRegs = pregTab->Size(); + spillLocTable.resize(nRegs); + for (size_t i = 1; i < nRegs; ++i) { + PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segLocals); + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); + symLoc->SetOffset(segLocals.GetSize()); + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + spillLocTable[i] = symLoc; + } + + if (!cgFunc->GetMirModule().IsJavaModule()) { + return; + } + + /* + * Allocate additional stack space for "thrownval". + * segLocals need 8 bit align + */ + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(RoundUp(segLocals.GetSize(), k8ByteSize)); + } else { + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPointerSize())); + } + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + RegOperand &baseOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand(); + int32 offset = static_cast(segLocals.GetSize()); + + OfstOperand *offsetOpnd = &aarchCGFunc->CreateOfstOpnd(offset + k16BitSize, k64BitSize); + MemOperand *throwMem = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, baseOpnd, + static_cast(nullptr), offsetOpnd, nullptr); + aarchCGFunc->SetCatchOpnd(*throwMem); + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(segLocals.GetSize() + k8ByteSize); + } else { + segLocals.SetSize(segLocals.GetSize() + GetPointerSize()); + } +} + +uint64 AArch64MemLayout::StackFrameSize() const +{ + uint64 total = segArgsRegPassed.GetSize() + static_cast(cgFunc)->SizeOfCalleeSaved() + + GetSizeOfRefLocals() + locals().GetSize() + GetSizeOfSpillReg() + + cgFunc->GetFunction().GetFrameReseverdSlot(); + + if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + if (GetSizeOfGRSaveArea() > 0) { + total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (GetSizeOfVRSaveArea() > 0) { + total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + /* + * if the function does not have VLA nor alloca, + * we allocate space for arguments to stack-pass + * in the call frame; otherwise, it has to be allocated for each call and reclaimed afterward. 
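     * (With a VLA or alloca the stack pointer moves at run time, so a fixed
     * outgoing-argument area cannot be pre-reserved in the frame.)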
+ */ + total += segArgsToStkPass.GetSize(); + return RoundUp(total, kAarch64StackPtrAlignment); +} + +uint32 AArch64MemLayout::RealStackFrameSize() const +{ + auto size = StackFrameSize(); + if (cgFunc->GetCG()->IsStackProtectorStrong() || cgFunc->GetCG()->IsStackProtectorAll()) { + size += static_cast(kAarch64StackPtrAlignment); + } + return static_cast(size); +} + +int32 AArch64MemLayout::GetRefLocBaseLoc() const +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto beforeSize = GetSizeOfLocals(); + if (aarchCGFunc->UsedStpSubPairForCallFrameAllocation()) { + return static_cast(beforeSize); + } + return static_cast(beforeSize + kSizeOfFplr); +} + +int32 AArch64MemLayout::GetGRSaveAreaBaseLoc() +{ + int32 total = static_cast(RealStackFrameSize() - RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()) + cgFunc->GetFunction().GetFrameReseverdSlot(); + return total; +} + +int32 AArch64MemLayout::GetVRSaveAreaBaseLoc() +{ + int32 total = + static_cast((RealStackFrameSize() - RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)) - + RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()) + cgFunc->GetFunction().GetFrameReseverdSlot(); + return total; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3c96f257efa6cc1380bc208d397e2c2875654641 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_obj_emitter.cpp @@ -0,0 +1,1732 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_obj_emitter.h" +#include "aarch64_isa.h" + +namespace { +enum ShiftNumber : maple::uint8 { + kShiftFour = 4, + kShiftFive = 5, + kShiftSix = 6, + kShiftEight = 8, + kShiftTen = 10, + kShiftTwelve = 12, + kShiftThirteen = 13, + kShiftFifteen = 15, + kShiftSixteen = 16, + kShiftNineteen = 19, + kShiftTwenty = 20, + kShiftTwentyOne = 21, + kShiftTwentyTwo = 22, + kShiftTwentyFour = 24, + kShiftTwentyNine = 29, +}; + +enum ShiftTypeValue : maple::uint32 { + kShiftLSL = 0, + kShiftLSR = 1, + kShiftASR = 2, +}; + +/* from armv8 manual C1.2.3 */ +maple::uint8 ccEncode[maplebe::kCcLast] = { +#define CONDCODE(a, encode) encode, +#include "aarch64_cc.def" +#undef CONDCODE +}; +}; // namespace + +namespace maplebe { +/* fixup b .label, b(cond) .label, ldr label insn */ +void AArch64ObjFuncEmitInfo::HandleLocalBranchFixup(const std::vector &label2Offset, + const std::vector &symbol2Offset) +{ + for (auto *fixup : localFixups) { + uint32 useOffset = fixup->GetOffset(); + uint32 useLabelIndex = fixup->GetLabelIndex(); + uint32 defOffset = label2Offset[useLabelIndex]; + + FixupKind fixupKind = fixup->GetFixupKind(); + if (defOffset == 0xFFFFFFFFULL) { + CHECK_FATAL(static_cast(fixupKind) == kAArch64LoadPCRelImm19, "fixup is not local"); + } + if (static_cast(fixupKind) == kAArch64CondBranchPCRelImm19 || + static_cast(fixupKind) == kAArch64CompareBranchPCRelImm19) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x7FFFF; +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + CHECK_FATAL(useOffset < textData.size(), "out of range"); + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "after contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + } else if (static_cast(fixupKind) == kAArch64UnCondBranchPCRelImm26) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x3FFFFFF; +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + CHECK_FATAL(useOffset < textData.size(), "out of vector size!"); + uint32 newValue = GetTextDataElem32(useOffset) | (pcRelImm & mask); + SwapTextData(&newValue, useOffset, sizeof(uint32)); +#ifdef EMIT_DEBUG + LogInfo::MapleLogger() << "after contents: " << std::hex << GetTextDataElem32(useOffset) << "\n"; +#endif + } else if (static_cast(fixupKind) == kAArch64TestBranchPCRelImm14) { + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x3FFF; + CHECK_FATAL(useOffset < textData.size(), "out of vector size"); + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); + } else if (static_cast(fixupKind) == kAArch64LoadPCRelImm19) { + defOffset = symbol2Offset[useLabelIndex]; + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 mask = 0x7FFFF; + uint32 newValue = GetTextDataElem32(useOffset) | ((pcRelImm & mask) << kShiftFive); + SwapTextData(&newValue, useOffset, sizeof(uint32)); + } + } + localFixups.clear(); +} + +void AArch64ObjEmitter::HandleTextSectionGlobalFixup() +{ + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + for (auto *fixup : content->GetGlobalFixups()) { + if (fixup->GetFixupKind() == kLSDAFixup) { + HandleLSDAFixup(*content, *fixup); + continue; + } + switch 
(static_cast(fixup->GetFixupKind())) { + case kAArch64CallPCRelImm26: { + HandleCallFixup(*content, *fixup); + break; + } + case kAArch64PCRelAdrImm21: { + HandleAdrFixup(*content, *fixup); + break; + } + default: + DEBUG_ASSERT(false, "unsupported FixupKind"); + break; + } + } + } +} + +void AArch64ObjEmitter::HandleTextSectionFixup() +{ + relaSection = memPool->New(".rela.text", SHT_RELA, SHF_INFO_LINK, textSection->GetIndex(), 8, + *symbolTabSection, *this, *memPool); + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + for (auto *fixup : content->GetGlobalFixups()) { + switch (static_cast(fixup->GetFixupKind())) { + case kAArch64CallPCRelImm26: { + auto nameIndex = strTabSection->AddString(fixup->GetLabel()); + symbolTabSection->AppendSymbol({static_cast(nameIndex), + static_cast((STB_GLOBAL << kShiftFour) + (STT_NOTYPE & 0xf)), + 0, 0, 0, 0}); + symbolTabSection->AppendIdxInSymbols(0); // 0: temporarily + uint32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_CALL26; + relaSection->AppendRela( + {offset, static_cast((symbolTabSection->GetIdxInSymbols(0) << 32) + (type & 0xffffffff)), + relOffset}); + break; + } + case kAArch64PCRelAdrpImm21: { + uint32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_ADR_PREL_PG_HI21; + int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1; + relaSection->AppendRela( + {offset, + static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << 32) + + (type & 0xffffffff)), + relOffset}); + break; + } + case kAArch64PCRelAdrImm21: { + break; + } + case kAArch64LdrPCRelLo12: + case kAArch64AddPCRelLo12: { + int32 relOffset = fixup->GetRelOffset(); + uint32 offset = fixup->GetOffset(); + uint64 type = R_AARCH64_ADD_ABS_LO12_NC; + int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1; + relaSection->AppendRela( + {offset, + static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << 32) + + (type & 0xffffffff)), + relOffset}); + break; + } + default: + DEBUG_ASSERT(false, "unsupported FixupKind"); + break; + } + } + } +} + +void AArch64ObjEmitter::HandleCallFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup) +{ + AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast(funcEmitInfo); + uint32 useOffset = objFuncEmitInfo.GetStartOffset() + fixup.GetOffset(); + const std::string &funcName = fixup.GetLabel(); + auto str2objSymbolItr = globalLabel2Offset.find(funcName); + if (str2objSymbolItr != globalLabel2Offset.end()) { + uint32 defOffset = str2objSymbolItr->second.offset; + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + uint32 newValue = objFuncEmitInfo.GetTextDataElem32(fixup.GetOffset()) | (pcRelImm & 0x3FFFFFF); + objFuncEmitInfo.SwapTextData(&newValue, fixup.GetOffset(), sizeof(uint32)); + } +} + +void AArch64ObjEmitter::HandleAdrFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup) +{ + AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast(funcEmitInfo); + uint32 useOffset = objFuncEmitInfo.GetStartOffset() + fixup.GetOffset(); + const std::string &label = fixup.GetLabel(); + auto str2objSymbolItr = globalLabel2Offset.find(label); + if (str2objSymbolItr != globalLabel2Offset.end()) { + uint32 defOffset = str2objSymbolItr->second.offset + fixup.GetRelOffset(); + uint32 pcRelImm = defOffset - useOffset; + uint32 immLow = (pcRelImm & 0x3) << kShiftTwentyNine; + uint32 immHigh = ((pcRelImm >> k2BitSize) & 0x7FFFF) << kShiftFive; + uint32 newValue = objFuncEmitInfo.GetTextDataElem32(fixup.GetOffset()) 
| immLow | immHigh;
+ objFuncEmitInfo.SwapTextData(&newValue, fixup.GetOffset(), sizeof(uint32));
+ }
+}
+
+void AArch64ObjEmitter::HandleLSDAFixup(ObjFuncEmitInfo &funcEmitInfo, const Fixup &fixup)
+{
+ AArch64ObjFuncEmitInfo &objFuncEmitInfo = static_cast<AArch64ObjFuncEmitInfo &>(funcEmitInfo);
+ uint32 value = objFuncEmitInfo.GetExceptStartOffset() - objFuncEmitInfo.GetStartOffset();
+ objFuncEmitInfo.SwapTextData(&value, fixup.GetOffset(), sizeof(uint32));
+}
+
+void AArch64ObjEmitter::AppendTextSectionData()
+{
+ auto &contents = GetContents();
+ for (auto *content : contents) {
+ if (content == nullptr) {
+ continue;
+ }
+ auto funcTextData = content->GetTextData();
+ textSection->AppendData(funcTextData);
+ }
+}
+
+void AArch64ObjEmitter::AppendGlobalLabel()
+{
+ auto &contents = GetContents();
+ uint32 offset = 0;
+ for (auto *content : contents) {
+ if (content == nullptr) {
+ continue;
+ }
+ content->SetStartOffset(offset);
+ ObjLabel objLabel = {offset, content->GetTextDataSize()};
+ std::string funcName(content->GetFuncName().c_str());
+ const auto &emitMemoryManager = CGOptions::GetInstance().GetEmitMemoryManager();
+ if (emitMemoryManager.funcAddressSaver != nullptr) {
+ emitMemoryManager.funcAddressSaver(emitMemoryManager.codeSpace, funcName, offset);
+ }
+ if (emitMemoryManager.codeSpace != nullptr) {
+ const auto &offset2StackMapInfo = content->GetOffset2StackMapInfo();
+ for (const auto &elem : offset2StackMapInfo) {
+ const auto &stackMapInfo = elem.second;
+ emitMemoryManager.pc2CallSiteInfoSaver(
+ emitMemoryManager.codeSpace, content->GetStartOffset() + elem.first, stackMapInfo.referenceMap);
+ emitMemoryManager.pc2DeoptInfoSaver(emitMemoryManager.codeSpace, content->GetStartOffset() + elem.first,
+ stackMapInfo.deoptInfo);
+ }
+ }
+
+ offset += content->GetTextDataSize();
+ RegisterGlobalLabel(funcName, objLabel);
+ /* register the start of each switch table */
+ const auto &switchTableOffset = content->GetSwitchTableOffset();
+ for (auto &elem : switchTableOffset) {
+ ObjLabel switchTableLabel = {elem.second + content->GetStartOffset(), 0};
+ RegisterGlobalLabel(elem.first.c_str(), switchTableLabel);
+ }
+ }
+}
+
+void AArch64ObjEmitter::AppendSymsToSymTabSec()
+{
+ // section symbol
+ AddSymbol(".text", textSection->GetDataSize(), *textSection, 0);
+ /* Indexed by the inverse of the section index.
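+ * "Inverse" here means two's-complement negation: ~idx + 1 == -idx, so the
+ * section with index n is keyed as -n in the symbol-index map (a reading of
+ * this code's convention, not something mandated by the ELF format).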
*/ + symbolTabSection->AppendIdxInSymbols(~textSection->GetIndex() + 1); + AddSymbol(".data", dataSection->GetDataSize(), *dataSection, 0); + symbolTabSection->AppendIdxInSymbols(~dataSection->GetIndex() + 1); + + Address offset = 0; + auto &contents = GetContents(); + for (auto *content : contents) { + if (content == nullptr) { + continue; + } + // func symbol + AddFuncSymbol(content->GetFuncName(), content->GetTextData().size(), offset); + offset += content->GetTextData().size(); + } +} + +void AArch64ObjEmitter::InitSections() +{ + (void)memPool->New(" ", SHT_NULL, 0, 0, *this, *memPool); + textSection = memPool->New(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, 4, *this, *memPool); + dataSection = memPool->New(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, 8, *this, *memPool); + strTabSection = memPool->New(".strtab", SHT_STRTAB, 0, 1, *this, *memPool); + symbolTabSection = + memPool->New(".symtab", SHT_SYMTAB, 0, sizeof(Symbol), *this, *memPool, *strTabSection); + shStrSection = memPool->New(".shstrtab", SHT_STRTAB, 0, 1, *this, *memPool); +} + +void AArch64ObjEmitter::LayoutSections() +{ + /* Init elf file header */ + InitELFHeader(); + globalOffset = sizeof(FileHeader); + globalOffset = Alignment::Align(globalOffset, k8ByteSize); + + globalAddr = globalOffset; + + for (auto *section : sections) { + section->SetSectionHeaderNameIndex(static_cast(shStrSection->AddString(section->GetName()))); + } + + for (auto *section : sections) { + globalOffset = Alignment::Align(globalOffset, section->GetAlign()); + globalAddr = Alignment::Align
(globalAddr, section->GetAlign()); + section->Layout(); + } + + globalOffset = Alignment::Align(globalOffset, k8ByteSize); + header.e_shoff = globalOffset; + header.e_phnum = 0; + header.e_shnum = sections.size(); +} + +void AArch64ObjEmitter::UpdateMachineAndFlags(FileHeader &header) +{ + header.e_machine = EM_AARCH64; + header.e_flags = 0; +} + +/* input insn, ang get the binary code of insn */ +uint32 AArch64ObjEmitter::GetBinaryCodeForInsn(const Insn &insn, const std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) const +{ + const InsnDesc &md = AArch64CG::kMd[insn.GetMachineOpcode()]; + uint32 binInsn = md.GetMopEncode(); + switch (md.GetEncodeType()) { + case kMovReg: + return GenMovReg(insn); + + case kMovImm: + return GenMovImm(insn); + + case kAddSubExtendReg: + return binInsn | GenAddSubExtendRegInsn(insn); + + case kAddSubImm: + return binInsn | GenAddSubImmInsn(insn); + + case kAddSubShiftImm: + return binInsn | GenAddSubShiftImmInsn(insn); + + case kAddSubReg: + return binInsn | GenAddSubRegInsn(insn); + + case kAddSubShiftReg: + return binInsn | GenAddSubShiftRegInsn(insn); + + case kBitfield: { + if (insn.GetMachineOpcode() == MOP_xuxtw64) { + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + opnd |= (0b11111 << kShiftFive) | AArch64CG::kMd[MOP_wiorrrr].GetMopEncode(); + return opnd; + } + return binInsn | GenBitfieldInsn(insn); + } + + case kExtract: + return binInsn | GenExtractInsn(insn); + + case kBranchImm: + return binInsn | GenBranchImmInsn(insn, label2Offset, objFuncEmitInfo); + + case kBranchReg: + return binInsn | GenBranchRegInsn(insn); + + case kCompareBranch: + return binInsn | GenCompareBranchInsn(insn, objFuncEmitInfo); + + case kCondCompareImm: + return binInsn | GenCondCompareImmInsn(insn); + + case kCondCompareReg: + return binInsn | GenCondCompareRegInsn(insn); + + case kConditionalSelect: + return binInsn | GenConditionalSelectInsn(insn); + + case kDataProcess1Src: + return binInsn | GenDataProcess1SrcInsn(insn); + + case kDataProcess2Src: + return binInsn | GenDataProcess2SrcInsn(insn); + + case kDataProcess3Src: + return binInsn | GenDataProcess3SrcInsn(insn); + + case kFloatIntConversions: + return binInsn | GenFloatIntConversionsInsn(insn); + + case kFloatCompare: + return binInsn | GenFloatCompareInsn(insn); + + case kFloatDataProcessing1: + return binInsn | GenFloatDataProcessing1Insn(insn); + + case kFloatDataProcessing2: + return binInsn | GenFloatDataProcessing2Insn(insn); + + case kFloatImm: + return binInsn | GenFloatImmInsn(insn); + + case kFloatCondSelect: + return binInsn | GenFloatCondSelectInsn(insn); + + case kLoadStoreReg: + return GenLoadStoreRegInsn(insn, objFuncEmitInfo); + + case kLoadStoreAR: + return binInsn | GenLoadStoreARInsn(insn); + + case kLoadExclusive: + return binInsn | GenLoadExclusiveInsn(insn); + + case kLoadExclusivePair: + return binInsn | GenLoadExclusivePairInsn(insn); + + case kStoreExclusive: + return binInsn | GenStoreExclusiveInsn(insn); + + case kStoreExclusivePair: + return binInsn | GenStoreExclusivePairInsn(insn); + + case kLoadPair: + return binInsn | GenLoadPairInsn(insn); + + case kStorePair: + return binInsn | GenStorePairInsn(insn); + + case kLoadStoreFloat: + return GenLoadStoreFloatInsn(insn, objFuncEmitInfo); + + case kLoadPairFloat: + return binInsn | GenLoadPairFloatInsn(insn); + + case kStorePairFloat: + return binInsn | GenStorePairFloatInsn(insn); + + case kLoadLiteralReg: + return 
binInsn | GenLoadLiteralRegInsn(insn, objFuncEmitInfo); + + case kLogicalReg: + return binInsn | GenLogicalRegInsn(insn); + + case kLogicalImm: + return binInsn | GenLogicalImmInsn(insn); + + case kMoveWide: + return binInsn | GenMoveWideInsn(insn); + + case kPCRelAddr: + return binInsn | GenPCRelAddrInsn(insn, objFuncEmitInfo); + + case kAddPCRelAddr: + return binInsn | GenAddPCRelAddrInsn(insn, objFuncEmitInfo); + + case kSystemInsn: + return binInsn | GenSystemInsn(insn); + + case kTestBranch: + return binInsn | GenTestBranchInsn(insn, objFuncEmitInfo); + + case kCondBranch: + return binInsn | GenCondBranchInsn(insn, objFuncEmitInfo); + + case kUnknownEncodeType: + break; + + default: + break; + } + return binInsn; +} + +/* get binary code of operand */ +uint32 AArch64ObjEmitter::GetOpndMachineValue(const Operand &opnd) const +{ + if (opnd.IsRegister()) { + const RegOperand ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == kRFLAG) { + return 0; + } + if (regOpnd.IsOfIntClass()) { + if (regOpnd.GetRegisterNumber() == RZR) { + return regNO - R0 - 2; + } + if (regOpnd.GetRegisterNumber() == RSP) { + return regNO - R0 - 1; + } + return regNO - R0; + } + return regNO - V0; + } else if (opnd.IsImmediate()) { + return static_cast(static_cast(opnd).GetValue()); + } else if (opnd.IsConditionCode()) { + const CondOperand &condOpnd = static_cast(opnd); + return static_cast(ccEncode[condOpnd.GetCode()]); + } else if (opnd.IsOpdExtend()) { + const ExtendShiftOperand &exendOpnd = static_cast(opnd); + uint32 shift = exendOpnd.GetShiftAmount(); + DEBUG_ASSERT(exendOpnd.GetExtendOp() == ExtendShiftOperand::kSXTW, "support kSXTW only!"); + uint32 option = 0x30; + return option | shift; + } else { + CHECK_FATAL(false, "not supported operand type currently"); + } +} + +uint32 AArch64ObjEmitter::GetAdrLabelOpndValue(const Insn &insn, const Operand &opnd, + ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = + (insn.GetMachineOpcode() == MOP_xadrp) ? 
FixupKind(kAArch64PCRelAdrpImm21) : FixupKind(kAArch64PCRelAdrImm21); + if (opnd.IsMemoryAccessOperand()) { + const MemOperand &memOpnd = static_cast(opnd); + Fixup *fixup = memPool->New(memOpnd.GetSymbolName(), 0, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else if (opnd.IsStImmediate()) { + const StImmOperand &stOpnd = static_cast(opnd); + Fixup *fixup = + memPool->New(stOpnd.GetName(), stOpnd.GetOffset(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else { + CHECK_FATAL(opnd.IsImmediate(), "check kind failed"); + } + return 0; +} + +uint32 AArch64ObjEmitter::GetLoadLiteralOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = FixupKind(kAArch64LoadPCRelImm19); + CHECK_FATAL(opnd.IsLabelOpnd(), "check literal kind failed"); + const LabelOperand &label = static_cast(opnd); + LocalFixup *fixup = memPool->New(label.GetLabelIndex(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetCondBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = FixupKind(kAArch64CondBranchPCRelImm19); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetUnCondBranchOpndValue(const Operand &opnd, const std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) const +{ + uint32 defOffset = label2Offset[static_cast(opnd).GetLabelIndex()]; + if (defOffset != 0xFFFFFFFFULL) { + uint32 useOffset = objFuncEmitInfo.GetTextDataSize(); + uint32 pcRelImm = (defOffset - useOffset) >> k2BitSize; + return (pcRelImm & 0x3FFFFFF); + } + + FixupKind fixupKind = FixupKind(kAArch64UnCondBranchPCRelImm26); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetCallFuncOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + const FuncNameOperand &funcNameOpnd = static_cast(opnd); + const MIRSymbol *funcSymbol = funcNameOpnd.GetFunctionSymbol(); + FixupKind fixupKind = FixupKind(kAArch64CallPCRelImm26); + + Fixup *fixup = memPool->New(funcNameOpnd.GetName(), 0, objFuncEmitInfo.GetTextDataSize(), fixupKind); + if (funcSymbol->IsGlobal()) { + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } + return 0; +} + +uint32 AArch64ObjEmitter::GetCompareBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = FixupKind(kAArch64CompareBranchPCRelImm19); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetTestBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = FixupKind(kAArch64TestBranchPCRelImm14); + uint32 labelIndex = static_cast(opnd).GetLabelIndex(); + LocalFixup *fixup = memPool->New(labelIndex, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + return 0; +} + +uint32 AArch64ObjEmitter::GetLo12LitrealOpndValue(MOperator mOp, const Operand 
&opnd, + ObjFuncEmitInfo &objFuncEmitInfo) const +{ + FixupKind fixupKind = (mOp == MOP_xadrpl12) ? FixupKind(kAArch64AddPCRelLo12) : FixupKind(kAArch64LdrPCRelLo12); + if (opnd.IsMemoryAccessOperand()) { + const MemOperand &memOpnd = static_cast(opnd); + uint32 offset = 0; + if (memOpnd.GetOffsetImmediate() != nullptr) { + offset = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); + } + Fixup *fixup = + memPool->New(memOpnd.GetSymbolName(), offset, objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } else { + CHECK_FATAL(opnd.IsStImmediate(), "check opnd kind"); + const StImmOperand &stOpnd = static_cast(opnd); + Fixup *fixup = + memPool->New(stOpnd.GetName(), stOpnd.GetOffset(), objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendGlobalFixups(*fixup); + } + return 0; +} + +uint32 AArch64ObjEmitter::GenMovReg(const Insn &insn) const +{ + Operand &opnd1 = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd2 = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(opnd1.IsRegister(), "opnd1 must be a register"); + DEBUG_ASSERT(opnd2.IsRegister(), "opnd2 must be a register"); + uint32 opCode = 0; + if (static_cast(opnd1).GetRegisterNumber() == RSP || + static_cast(opnd2).GetRegisterNumber() == RSP) { + if (insn.GetMachineOpcode() == MOP_xmovrr) { + const InsnDesc &md = AArch64CG::kMd[MOP_xaddrri12]; + opCode = md.GetMopEncode(); + } else { + DEBUG_ASSERT(insn.GetMachineOpcode() == MOP_wmovrr, "support MOP_wmovrr Currently!"); + const InsnDesc &md = AArch64CG::kMd[MOP_waddrri12]; + opCode = md.GetMopEncode(); + } + /* Rd */ + uint32 opnd = opCode | GetOpndMachineValue(opnd1); + /* Rn */ + opnd |= GetOpndMachineValue(opnd2) << kShiftFive; + return opnd; + } else { + if (insn.GetMachineOpcode() == MOP_xmovrr) { + const InsnDesc &md = AArch64CG::kMd[MOP_xiorrrr]; + opCode = md.GetMopEncode(); + } else { + DEBUG_ASSERT(insn.GetMachineOpcode() == MOP_wmovrr, "support MOP_wmovrr Currently!"); + const InsnDesc &md = AArch64CG::kMd[MOP_wiorrrr]; + opCode = md.GetMopEncode(); + } + /* Rd */ + uint32 opnd = opCode | GetOpndMachineValue(opnd1); + /* Rn */ + opnd |= GetOpndMachineValue(opnd2) << kShiftSixteen; + /* Rm */ + uint32 zr = 0x1f; /* xzr / wzr */ + opnd |= zr << kShiftFive; + return opnd; + } +} + +uint32 AArch64ObjEmitter::GenMovImm(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 immSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + uint64 immValue = static_cast(immOpnd.GetValue()); + bool isMovz = IsMoveWidableImmediate(immValue, immSize); + bool isMovn = IsMoveWidableImmediate(~immValue, immSize); + if (isMovz || isMovn) { + if (!isMovz) { + immValue = ~immValue; + } + uint32 hwFlag = 0; + if (immSize == k32BitSize) { + auto &md = isMovz ? AArch64CG::kMd[MOP_wmovzri16] : AArch64CG::kMd[MOP_wmovnri16]; + opnd |= md.GetMopEncode(); + immValue = static_cast(immValue); + uint32 bitFieldValue = 0xFFFF; + if (((static_cast(immValue)) & (bitFieldValue << k16BitSize)) != 0) { + hwFlag = 1; + } + } else { + DEBUG_ASSERT(immSize == k64BitSize, "support 64 bit only!"); + auto &md = isMovz ? 
AArch64CG::kMd[MOP_xmovzri16] : AArch64CG::kMd[MOP_xmovnri16]; + opnd |= md.GetMopEncode(); + uint64 bitFieldValue = 0xFFFF; + /* hw is 00b, 01b, 10b, or 11b */ + for (hwFlag = 0; hwFlag < 4; ++hwFlag) { + if (immValue & (bitFieldValue << (k16BitSize * hwFlag))) { + break; + } + } + } + opnd |= ((static_cast(immValue >> (hwFlag * k16BitSize))) << kShiftFive); + opnd |= (hwFlag << kShiftTwentyOne); + } else { + if (immSize == k32BitSize) { + auto &md = AArch64CG::kMd[MOP_wiorrri12]; + opnd |= md.GetMopEncode(); + } else { + DEBUG_ASSERT(immSize == k64BitSize, "support 64 bit only!"); + auto &md = AArch64CG::kMd[MOP_xiorrri13]; + opnd |= md.GetMopEncode(); + } + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = (immSize == k32BitSize) ? k32BitSize : k64BitSize; + opnd |= EncodeLogicaImm(value, size) << kShiftTen; + opnd |= (0x1FU << kShiftFive); + } + + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubExtendRegInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Extend */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetLo12LitrealOpndValue(insn.GetMachineOpcode(), insn.GetOperand(kInsnThirdOpnd), objFuncEmitInfo) + << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubImmInsn(const Insn &insn) const +{ + uint32 operandSize = 4; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Imm */ + uint32 immValue = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)); + if ((immValue & (0xFFFU)) == 0 && ((immValue & (0xFFFU << kShiftTwelve))) != 0) { + opnd |= (1U << kShiftTwentyTwo); + immValue >>= kShiftTwelve; + } + opnd |= (immValue << kShiftTen); + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubShiftImmInsn(const Insn &insn) const +{ + uint32 operandSize = 5; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)) << kShiftTen; + /* Shift */ + BitShiftOperand &lslOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd + index)); + if (lslOpnd.GetShiftAmount() > 0) { + uint32 shift = 0x1; + opnd |= shift << kShiftTwentyTwo; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubRegInsn(const Insn &insn) const +{ + uint32 operandSize = 4; // subs insn + int32 index = insn.GetOperandSize() == operandSize ? 
1 : 0; + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd + index)); + operandSize = 2; + if (insn.GetOperandSize() == operandSize) { // neg, cmp or cmn insn + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + return opnd; + } + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd + index)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd + index)) << kShiftSixteen; + + RegOperand &rd = static_cast(insn.GetOperand(kInsnFirstOpnd + index)); + RegOperand &rn = static_cast(insn.GetOperand(kInsnSecondOpnd + index)); + // SP register can only be used with LSL or Extend + if (rd.GetRegisterNumber() == RSP || rn.GetRegisterNumber() == RSP) { + uint32 regSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + opnd |= 1 << kShiftTwentyOne; + opnd |= ((regSize == k64BitSize ? 0b11 : 0b10) << kShiftThirteen); // option + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenAddSubShiftRegInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + + BitShiftOperand *bitShiftOpnd = nullptr; + + uint32 operandSize = 3; + if (insn.GetOperandSize() == operandSize) { + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + bitShiftOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + } else { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + bitShiftOpnd = static_cast(&insn.GetOperand(kInsnFourthOpnd)); + } + uint32 shift = 0; + switch (bitShiftOpnd->GetShiftOp()) { + case BitShiftOperand::kLSL: + shift = kShiftLSL; + break; + case BitShiftOperand::kLSR: + shift = kShiftLSR; + break; + case BitShiftOperand::kASR: + shift = kShiftASR; + break; + default: + break; + } + /* Shift */ + opnd |= shift << kShiftTwentyTwo; + /* Imm */ + opnd |= bitShiftOpnd->GetShiftAmount() << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenBitfieldInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + + uint32 operandSize = 4; + if (insn.GetMachineOpcode() == MOP_wubfizrri5i5 || insn.GetMachineOpcode() == MOP_xubfizrri6i6 || + insn.GetMachineOpcode() == MOP_wbfirri5i5 || insn.GetMachineOpcode() == MOP_xbfirri6i6) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 shift = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + uint32 immr = -shift % mod; + opnd |= immr << kShiftSixteen; + uint32 width = GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + uint32 imms = width - 1; + opnd |= imms << kShiftTen; + } else if (insn.GetOperandSize() == operandSize) { + uint32 lab = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + opnd |= lab << kShiftSixteen; + uint32 width = GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + opnd |= (lab + width - 1) << kShiftTen; + } else if (insn.GetMachineOpcode() == MOP_xlslrri6 || insn.GetMachineOpcode() == MOP_wlslrri5) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 shift = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); 
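+ /*
+ * LSL #shift is an alias of UBFM with immr = (-shift) mod datasize and
+ * imms = datasize - 1 - shift (per the ARMv8 manual's alias rules); e.g.
+ * "lsl x0, x1, #4" encodes immr = 60, imms = 59, which is what the two
+ * statements below compute.
+ */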
+ uint32 immr = -shift % mod; + opnd |= immr << kShiftSixteen; + uint32 imms = mod - 1 - shift; + opnd |= imms << kShiftTen; + } else if (insn.GetMachineOpcode() == MOP_xlsrrri6 || insn.GetMachineOpcode() == MOP_wlsrrri5 || + insn.GetMachineOpcode() == MOP_xasrrri6 || insn.GetMachineOpcode() == MOP_wasrrri5) { + uint32 mod = insn.GetOperand(kInsnFirstOpnd).GetSize(); /* 64 & 32 from ARMv8 manual C5.6.114 */ + CHECK_FATAL(mod == 64 || mod == 32, "mod must be 64/32"); + uint32 immr = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + opnd |= immr << kShiftSixteen; + uint32 imms = mod - 1; + opnd |= imms << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenExtractInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftTen; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + + return opnd; +} + +uint32 AArch64ObjEmitter::GenBranchImmInsn(const Insn &insn, const std::vector &label2Offset, + ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Imm */ + if (insn.IsCall()) { + return GetCallFuncOpndValue(insn.GetOperand(kInsnFirstOpnd), objFuncEmitInfo); + } else { + return GetUnCondBranchOpndValue(insn.GetOperand(kInsnFirstOpnd), label2Offset, objFuncEmitInfo); + } +} + +uint32 AArch64ObjEmitter::GenBranchRegInsn(const Insn &insn) const +{ + if (insn.GetMachineOpcode() == MOP_xret || insn.GetMachineOpcode() == MOP_clrex) { + return 0; + } + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCompareBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetCompareBranchOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondCompareImmInsn(const Insn &insn) const +{ + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Nzcv */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFifthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondCompareRegInsn(const Insn &insn) const +{ + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Nzcv */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)); + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFifthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenConditionalSelectInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + uint32 operandSize = 5; + if (insn.GetOperandSize() == operandSize) { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTwelve; + } else if (insn.GetMachineOpcode() == MOP_wcnegrrrc 
|| insn.GetMachineOpcode() == MOP_xcnegrrrc) { + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm Rn==Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + /* Cond */ + uint8 cond = GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)); + /* invert cond */ + opnd |= ((cond ^ 1u) & 0xfu) << kShiftTwelve; + } else { + /* Cond */ + uint8 cond = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* invert cond */ + opnd |= ((cond ^ 1u) & 0xfu) << kShiftTwelve; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess1SrcInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess2SrcInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenDataProcess3SrcInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Ra */ + uint32 operandSize = 4; + if (insn.GetOperandSize() == operandSize) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatIntConversionsInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatCompareInsn(const Insn &insn) const +{ + /* Rn */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + if (insn.GetOperand(kInsnThirdOpnd).IsRegister()) { + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatDataProcessing1Insn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatDataProcessing2Insn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatImmInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= (GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) & 0xff) << kShiftThirteen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenFloatCondSelectInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= 
GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + /* Cond */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFourthOpnd)) << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeLiteral(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + FixupKind fixupKind = FixupKind(kAArch64LoadPCRelImm19); + LocalFixup *fixup = + memPool->New(objFuncEmitInfo.GetCGFunc().GetLocalSymLabelIndex(*memOpnd.GetSymbol()), + objFuncEmitInfo.GetTextDataSize(), fixupKind); + objFuncEmitInfo.AppendLocalFixups(*fixup); + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_sldr) { + mOp = MOP_sldli; + } else if (mOp == MOP_dldr) { + mOp = MOP_dldli; + } else if (mOp == MOP_xldr) { + mOp = MOP_xldli; + } else if (mOp == MOP_wldr) { + mOp = MOP_wldli; + } else { + CHECK_FATAL(false, "unsupported mOp"); + } + auto &md = AArch64CG::kMd[mOp]; + return md.GetMopEncode() | opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeBOi(const Insn &insn) const +{ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + /* Imm */ + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 imm9Mask = 0x1ff; + uint32 opnd = 0U; + if (memOpnd.IsPostIndexed()) { + opnd |= (static_cast(offsetValue) & imm9Mask) << kShiftTwelve; + uint32 specialOpCode = 0x1; + opnd |= specialOpCode << kShiftTen; + } else if (memOpnd.IsPreIndexed()) { + opnd |= (static_cast(offsetValue) & imm9Mask) << kShiftTwelve; + uint32 specialOpCode = 0x3; + opnd |= specialOpCode << kShiftTen; + } else { + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + uint32 specialOpCode = 0x1; + opnd |= specialOpCode << kShiftTwentyFour; + uint32 divisor = 1; + MOperator mOp = insn.GetMachineOpcode(); + if ((mOp == MOP_xldr) || (mOp == MOP_xstr) || (mOp == MOP_dldr) || (mOp == MOP_dstr)) { + divisor = k8BitSize; + } else if ((mOp == MOP_wldr) || (mOp == MOP_wstr) || (mOp == MOP_sstr) || (mOp == MOP_sldr)) { + divisor = k4BitSize; + } else if (mOp == MOP_hldr) { + divisor = k2BitSize; + } + uint32 shiftRightNum = 0; + if ((mOp == MOP_wldrsh) || (mOp == MOP_wldrh) || (mOp == MOP_wstrh)) { + shiftRightNum = 1; + } + opnd |= ((static_cast(offsetValue) >> shiftRightNum) / divisor) << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreModeBOrX(const Insn &insn) const +{ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 opnd = 0; + opnd |= 0x1 << kShiftTwentyOne; + opnd |= 0x2 << kShiftTen; + RegOperand *offsetReg = memOpnd.GetIndexRegister(); + opnd |= GetOpndMachineValue(*offsetReg) << kShiftSixteen; + std::string extend = memOpnd.GetExtendAsString(); + uint32 option = 0; + if (extend == "UXTW") { + option = 0x2; + } else if (extend == "LSL") { + option = 0x3; + } else if (extend == "SXTW") { + option = 0x6; + } else { + DEBUG_ASSERT(extend == "SXTX", "must be SXTX!"); + option = 0x7; + } + opnd |= option << kShiftThirteen; + uint32 s = (memOpnd.ShiftAmount() > 0) ? 
1 : 0; + opnd |= s << kShiftTwelve; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeLiteral) { + return GenLoadStoreModeLiteral(insn, objFuncEmitInfo); + } + + MOperator mOp = insn.GetMachineOpcode(); +#ifdef USE_32BIT_REF + if (((mOp == MOP_xstr) || (mOp == MOP_xldr)) && + static_cast(insn.GetOperand(kInsnFirstOpnd)).IsRefField()) { + mOp = (mOp == MOP_xstr) ? MOP_wstr : MOP_wldr; + } +#endif + auto &md = AArch64CG::kMd[mOp]; + uint32 binInsn = md.GetMopEncode(); + // invalid insn generated by the eval node + if (static_cast(insn.GetOperand(kFirstOpnd)).GetRegisterNumber() == RZR) { + if (mOp == MOP_sldr) { + binInsn = AArch64CG::kMd[MOP_wldr].GetMopEncode(); + } else if (mOp == MOP_dldr) { + binInsn = AArch64CG::kMd[MOP_xldr].GetMopEncode(); + } else if (mOp == MOP_sstr) { + binInsn = AArch64CG::kMd[MOP_wstr].GetMopEncode(); + } else if (mOp == MOP_sstr) { + binInsn = AArch64CG::kMd[MOP_xstr].GetMopEncode(); + } + } + /* Rt */ + binInsn |= GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + binInsn |= GetOpndMachineValue(*baseReg) << kShiftFive; + + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + uint32 size = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + /* Imm */ + int32 offsetValue = ofstOpnd != nullptr ? ofstOpnd->GetOffsetValue() : 0; + if ((((size == k16BitSize) && (offsetValue % k2BitSize) != 0) || + ((size == k32BitSize) && (offsetValue % k4BitSize) != 0) || + ((size == k64BitSize) && (offsetValue % k8BitSize) != 0)) && + ((offsetValue < 256) && (offsetValue > -257))) { + uint32 mopEncode = 0; + // ldur, ldurh, ldurb + if (insn.IsLoad()) { + if (insn.GetDesc()->GetEncodeType() == kLoadStoreFloat) { + mopEncode = size == k16BitSize ? 0x7c400000 : (size == k32BitSize ? 0xbc400000 : 0xfc400000); + } else { + mopEncode = size == k16BitSize ? 0x78400000 : (size == k32BitSize ? 0xb8400000 : 0xf8400000); + } + } else { // stur, sturh, sturb + if (insn.GetDesc()->GetEncodeType() == kLoadStoreFloat) { + mopEncode = size == k16BitSize ? 0x7c000000 : (size == k32BitSize ? 0xbc000000 : 0xfc000000); + } else { + mopEncode = size == k16BitSize ? 0x78000000 : (size == k32BitSize ? 
0xb8000000 : 0xf8000000); + } + } + binInsn = + GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) | (GetOpndMachineValue(*baseReg) << kShiftFive); + return binInsn | mopEncode | (offsetValue << kShiftTwelve); + } + return binInsn | GenLoadStoreModeBOi(insn); + } else if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX) { + return binInsn | GenLoadStoreModeBOrX(insn); + } + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeLo12Li, "support kAddrModeLo12Li only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + binInsn |= GetLo12LitrealOpndValue(insn.GetMachineOpcode(), memOpnd, objFuncEmitInfo) << kShiftTen; + uint32 specialOpCode = 0x1; + binInsn |= specialOpCode << kShiftTwentyFour; + + return binInsn; +} + +uint32 AArch64ObjEmitter::GenLoadStoreARInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadExclusiveInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadExclusivePairInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStoreExclusiveInsn(const Insn &insn) const +{ + /* Rs */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftSixteen; + /* Rt */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStoreExclusivePairInsn(const Insn &insn) const +{ + /* Rs */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftSixteen; + /* Rt */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == 
MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + Operand *baseReg = memOpnd.GetBaseRegister(); + /* Rn */ + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadPairInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x3; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x7; + } else { + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x5; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStorePairInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x2; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x6; + } else { + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x4; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadStoreFloatInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + return GenLoadStoreRegInsn(insn, objFuncEmitInfo); +} + +uint32 AArch64ObjEmitter::GenLoadPairFloatInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == 
k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x3; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x7; + } else { + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x5; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenStorePairFloatInsn(const Insn &insn) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Rt2 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + /* Mem */ + MemOperand &memOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + DEBUG_ASSERT(memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi, "support kAddrModeBOi only!"); + /* Rn */ + Operand *baseReg = memOpnd.GetBaseRegister(); + opnd |= GetOpndMachineValue(*baseReg) << kShiftFive; + /* Imm */ + OfstOperand *ofstOpnd = static_cast(memOpnd.GetOffsetImmediate()); + int32 offsetValue = ofstOpnd->GetOffsetValue(); + uint32 divisor = 0; + if (memOpnd.GetSize() == k64BitSize) { + divisor = k8ByteSize; + } else { + divisor = k4ByteSize; + } + uint32 imm7Mask = 0x7f; + opnd |= (static_cast(offsetValue / divisor) & imm7Mask) << kShiftFifteen; + + uint32 specialOpCode = 0; + if (memOpnd.IsPostIndexed()) { + specialOpCode = 0x2; + } else if (memOpnd.IsPreIndexed()) { + specialOpCode = 0x6; + } else { + DEBUG_ASSERT(memOpnd.IsIntactIndexed(), "must be kIntact!"); + specialOpCode = 0x4; + } + opnd |= specialOpCode << kShiftTwentyTwo; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLoadLiteralRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetLoadLiteralOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenLogicalRegInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + + uint32 operandSize = 2; // mvn insn + if (insn.GetOperandSize() == operandSize) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)) << kShiftFive; + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftSixteen; + return opnd; + } + + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Rm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnThirdOpnd)) << kShiftSixteen; + + operandSize = 4; + if (insn.GetOperandSize() == operandSize) { + BitShiftOperand &bitShiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + uint32 shift = 0; + switch (bitShiftOpnd.GetShiftOp()) { + case BitShiftOperand::kLSL: + shift = kShiftLSL; + break; + case BitShiftOperand::kLSR: + shift = kShiftLSR; + break; + case BitShiftOperand::kASR: + shift = kShiftASR; + break; + default: + break; + } + /* Shift */ + opnd |= shift << kShiftTwentyTwo; + /* Imm */ + opnd |= bitShiftOpnd.GetShiftAmount() << kShiftTen; + } + return opnd; +} + +uint32 AArch64ObjEmitter::GenLogicalImmInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + if (insn.GetMachineOpcode() == MOP_wmovri32 || insn.GetMachineOpcode() == MOP_xmovri64) { + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftTen; + return opnd; + } + + // tst insn + if 
(insn.GetMachineOpcode() == MOP_wtstri32 || insn.GetMachineOpcode() == MOP_xtstri64) { + // Rn + uint32 opndValue = GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + // Imm + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = insn.GetDesc()->GetOpndDes(kInsnThirdOpnd)->GetSize(); + opndValue |= EncodeLogicaImm(value, size) << kShiftTen; + return opndValue; + } + + /* Rn */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + /* Imm */ + ImmOperand &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint64 value = static_cast(immOpnd.GetValue()); + uint32 size = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + opnd |= EncodeLogicaImm(value, size) << kShiftTen; + return opnd; +} + +uint32 AArch64ObjEmitter::GenMoveWideInsn(const Insn &insn) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftFive; + + BitShiftOperand &lslOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 mod = 16; /* 16 from Armv8 Manual C5.6.128 */ + uint32 shift = lslOpnd.GetShiftAmount() / mod; + /* Shift */ + opnd |= shift << kShiftTwentyOne; + return opnd; +} + +uint32 AArch64ObjEmitter::GenPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rd */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* Imm */ + opnd |= GetAdrLabelOpndValue(insn, insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenSystemInsn(const Insn &insn) const +{ + (void)insn; + return 0; +} + +uint32 AArch64ObjEmitter::GenTestBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Rt */ + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + /* b40 */ + opnd |= GetOpndMachineValue(insn.GetOperand(kInsnSecondOpnd)) << kShiftNineteen; + /* Imm */ + opnd |= GetTestBranchOpndValue(insn.GetOperand(kInsnThirdOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +uint32 AArch64ObjEmitter::GenCondBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const +{ + /* Imm */ + uint32 opnd = GetCondBranchOpndValue(insn.GetOperand(kInsnSecondOpnd), objFuncEmitInfo) << kShiftFive; + return opnd; +} + +void AArch64ObjEmitter::InsertNopInsn(ObjFuncEmitInfo &objFuncEmitInfo) const +{ + AArch64CGFunc &cgFunc = static_cast(objFuncEmitInfo.GetCGFunc()); + bool found = false; + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction()) { + if (insn->IsCall()) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_nop); + bb->InsertInsnAfter(*insn, newInsn); + } + found = true; + break; + } + } + if (found) { + break; + } + } +} + +uint32 AArch64ObjEmitter::EncodeLogicaImm(uint64 imm, uint32 size) const +{ + /* the element size */ + uint32 elementSize = size; + while (elementSize > 2) { + elementSize /= 2; + uint64 mask = (1ULL << elementSize) - 1; + if ((imm & mask) != ((imm >> elementSize) & mask)) { + elementSize *= 2; + break; + } + } + + if (elementSize != k64BitSize) { + imm &= ((1ULL << elementSize) - 1); + } + std::bitset bitValue(imm); + uint32 trailCount = 0; + for (uint32 i = 1; i < elementSize; ++i) { + if (bitValue[i] ^ bitValue[0]) { + trailCount = i; + break; + } + } + + uint32 immr = 0; + uint32 oneNum = bitValue.count(); + if (bitValue.test(0)) { /* for 1+0+1+ pattern */ + immr 
= oneNum - trailCount; + } else { /* for 0+1+0+ pattern */ + immr = elementSize - trailCount; + } + + uint32 imms = ~(elementSize - 1) << 1; + imms |= oneNum - 1; + uint32 n = (elementSize == k64BitSize) ? 1 : 0; + return (n << kShiftTwelve) | (immr << kShiftSix) | (imms & 0x3f); +} + +void AArch64ObjEmitter::EmitIntrinsicInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) +{ + switch (insn.GetMachineOpcode()) { + // adrp xd, label + // add xd, xd, #:lo12:label + case MOP_adrp_label: { + uint32 opnd = GetOpndMachineValue(insn.GetOperand(kInsnFirstOpnd)); + uint32 binInsn = AArch64CG::kMd[MOP_xadrp].GetMopEncode(); + binInsn |= opnd; + objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize); + binInsn = AArch64CG::kMd[MOP_xaddrri12].GetMopEncode(); + binInsn |= opnd | (opnd << kShiftFive); + objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize); + break; + } + default: + CHECK_FATAL(false, "unsupport mop in EmitIntrinsicInsn!\n"); + } +} + +void AArch64ObjEmitter::EmitSpinIntrinsicInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) +{ + switch (insn.GetMachineOpcode()) { + case MOP_tls_desc_rel: { + objFuncEmitInfo.AppendTextData(0x91400000, k4ByteSize); + objFuncEmitInfo.AppendTextData(0x91000000, k4ByteSize); + break; + } + default: + CHECK_FATAL(false, "unsupport mop in EmitSpinIntrinsicInsn!\n"); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aec06ad3cfa5b4c1de3595cab8fbd174e958a36b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_offset_adjust.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64FPLROffsetAdjustment::Run() +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + FOR_ALL_BB(bb, aarchCGFunc) { + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { // AdjustmentOffsetForOpnd may replace curInsn + if (!insn->IsMachineInstruction()) { + continue; + } + AdjustmentOffsetForOpnd(*insn); + } + } +#ifdef STKLAY_DEBUG + AArch64MemLayout *aarch64memlayout = static_cast(cgFunc->GetMemlayout()); + LogInfo::MapleLogger() << "--------layout of " << cgFunc->GetName() << "-------------" + << "\n"; + LogInfo::MapleLogger() << "stkpassed: " << aarch64memlayout->GetSegArgsStkPassed().GetSize() << "\n"; + LogInfo::MapleLogger() << "real framesize: " << aarch64memlayout->RealStackFrameSize() << "\n"; + LogInfo::MapleLogger() << "gr save: " << aarch64memlayout->GetSizeOfGRSaveArea() << "\n"; + LogInfo::MapleLogger() << "vr save: " << aarch64memlayout->GetSizeOfVRSaveArea() << "\n"; + LogInfo::MapleLogger() << "calleesave (includes fp lr): " + << static_cast(cgFunc)->SizeOfCalleeSaved() << "\n"; + LogInfo::MapleLogger() << "regspill: " << aarch64memlayout->GetSizeOfSpillReg() << "\n"; + LogInfo::MapleLogger() << "ref local: " << aarch64memlayout->GetSizeOfRefLocals() << "\n"; + LogInfo::MapleLogger() << "local: " << aarch64memlayout->GetSizeOfLocals() << "\n"; + LogInfo::MapleLogger() << "regpass: " << aarch64memlayout->GetSegArgsRegPassed().GetSize() << "\n"; + LogInfo::MapleLogger() << "stkpass: " << aarch64memlayout->GetSegArgsToStkPass().GetSize() << "\n"; + LogInfo::MapleLogger() << "-------------------------------------------------" + << "\n"; +#endif +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn) const +{ + uint32 opndNum = insn.GetOperandSize(); + bool replaceFP = false; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsOfVary()) { + insn.SetOperand(i, aarchCGFunc->GetOrCreateStackBaseRegOperand()); + regOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand(); + } + if (regOpnd.GetRegisterNumber() == RFP) { + insn.SetOperand(i, aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt)); + replaceFP = true; + } + } else if (opnd.IsMemoryAccessOperand()) { + AdjustMemBaseReg(insn, i, replaceFP); + AdjustMemOfstVary(insn, i); + } else if (opnd.IsIntImmediate()) { + AdjustmentOffsetForImmOpnd(insn, i); + } + } + if (replaceFP && !aarchCGFunc->UseFP()) { + AdjustmentStackPointer(insn); + } + if (insn.IsLoad() || insn.IsStore()) { + + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if ((memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + continue; + } + if (!aarchCGFunc->IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, i)) { + MemOperand &newMemOpnd = aarchCGFunc->SplitOffsetWithAddInstruction( + memOpnd, memOpnd.GetSize(), static_cast(R16), false, &insn); + insn.SetOperand(i, newMemOpnd); + } + } + } + } else { + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd); + if (!aarchCGFunc->IsOperandImmValid(insn.GetMachineOpcode(), &immOpnd, i)) { + if (insn.GetMachineOpcode() >= MOP_xaddrri24 && insn.GetMachineOpcode() <= MOP_waddrri12) { + PrimType destTy = + 
static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize ? PTY_i64 : PTY_i32;
+                        RegOperand *resOpnd = &static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
+                        ImmOperand &copyImmOpnd =
+                            aarchCGFunc->CreateImmOperand(immOpnd.GetValue(), immOpnd.GetSize(), immOpnd.IsSignedValue());
+                        aarchCGFunc->SelectAddAfterInsn(*resOpnd, insn.GetOperand(kInsnSecondOpnd), copyImmOpnd, destTy,
+                                                        false, insn);
+                        insn.GetBB()->RemoveInsn(insn);
+                    } else if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) {
+                        if (immOpnd.IsSingleInstructionMovable()) {
+                            RegOperand &tempReg =
+                                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
+                            bool is64bit = insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize;
+                            MOperator tempMovOp = is64bit ? MOP_xmovri64 : MOP_wmovri32;
+                            Insn &tempMov = cgFunc->GetInsnBuilder()->BuildInsn(tempMovOp, tempReg, immOpnd);
+                            insn.SetOperand(i, tempReg);
+                            insn.SetMOP(is64bit ? AArch64CG::kMd[MOP_xsubrrr] : AArch64CG::kMd[MOP_wsubrrr]);
+                            (void)insn.GetBB()->InsertInsnBefore(insn, tempMov);
+                        }
+                    } else {
+                        CHECK_FATAL(false, "NIY");
+                    }
+                }
+            }
+        }
+    }
+}
+
+void AArch64FPLROffsetAdjustment::AdjustMemBaseReg(Insn &insn, uint32 i, bool &replaceFP) const
+{
+    Operand &opnd = insn.GetOperand(i);
+    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
+    MemOperand *newMemOpnd = currMemOpnd.Clone(*aarchCGFunc->GetMemoryPool());
+    CHECK_NULL_FATAL(newMemOpnd);
+    if (newMemOpnd->GetBaseRegister() != nullptr) {
+        if (newMemOpnd->GetBaseRegister()->IsOfVary()) {
+            newMemOpnd->SetBaseRegister(static_cast<RegOperand &>(aarchCGFunc->GetOrCreateStackBaseRegOperand()));
+        }
+        RegOperand *memBaseReg = newMemOpnd->GetBaseRegister();
+        if (memBaseReg->GetRegisterNumber() == RFP) {
+            RegOperand &newBaseOpnd =
+                aarchCGFunc->GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt);
+            newMemOpnd->SetBaseRegister(newBaseOpnd);
+            replaceFP = true;
+        }
+    }
+    if (newMemOpnd->GetBaseRegister() != nullptr &&
+        (newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RFP ||
+         newMemOpnd->GetBaseRegister()->GetRegisterNumber() == RSP)) {
+        newMemOpnd->SetStackMem(true);
+    }
+    insn.SetOperand(i, *newMemOpnd);
+}
+
+void AArch64FPLROffsetAdjustment::AdjustMemOfstVary(Insn &insn, uint32 i) const
+{
+    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated in
+    // subsequent steps, which would cause unknown errors. So we defer to a single, unified
+    // split after all the steps are complete.
+    Operand &opnd = insn.GetOperand(i);
+    auto &currMemOpnd = static_cast<MemOperand &>(opnd);
+    if (currMemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) {
+        return;
+    }
+    OfstOperand *ofstOpnd = currMemOpnd.GetOffsetImmediate();
+    CHECK_NULL_FATAL(ofstOpnd);
+    if (ofstOpnd->GetVary() == kUnAdjustVary) {
+        MemLayout *memLayout = aarchCGFunc->GetMemlayout();
+        ofstOpnd->AdjustOffset(static_cast<int32>(
+            static_cast<AArch64MemLayout *>(memLayout)->RealStackFrameSize() -
+            memLayout->SizeOfArgsToStackPass() - cgFunc->GetFunction().GetFrameReseverdSlot()));
+        ofstOpnd->SetVary(kAdjustVary);
+    }
+}
+
+void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index) const
+{
+    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated in
+    // subsequent steps, which would cause unknown errors. So we defer to a single, unified
+    // split after all the steps are complete.
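+    // A worked sketch of the adjustment below, with purely illustrative numbers (not taken from a
+    // real frame layout): if RealStackFrameSize() == 112, SizeOfArgsToStackPass() == 16 and the
+    // reserved frame slot is 0, then ofst == 96. An unadjusted "add x1, x2, #8" becomes
+    // "add x1, x2, #104"; an unadjusted "sub x1, x2, #8" yields 8 - 96 == -88, which is negated
+    // back to 88 while the opcode is flipped to its add form via GetReversalMOP.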
+    auto &immOpnd = static_cast<ImmOperand &>(insn.GetOperand(index));
+    auto *memLayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
+    if (immOpnd.GetVary() == kUnAdjustVary) {
+        int64 ofst = static_cast<AArch64MemLayout *>(memLayout)->RealStackFrameSize() -
+                     memLayout->SizeOfArgsToStackPass() - cgFunc->GetFunction().GetFrameReseverdSlot();
+        if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) {
+            immOpnd.SetValue(immOpnd.GetValue() - ofst);
+            if (immOpnd.GetValue() < 0) {
+                immOpnd.Negate();
+            }
+            insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]);
+        } else {
+            immOpnd.Add(ofst);
+        }
+        immOpnd.SetVary(kAdjustVary);
+    }
+}
+
+void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn) const
+{
+    // Note: SplitInsn invalidates the current insn, but the insn is still manipulated in
+    // subsequent steps, which would cause unknown errors. So we defer to a single, unified
+    // split after all the steps are complete.
+    auto *aarch64memlayout = static_cast<AArch64MemLayout *>(aarchCGFunc->GetMemlayout());
+    uint32 offset = static_cast<uint32>(aarch64memlayout->SizeOfArgsToStackPass() +
+                                        cgFunc->GetFunction().GetFrameReseverdSlot());
+    if (offset == 0) {
+        return;
+    }
+    if (insn.IsLoad() || insn.IsStore()) {
+        auto *memOpnd = static_cast<MemOperand *>(insn.GetMemOpnd());
+        CHECK_NULL_FATAL(memOpnd);
+        CHECK_FATAL(memOpnd->IsIntactIndexed(), "not supported yet");
+        ImmOperand *ofstOpnd = memOpnd->GetOffsetOperand();
+        CHECK_NULL_FATAL(ofstOpnd);
+        ImmOperand *newOfstOpnd = &aarchCGFunc->GetOrCreateOfstOpnd(
+            static_cast<uint64>(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize());
+        memOpnd->SetOffsetOperand(*newOfstOpnd);
+    } else {
+        switch (insn.GetMachineOpcode()) {
+            case MOP_waddrri12:
+            case MOP_xaddrri12: {
+                auto *newAddImmOpnd = static_cast<ImmOperand *>(
+                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
+                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset);
+                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
+                break;
+            }
+            case MOP_waddrri24:
+            case MOP_xaddrri24: {
+                RegOperand &tempReg = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt);
+                ImmOperand &offsetReg = aarchCGFunc->CreateImmOperand(offset, k64BitSize, false);
+                aarchCGFunc->SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false,
+                                                insn);
+                insn.SetOperand(kInsnSecondOpnd, tempReg);
+                break;
+            }
+            case MOP_wsubrri12:
+            case MOP_xsubrri12: {
+                auto *newAddImmOpnd = static_cast<ImmOperand *>(
+                    static_cast<ImmOperand &>(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool()));
+                newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() - offset);
+                if (newAddImmOpnd->GetValue() < 0) {
+                    newAddImmOpnd->Negate();
+                }
+                insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]);
+                insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd);
+                break;
+            }
+            default: {
+                // Only certain special insns are expected to replace FP here; anything else is a bug.
+                insn.Dump();
+                CHECK_FATAL(false, "Unexpected offset adjustment insn");
+            }
+        }
+    }
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_operand.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_operand.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..17a1794d56bfc5f93100b6a30f0941005b6199ca
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_operand.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_operand.h"
+#include
+#include
+#include "aarch64_abi.h"
+#include "aarch64_cgfunc.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+bool StImmOperand::Less(const Operand &right) const
+{
+    if (&right == this) {
+        return false;
+    }
+
+    /* For different types. */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const StImmOperand *rightOpnd = static_cast<const StImmOperand *>(&right);
+    if (symbol != rightOpnd->symbol) {
+        return symbol < rightOpnd->symbol;
+    }
+    if (offset != rightOpnd->offset) {
+        return offset < rightOpnd->offset;
+    }
+    return relocs < rightOpnd->relocs;
+}
+
+bool ExtendShiftOperand::Less(const Operand &right) const
+{
+    if (&right == this) {
+        return false;
+    }
+    /* For different types. */
+    if (GetKind() != right.GetKind()) {
+        return GetKind() < right.GetKind();
+    }
+
+    const ExtendShiftOperand *rightOpnd = static_cast<const ExtendShiftOperand *>(&right);
+
+    /* The same type. */
+    if (extendOp != rightOpnd->extendOp) {
+        return extendOp < rightOpnd->extendOp;
+    }
+    return shiftAmount < rightOpnd->shiftAmount;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..dd6f819d936945f2bd003d34b2d1d9e13b31967e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "aarch64_optimize_common.h" +#include "aarch64_isa.h" +#include "aarch64_cgfunc.h" +#include "cgbb.h" + +namespace maplebe { +void AArch64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) +{ + if (bb.GetKind() == BB::kBBIgoto) { + bool modified = false; + for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { + if (insn->GetMachineOpcode() == MOP_adrp_label) { + LabelIdx labIdx = static_cast(targetOperand).GetLabelIndex(); + ImmOperand &immOpnd = + static_cast(GetCGFunc())->CreateImmOperand(labIdx, k8BitSize, false); + insn->SetOperand(1, immOpnd); + modified = true; + } + } + CHECK_FATAL(modified, "ModifyJumpTarget: Could not change jump target"); + return; + } else if (bb.GetKind() == BB::kBBGoto) { + for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { + if (insn->GetMachineOpcode() == MOP_adrp_label) { + maple::LabelIdx labidx = static_cast(targetOperand).GetLabelIndex(); + LabelOperand &label = static_cast(GetCGFunc())->GetOrCreateLabelOperand(labidx); + insn->SetOperand(1, label); + break; + } + } + // fallthru below to patch the branch insn + } + bb.GetLastInsn()->SetOperand(AArch64isa::GetJumpTargetIdx(*bb.GetLastInsn()), targetOperand); +} + +void AArch64InsnVisitor::ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) +{ + ModifyJumpTarget(static_cast(GetCGFunc())->GetOrCreateLabelOperand(targetLabel), bb); +} + +void AArch64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) +{ + ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand(AArch64isa::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb); +} + +Insn *AArch64InsnVisitor::CloneInsn(Insn &originalInsn) +{ + MemPool *memPool = const_cast(CG::GetCurCGFunc()->GetMemoryPool()); + if (originalInsn.IsTargetInsn()) { + if (!originalInsn.IsVectorOp()) { + return memPool->Clone(originalInsn); + } else { + auto *insn = memPool->Clone(*static_cast(&originalInsn)); + insn->SetRegSpecList(static_cast(originalInsn).GetRegSpecList()); + return insn; + } + } else if (originalInsn.IsCfiInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } else if (originalInsn.IsDbgInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } + if (originalInsn.IsComment()) { + return memPool->Clone(originalInsn); + } + CHECK_FATAL(false, "Cannot clone"); + return nullptr; +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label from the given instruction. + * Note: MOP_xbr is a branching instruction, but the target is unknown at compile time, + * because a register instead of label. So we don't take it as a branching instruction. 
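+ * For example (label and register names here are illustrative only): for "bne .L_BB12" this
+ * returns the LabelIdx of .L_BB12, while "br x9" carries its target in a register, so callers
+ * must never query it through this interface.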
+ */ +LabelIdx AArch64InsnVisitor::GetJumpLabel(const Insn &insn) const +{ + uint32 operandIdx = AArch64isa::GetJumpTargetIdx(insn); + if (insn.GetOperand(operandIdx).IsLabelOpnd()) { + return static_cast(insn.GetOperand(operandIdx)).GetLabelIndex(); + } + DEBUG_ASSERT(false, "Operand is not label"); + return 0; +} + +bool AArch64InsnVisitor::IsCompareInsn(const Insn &insn) const +{ + switch (insn.GetMachineOpcode()) { + case MOP_wcmpri: + case MOP_wcmprr: + case MOP_xcmpri: + case MOP_xcmprr: + case MOP_hcmperi: + case MOP_hcmperr: + case MOP_scmperi: + case MOP_scmperr: + case MOP_dcmperi: + case MOP_dcmperr: + case MOP_hcmpqri: + case MOP_hcmpqrr: + case MOP_scmpqri: + case MOP_scmpqrr: + case MOP_dcmpqri: + case MOP_dcmpqrr: + case MOP_wcmnri: + case MOP_wcmnrr: + case MOP_xcmnri: + case MOP_xcmnrr: + return true; + default: + return false; + } +} + +bool AArch64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const +{ + switch (insn.GetMachineOpcode()) { + case MOP_wcbnz: + case MOP_xcbnz: + case MOP_wcbz: + case MOP_xcbz: + return true; + default: + return false; + } +} + +bool AArch64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const +{ + switch (insn.GetMachineOpcode()) { + case MOP_xaddrrr: + case MOP_xaddrri12: + case MOP_waddrrr: + case MOP_waddrri12: + case MOP_xsubrrr: + case MOP_xsubrri12: + case MOP_wsubrrr: + case MOP_wsubrri12: + return true; + default: + return false; + } +} + +RegOperand *AArch64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) +{ + return &static_cast(GetCGFunc()) + ->CreateRegisterOperandOfType(pReg.GetRegisterType(), pReg.GetSize() / k8BitSize); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_peep.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3284eef8630b937467f9a0681bfe553653bef98c --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -0,0 +1,6017 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#include "cg_option.h" +#include "aarch64_utils.h" + +namespace maplebe { +#define JAVALANG (cgFunc->GetMirModule().IsJavaModule()) +#define CG_PEEP_DUMP CG_DEBUG_FUNC(*cgFunc) +namespace { +const std::string kMccLoadRef = "MCC_LoadRefField"; +const std::string kMccLoadRefV = "MCC_LoadVolatileField"; +const std::string kMccLoadRefS = "MCC_LoadRefStatic"; +const std::string kMccLoadRefVS = "MCC_LoadVolatileStaticField"; +const std::string kMccDummy = "MCC_Dummy"; + +const uint32 kSizeOfSextMopTable = 5; +const uint32 kSizeOfUextMopTable = 3; + +MOperator sextMopTable[kSizeOfSextMopTable] = {MOP_xsxtb32, MOP_xsxtb64, MOP_xsxth32, MOP_xsxth64, MOP_xsxtw64}; + +MOperator uextMopTable[kSizeOfUextMopTable] = {MOP_xuxtb32, MOP_xuxth32, MOP_xuxtw64}; + +const std::string GetReadBarrierName(const Insn &insn) +{ + constexpr int32 totalBarrierNamesNum = 5; + std::array barrierNames = {kMccLoadRef, kMccLoadRefV, kMccLoadRefS, + kMccLoadRefVS, kMccDummy}; + if (insn.GetMachineOpcode() == MOP_xbl || insn.GetMachineOpcode() == MOP_tail_call_opt_xbl) { + auto &op = static_cast(insn.GetOperand(kInsnFirstOpnd)); + const std::string &funcName = op.GetName(); + for (const std::string &singleBarrierName : barrierNames) { + if (funcName == singleBarrierName) { + return singleBarrierName; + } + } + } + return ""; +} + +MOperator GetLoadOperator(uint32 refSize, bool isVolatile) +{ + if (refSize == k32BitSize) { + return isVolatile ? MOP_wldar : MOP_wldr; + } + return isVolatile ? MOP_xldar : MOP_xldr; +} +} // namespace + +static bool IsZeroRegister(const Operand &opnd) +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +void AArch64CGPeepHole::Run() +{ + bool optSuccess = false; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (ssaInfo != nullptr) { + optSuccess = DoSSAOptimize(*bb, *insn); + } else { + DoNormalOptimize(*bb, *insn); + } + } + } + if (optSuccess) { + Run(); + } +} + +bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn, *ssaInfo); + switch (thisMop) { + case MOP_xandrrr: + case MOP_wandrrr: { + manager->Optimize(true); + break; + } + case MOP_wiorrri12: + case MOP_xiorrri13: { + manager->Optimize(true); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + manager->Optimize(true); + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_beq: + case MOP_bne: { + manager->Optimize(true); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + manager->Optimize(true); + break; + } + case MOP_waddrrr: + case MOP_xaddrrr: + case MOP_dadd: + case MOP_sadd: + case MOP_wsubrrr: + case MOP_xsubrrr: + case MOP_dsub: + case MOP_ssub: + case MOP_xinegrr: + case MOP_winegrr: + case MOP_wfnegrr: + case MOP_xfnegrr: { + manager->Optimize(true); + break; + } + case MOP_wandrri12: + case MOP_xandrri13: { + manager->Optimize(true); + break; + } + case MOP_wcselrrrc: + case MOP_xcselrrrc: { + manager->Optimize(true); + break; + } + case MOP_wiorrrr: + case MOP_xiorrrr: + case MOP_wiorrrrs: + case MOP_xiorrrrs: { + manager->Optimize(true); + break; + } + case MOP_bge: + case MOP_ble: + case MOP_blt: + case MOP_bgt: { + manager->Optimize(true); + break; + } + case MOP_wcmprr: + case 
MOP_xcmprr: { + manager->Optimize(true); + break; + } + case MOP_xlslrri6: { + manager->Optimize(); + manager->Optimize(true); + break; + } + case MOP_xsxtb32: + case MOP_xsxtb64: + case MOP_xsxth32: + case MOP_xsxth64: + case MOP_xsxtw64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + manager->Optimize(true); + break; + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: + case MOP_wasrrri5: + case MOP_xasrrri6: + case MOP_wlslrri5: + case MOP_waddrri12: + case MOP_xaddrri12: + case MOP_wsubrri12: + case MOP_xsubrri12: { + manager->Optimize(true); + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: { + manager->Optimize(true); + break; + } + default: + break; + } + return manager->OptSuccess(); +} + +bool ContinuousCmpCsetPattern::CheckCondCode(const CondOperand &condOpnd) const +{ + switch (condOpnd.GetCode()) { + case CC_NE: + case CC_EQ: + case CC_LT: + case CC_GE: + case CC_GT: + case CC_LE: + return true; + default: + return false; + } +} + +bool ContinuousCmpCsetPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { + return false; + } + auto &condOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (condOpnd.GetCode() != CC_NE && condOpnd.GetCode() != CC_EQ) { + return false; + } + reverse = (condOpnd.GetCode() == CC_EQ); + auto &ccReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevCmpInsn = GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode(); + if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) { + return false; + } + if (!static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)).IsZero()) { + return false; + } + auto &cmpCCReg = static_cast(prevCmpInsn->GetOperand(kInsnFirstOpnd)); + InsnSet useSet = GetAllUseInsn(cmpCCReg); + if (useSet.size() > 1) { + return false; + } + auto &cmpUseReg = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + prevCsetInsn1 = GetDefInsn(cmpUseReg); + if (prevCsetInsn1 == nullptr) { + return false; + } + MOperator prevCsetMop1 = prevCsetInsn1->GetMachineOpcode(); + if (prevCsetMop1 != MOP_wcsetrc && prevCsetMop1 != MOP_xcsetrc) { + return false; + } + auto &condOpnd1 = static_cast(prevCsetInsn1->GetOperand(kInsnSecondOpnd)); + if (!CheckCondCode(condOpnd1)) { + return false; + } + auto &ccReg1 = static_cast(prevCsetInsn1->GetOperand(kInsnThirdOpnd)); + prevCmpInsn1 = GetDefInsn(ccReg1); + if (prevCmpInsn1 == nullptr) { + return false; + } + if (IsCCRegCrossVersion(*prevCsetInsn1, *prevCmpInsn, ccReg1)) { + return false; + } + return true; +} + +void ContinuousCmpCsetPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + MOperator curMop = insn.GetMachineOpcode(); + Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); + Insn *newCsetInsn = nullptr; + if (reverse) { + MOperator prevCsetMop = prevCsetInsn1->GetMachineOpcode(); + auto &prevCsetCondOpnd = static_cast(prevCsetInsn1->GetOperand(kInsnSecondOpnd)); + CondOperand &newCondOpnd = aarFunc->GetCondOperand(GetReverseBasicCC(prevCsetCondOpnd.GetCode())); + regno_t tmpRegNO = 0; + auto *tmpDefOpnd = aarFunc->CreateVirtualRegisterOperand(tmpRegNO, resOpnd.GetSize(), + static_cast(resOpnd).GetRegisterType()); + tmpDefOpnd->SetValidBitsNum(k1BitSize); + newCsetInsn = &cgFunc->GetInsnBuilder()->BuildInsn(prevCsetMop, *tmpDefOpnd, newCondOpnd, + prevCsetInsn1->GetOperand(kInsnThirdOpnd)); + BB *prevCsetBB = prevCsetInsn1->GetBB(); + 
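+        // Illustrative shape of the reversed path (virtual register names are hypothetical):
+        //   cmp  w0, w1            cmp  w0, w1
+        //   cset w2, LT            cset w2, LT
+        //   cmp  w2, #0      ==>   cset w9, GE    <- newCsetInsn with the reversed condition
+        //   cset w3, EQ            mov  w3, w9    <- emitted below when the final cset is replaced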
(void)prevCsetBB->InsertInsnAfter(*prevCsetInsn1, *newCsetInsn); + /* update ssa info */ + auto *a64SSAInfo = static_cast(ssaInfo); + a64SSAInfo->CreateNewInsnSSAInfo(*newCsetInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn1); + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, prevCmpInsn, newCsetInsn); + } + } + MOperator newMop = (curMop == MOP_wcsetrc) ? MOP_wmovrr : MOP_xmovrr; + Insn *newInsn = nullptr; + if (newCsetInsn == nullptr) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + prevCsetInsn1->GetOperand(kInsnFirstOpnd)); + } else { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), + newCsetInsn->GetOperand(kInsnFirstOpnd)); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn1); + prevs.emplace_back(prevCsetInsn1); + if (newCsetInsn == nullptr) { + (void)prevs.emplace_back(prevCmpInsn); + } else { + (void)prevs.emplace_back(newCsetInsn); + } + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool NegCmpToCmnPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcmprr && curMop != MOP_xcmprr) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_winegrr && prevMop != MOP_xinegrr && prevMop != MOP_winegrrs && prevMop != MOP_xinegrrs) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + InsnSet useInsns = GetAllUseInsn(ccReg); + for (auto *useInsn : useInsns) { + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop == MOP_bhi || useMop == MOP_bls) { + return false; + } + bool findUnsignedCond = false; + for (size_t i = 0; i < useInsn->GetOperandSize(); ++i) { + if (useInsn->GetOperand(i).GetKind() == Operand::kOpdCond) { + ConditionCode cond = static_cast(useInsn->GetOperand(i)).GetCode(); + if (cond == CC_HI || cond == CC_LS) { + findUnsignedCond = true; + break; + } + } + } + if (findUnsignedCond) { + return false; + } + } + return true; +} + +void NegCmpToCmnPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + Operand &opnd1 = insn.GetOperand(kInsnSecondOpnd); + Operand &opnd2 = prevInsn->GetOperand(kInsnSecondOpnd); + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator currMop = insn.GetMachineOpcode(); + Insn *newInsn = nullptr; + if (prevMop == MOP_winegrr || prevMop == MOP_xinegrr) { + MOperator newMop = (currMop == MOP_wcmprr) ? MOP_wcmnrr : MOP_xcmnrr; + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2)); + } else { + /* prevMop == MOP_winegrrs || prevMop == MOP_xinegrrs */ + MOperator newMop = (currMop == MOP_wcmprr) ? 
MOP_wcmnrrs : MOP_xcmnrrs; + Operand &shiftOpnd = prevInsn->GetOperand(kInsnThirdOpnd); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2, shiftOpnd)); + } + CHECK_FATAL(newInsn != nullptr, "must create newInsn"); + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool CsetCbzToBeqPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcsetrc && prevMop != MOP_xcsetrc) { + return false; + } + auto &ccReg = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (IsCCRegCrossVersion(*prevInsn, insn, ccReg)) { + return false; + } + return true; +} + +MOperator CsetCbzToBeqPattern::SelectNewMop(ConditionCode condCode, bool inverse) const +{ + switch (condCode) { + case CC_NE: + return inverse ? MOP_beq : MOP_bne; + case CC_EQ: + return inverse ? MOP_bne : MOP_beq; + case CC_MI: + return inverse ? MOP_bpl : MOP_bmi; + case CC_PL: + return inverse ? MOP_bmi : MOP_bpl; + case CC_VS: + return inverse ? MOP_bvc : MOP_bvs; + case CC_VC: + return inverse ? MOP_bvs : MOP_bvc; + case CC_HI: + return inverse ? MOP_bls : MOP_bhi; + case CC_LS: + return inverse ? MOP_bhi : MOP_bls; + case CC_GE: + return inverse ? MOP_blt : MOP_bge; + case CC_LT: + return inverse ? MOP_bge : MOP_blt; + case CC_HS: + return inverse ? MOP_blo : MOP_bhs; + case CC_LO: + return inverse ? MOP_bhs : MOP_blo; + case CC_LE: + return inverse ? MOP_bgt : MOP_ble; + case CC_GT: + return inverse ? MOP_ble : MOP_bgt; + case CC_CS: + return inverse ? 
MOP_bcc : MOP_bcs; + default: + return MOP_undef; + } +} + +void CsetCbzToBeqPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + MOperator curMop = insn.GetMachineOpcode(); + bool reverse = (curMop == MOP_wcbz || curMop == MOP_xcbz); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &condOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + MOperator newMop = SelectNewMop(condOpnd.GetCode(), reverse); + DEBUG_ASSERT(newMop != MOP_undef, "unknown condition code"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnThirdOpnd), labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ExtLslToBitFieldInsertPattern::CheckCondition(Insn &insn) +{ + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xsxtw64 && prevMop != MOP_xuxtw64) { + return false; + } + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (immOpnd.GetValue() > k32BitSize) { + return false; + } + return true; +} + +void ExtLslToBitFieldInsertPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + auto &prevSrcReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + cgFunc->InsertExtendSet(prevSrcReg.GetRegisterNumber()); + MOperator newMop = (prevInsn->GetMachineOpcode() == MOP_xsxtw64) ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6; + auto *aarFunc = static_cast(cgFunc); + auto &newImmOpnd1 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newImmOpnd2 = aarFunc->CreateImmOperand(k32BitSize, k6BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), prevSrcReg, + newImmOpnd1, newImmOpnd2); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool CselToCsetPattern::IsOpndDefByZero(const Insn &insn) const +{ + MOperator movMop = insn.GetMachineOpcode(); + switch (movMop) { + case MOP_xmovrr: + case MOP_wmovrr: { + return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); + } + case MOP_wmovri32: + case MOP_xmovri64: { + auto &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + return immOpnd.GetValue() == 0; + } + default: + return false; + } +} + +bool CselToCsetPattern::IsOpndDefByOne(const Insn &insn) const +{ + MOperator movMop = insn.GetMachineOpcode(); + if ((movMop != MOP_wmovri32) && (movMop != MOP_xmovri64)) { + return false; + } + auto &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + return immOpnd.GetValue() == 1; +} + +bool CselToCsetPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcselrrrc && curMop != MOP_xcselrrrc) { + return false; + } + auto &useOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevMovInsn1 = GetDefInsn(useOpnd1); + if (prevMovInsn1 == nullptr) { + return false; + } + MOperator prevMop1 = prevMovInsn1->GetMachineOpcode(); + if (prevMop1 != MOP_wmovri32 && prevMop1 != MOP_xmovri64 && 
prevMop1 != MOP_wmovrr && prevMop1 != MOP_xmovrr) { + return false; + } + auto &useOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevMovInsn2 = GetDefInsn(useOpnd2); + if (prevMovInsn2 == nullptr) { + return false; + } + MOperator prevMop2 = prevMovInsn2->GetMachineOpcode(); + if (prevMop2 != MOP_wmovri32 && prevMop2 != MOP_xmovri64 && prevMop2 != MOP_wmovrr && prevMop2 != MOP_xmovrr) { + return false; + } + return true; +} + +void CselToCsetPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + MOperator newMop = (dstOpnd.GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc); + Operand &condOpnd = insn.GetOperand(kInsnFourthOpnd); + Operand &rflag = insn.GetOperand(kInsnFifthOpnd); + Insn *newInsn = nullptr; + if (IsOpndDefByOne(*prevMovInsn1) && IsOpndDefByZero(*prevMovInsn2)) { + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, condOpnd, rflag)); + } else if (IsOpndDefByZero(*prevMovInsn1) && IsOpndDefByOne(*prevMovInsn2)) { + auto &origCondOpnd = static_cast(condOpnd); + ConditionCode inverseCondCode = GetReverseCC(origCondOpnd.GetCode()); + if (inverseCondCode == kCcLast) { + return; + } + auto *aarFunc = static_cast(cgFunc); + CondOperand &inverseCondOpnd = aarFunc->GetCondOperand(inverseCondCode); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, inverseCondOpnd, rflag)); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevMovInsn1); + prevs.emplace_back(prevMovInsn2); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool AndCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) +{ + MOperator curMop = currInsn.GetMachineOpcode(); + MOperator prevAndMop = prevAndInsn->GetMachineOpcode(); + auto &andImmOpnd = static_cast(prevAndInsn->GetOperand(kInsnThirdOpnd)); + auto &cmpImmOpnd = static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)); + if (cmpImmOpnd.GetValue() == 0) { + tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue()); + if (tbzImmVal < 0) { + return false; + } + switch (curMop) { + case MOP_beq: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbz : MOP_xtbz; + break; + case MOP_bne: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz; + break; + default: + return false; + } + } else { + tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue()); + int64 tmpVal = GetLogValueAtBase2(cmpImmOpnd.GetValue()); + if (tbzImmVal < 0 || tmpVal < 0 || tbzImmVal != tmpVal) { + return false; + } + switch (curMop) { + case MOP_beq: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz; + break; + case MOP_bne: + newMop = (prevAndMop == MOP_wandrri12) ? 
MOP_wtbz : MOP_xtbz; + break; + default: + return false; + } + } + return true; +} + +bool AndCmpBranchesToTbzPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_beq && curMop != MOP_bne) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevCmpInsn = GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode(); + if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) { + return false; + } + auto &cmpUseReg = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + prevAndInsn = GetDefInsn(cmpUseReg); + if (prevAndInsn == nullptr) { + return false; + } + MOperator prevAndMop = prevAndInsn->GetMachineOpcode(); + if (prevAndMop != MOP_wandrri12 && prevAndMop != MOP_xandrri13) { + return false; + } + CHECK_FATAL(prevAndInsn->GetOperand(kInsnFirstOpnd).GetSize() == prevCmpInsn->GetOperand(kInsnSecondOpnd).GetSize(), + "def-use reg size must be same based-on ssa"); + if (!CheckAndSelectPattern(insn)) { + return false; + } + return true; +} + +void AndCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImmOpnd = aarFunc->CreateImmOperand(tbzImmVal, k8BitSize, false); + Insn &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevAndInsn->GetOperand(kInsnSecondOpnd), tbzImmOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevAndInsn); + prevs.emplace_back(prevCmpInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) +{ + MOperator currMop = currInsn.GetMachineOpcode(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + switch (prevMop) { + case MOP_wcmpri: + case MOP_xcmpri: { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &immOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (immOpnd.GetValue() != 0) { + return false; + } + switch (currMop) { + case MOP_bge: + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + break; + case MOP_blt: + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + break; + default: + return false; + } + break; + } + case MOP_wcmprr: + case MOP_xcmprr: { + auto ®Opnd0 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto ®Opnd1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (!IsZeroRegister(regOpnd0) && !IsZeroRegister(regOpnd1)) { + return false; + } + switch (currMop) { + case MOP_bge: + if (IsZeroRegister(regOpnd1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return false; + } + break; + case MOP_ble: + if (IsZeroRegister(regOpnd0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return false; + } + break; + case MOP_blt: + if (IsZeroRegister(regOpnd1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? 
MOP_wtbnz : MOP_xtbnz; + } else { + return false; + } + break; + case MOP_bgt: + if (IsZeroRegister(regOpnd0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + } else { + return false; + } + break; + default: + return false; + } + break; + } + // fall through + [[clang::fallthrough]]; + default: + return false; + } + return true; +} + +bool ZeroCmpBranchesToTbzPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_bge && curMop != MOP_ble && curMop != MOP_blt && curMop != MOP_bgt) { + return false; + } + CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsLabel(), "must be labelOpnd"); + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = GetDefInsn(ccReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcmpri && prevMop != MOP_xcmpri && prevMop != MOP_wcmprr && prevMop != MOP_xcmprr) { + return false; + } + if (!CheckAndSelectPattern(insn)) { + return false; + } + return true; +} + +void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + CHECK_FATAL(regOpnd != nullptr, "must have regOpnd"); + auto *aarFunc = static_cast(cgFunc); + ImmOperand &bitOpnd = aarFunc->CreateImmOperand( + (regOpnd->GetSize() <= k32BitSize) ? (k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(newMop, *static_cast(regOpnd), bitOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wandrri12 && curMop != MOP_xandrri13) { + return false; + } + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + /* and_imm value must be (1 << n - 1) */ + if (immValue <= 0 || (((static_cast(immValue)) & (static_cast(immValue) + 1)) != 0)) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6) { + return false; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currUseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* check def-use reg size found by ssa */ + CHECK_FATAL(prevDstOpnd.GetSize() == currUseOpnd.GetSize(), "def-use reg size must be same"); + auto &andDstReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + VRegVersion *andDstVersion = ssaInfo->FindSSAVersion(andDstReg.GetRegisterNumber()); + DEBUG_ASSERT(andDstVersion != nullptr, "find destReg Version failed"); + for (auto useDUInfoIt : andDstVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + /* combine [and & cbz --> tbz] first, to eliminate more insns becase of incompleted copy prop */ + if (useMop == MOP_wcbz || useMop == MOP_xcbz || 
useMop == MOP_wcbnz || useMop == MOP_xcbnz) { + return false; + } + } + return true; +} + +void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + bool is64Bits = (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize); + Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = prevInsn->GetOperand(kInsnSecondOpnd); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + Operand &immOpnd1 = is64Bits ? aarFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) + : aarFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); + int64 tmpVal = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immVal2 = __builtin_ffsll(tmpVal + 1) - 1; + if ((immVal2 < k1BitSize) || (is64Bits && (immVal1 + immVal2) > k64BitSize) || + (!is64Bits && (immVal1 + immVal2) > k32BitSize)) { + return; + } + Operand &immOpnd2 = is64Bits ? aarFunc->CreateImmOperand(immVal2, kMaxImmVal6Bits, false) + : aarFunc->CreateImmOperand(immVal2, kMaxImmVal5Bits, false); + MOperator newMop = (is64Bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, srcOpnd, immOpnd1, immOpnd2); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool MvnAndToBicPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wandrrr && curMop != MOP_xandrrr) { + return false; + } + auto &useReg1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &useReg2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevInsn1 = GetDefInsn(useReg1); + prevInsn2 = GetDefInsn(useReg2); + MOperator mop = insn.GetMachineOpcode(); + MOperator desMop = mop == MOP_xandrrr ? MOP_xnotrr : MOP_wnotrr; + op1IsMvnDef = prevInsn1 != nullptr && prevInsn1->GetMachineOpcode() == desMop; + op2IsMvnDef = prevInsn2 != nullptr && prevInsn2->GetMachineOpcode() == desMop; + if (op1IsMvnDef || op2IsMvnDef) { + return true; + } + return false; +} + +void MvnAndToBicPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + MOperator newMop = insn.GetMachineOpcode() == MOP_xandrrr ? MOP_xbicrrr : MOP_wbicrrr; + Insn *prevInsn = op1IsMvnDef ? prevInsn1 : prevInsn2; + auto &prevOpnd1 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &opnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, opnd0, op1IsMvnDef ? opnd2 : opnd1, prevOpnd1); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + bb.ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool AndCbzToTbzPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = ssaInfo ? 
GetDefInsn(useReg) : insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wandrri12 && prevMop != MOP_xandrri13) { + return false; + } + if (!ssaInfo && (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd)))) { + return false; + } + return true; +} + +void AndCbzToTbzPattern::Run(BB &bb, Insn &insn) +{ + auto *aarchFunc = static_cast(cgFunc); + if (!CheckCondition(insn)) { + return; + } + auto &andImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 tbzVal = GetLogValueAtBase2(andImm.GetValue()); + if (tbzVal == -1) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (mOp) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "must be cbz/cbnz"); + break; + } + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImm = aarchFunc->CreateImmOperand(tbzVal, k8BitSize, false); + Insn &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd), tbzImm, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + if (ssaInfo) { + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + } + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool CombineSameArithmeticPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (std::find(validMops.begin(), validMops.end(), curMop) == validMops.end()) { + return false; + } + Operand &useOpnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(useOpnd.IsRegister(), "expect regOpnd"); + prevInsn = GetDefInsn(static_cast(useOpnd)); + if (prevInsn == nullptr) { + return false; + } + if (prevInsn->GetMachineOpcode() != curMop) { + return false; + } + auto &prevDefOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(prevDefOpnd.IsRegister(), "expect regOpnd"); + InsnSet useInsns = GetAllUseInsn(static_cast(prevDefOpnd)); + if (useInsns.size() > 1) { + return false; + } + auto *aarFunc = static_cast(cgFunc); + CHECK_FATAL(prevInsn->GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd"); + CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd"); + auto &prevImmOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + auto &curImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 prevImm = prevImmOpnd.GetValue(); + int64 curImm = curImmOpnd.GetValue(); + newImmOpnd = &aarFunc->CreateImmOperand(prevImmOpnd.GetValue() + curImmOpnd.GetValue(), curImmOpnd.GetSize(), + curImmOpnd.IsSignedValue()); + switch (curMop) { + case MOP_wlsrrri5: + case MOP_wasrrri5: + case MOP_wlslrri5: { + if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k32BitSizeInt) { + return false; + } + break; + } + case MOP_xlsrrri6: + case MOP_xasrrri6: + case MOP_xlslrri6: { + if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k64BitSizeInt) { + return false; + } + break; + } + case MOP_waddrri12: + case MOP_xaddrri12: + case MOP_wsubrri12: + case MOP_xsubrri12: { + if (!newImmOpnd->IsSingleInstructionMovable()) { + return false; + } + break; + } + default: + return false; + } + return true; +} + +void CombineSameArithmeticPattern::Run(BB 
&bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), insn.GetOperand(kInsnFirstOpnd), + prevInsn->GetOperand(kInsnSecondOpnd), *newImmOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wiorrrr && curMop != MOP_xiorrrr && curMop != MOP_wiorrrrs && curMop != MOP_xiorrrrs) { + return false; + } + Operand &curDstOpnd = insn.GetOperand(kInsnFirstOpnd); + is64Bits = (curDstOpnd.GetSize() == k64BitSize); + if (curMop == MOP_wiorrrr || curMop == MOP_xiorrrr) { + auto &useReg1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn *prevInsn1 = GetDefInsn(useReg1); + auto &useReg2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + Insn *prevInsn2 = GetDefInsn(useReg2); + if (prevInsn1 == nullptr || prevInsn2 == nullptr) { + return false; + } + MOperator prevMop1 = prevInsn1->GetMachineOpcode(); + MOperator prevMop2 = prevInsn2->GetMachineOpcode(); + if ((prevMop1 == MOP_wlsrrri5 || prevMop1 == MOP_xlsrrri6) && + (prevMop2 == MOP_wlslrri5 || prevMop2 == MOP_xlslrri6)) { + prevLsrInsn = prevInsn1; + prevLslInsn = prevInsn2; + } else if ((prevMop2 == MOP_wlsrrri5 || prevMop2 == MOP_xlsrrri6) && + (prevMop1 == MOP_wlslrri5 || prevMop1 == MOP_xlslrri6)) { + prevLsrInsn = prevInsn2; + prevLslInsn = prevInsn1; + } else { + return false; + } + int64 prevLsrImmValue = static_cast(prevLsrInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 prevLslImmValue = static_cast(prevLslInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + if ((prevLsrImmValue + prevLslImmValue) < 0) { + return false; + } + if ((is64Bits && (prevLsrImmValue + prevLslImmValue) != k64BitSize) || + (!is64Bits && (prevLsrImmValue + prevLslImmValue) != k32BitSize)) { + return false; + } + shiftValue = prevLsrImmValue; + } else if (curMop == MOP_wiorrrrs || curMop == MOP_xiorrrrs) { + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn *prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6 && prevMop != MOP_wlslrri5 && prevMop != MOP_xlslrri6) { + return false; + } + int64 prevImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + auto &shiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + uint32 shiftAmount = shiftOpnd.GetShiftAmount(); + if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) { + prevLsrInsn = prevInsn; + shiftValue = prevImm; + } else if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSR && + (prevMop == MOP_wlslrri5 || prevMop == MOP_xlslrri6)) { + prevLslInsn = prevInsn; + shiftValue = shiftAmount; + } else { + return false; + } + if (prevImm + static_cast(shiftAmount) < 0) { + return false; + } + if ((is64Bits && (prevImm + static_cast(shiftAmount)) != k64BitSize) || + (!is64Bits && (prevImm + static_cast(shiftAmount)) != k32BitSize)) { + return false; + } + } else { + CHECK_FATAL(false, "must be above mop"); + return false; + } + return true; +} + +void LogicShiftAndOrrToExtrPattern::Run(BB &bb, Insn &insn) +{ + if 
(!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + Operand &opnd1 = + (prevLslInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd) : prevLslInsn->GetOperand(kInsnSecondOpnd)); + Operand &opnd2 = + (prevLsrInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd) : prevLsrInsn->GetOperand(kInsnSecondOpnd)); + ImmOperand &immOpnd = is64Bits ? aarFunc->CreateImmOperand(shiftValue, kMaxImmVal6Bits, false) + : aarFunc->CreateImmOperand(shiftValue, kMaxImmVal5Bits, false); + MOperator newMop = is64Bits ? MOP_xextrrrri6 : MOP_wextrrrri5; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), opnd1, opnd2, immOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevLsrInsn); + prevs.emplace_back(prevLslInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +void SimplifyMulArithmeticPattern::SetArithType(const Insn &currInsn) +{ + MOperator mOp = currInsn.GetMachineOpcode(); + switch (mOp) { + case MOP_waddrrr: + case MOP_xaddrrr: { + arithType = kAdd; + isFloat = false; + break; + } + case MOP_dadd: + case MOP_sadd: { + arithType = kFAdd; + isFloat = true; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + arithType = kSub; + isFloat = false; + validOpndIdx = kInsnThirdOpnd; + break; + } + case MOP_dsub: + case MOP_ssub: { + arithType = kFSub; + isFloat = true; + validOpndIdx = kInsnThirdOpnd; + break; + } + case MOP_xinegrr: + case MOP_winegrr: { + arithType = kNeg; + isFloat = false; + validOpndIdx = kInsnSecondOpnd; + break; + } + case MOP_wfnegrr: + case MOP_xfnegrr: { + arithType = kFNeg; + isFloat = true; + validOpndIdx = kInsnSecondOpnd; + break; + } + default: { + CHECK_FATAL(false, "must be above mop"); + break; + } + } +} + +bool SimplifyMulArithmeticPattern::CheckCondition(Insn &insn) +{ + if (arithType == kUndef || validOpndIdx < 0) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(static_cast(validOpndIdx))); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + if (useVersion->GetAllUseInsns().size() > 1) { + return false; + } + MOperator currMop = insn.GetMachineOpcode(); + if (currMop == MOP_dadd || currMop == MOP_sadd || currMop == MOP_dsub || currMop == MOP_ssub || + currMop == MOP_wfnegrr || currMop == MOP_xfnegrr) { + isFloat = true; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wmulrrr && prevMop != MOP_xmulrrr && prevMop != MOP_xvmuld && prevMop != MOP_xvmuls) { + return false; + } + if (isFloat && (prevMop == MOP_wmulrrr || prevMop == MOP_xmulrrr)) { + return false; + } + if (!isFloat && (prevMop == MOP_xvmuld || prevMop == MOP_xvmuls)) { + return false; + } + if ((currMop == MOP_xaddrrr) || (currMop == MOP_waddrrr)) { + return true; + } + return CGOptions::IsFastMath(); +} + +void SimplifyMulArithmeticPattern::DoOptimize(BB &currBB, Insn &currInsn) +{ + Operand &resOpnd = currInsn.GetOperand(kInsnFirstOpnd); + Operand &opndMulOpnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + Operand &opndMulOpnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + bool is64Bits = (static_cast(resOpnd).GetSize() == k64BitSize); + /* may overflow */ + if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64Bits) { + return; + } + MOperator newMop 
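// Illustrative sketch (not part of the patch): SimplifyMulArithmeticPattern folds a
// multiply into its single consumer using the fused AArch64 forms. In C terms:
#include <cstdint>
static int64_t MaddSketch(int64_t a, int64_t b, int64_t c) { return c + a * b; } // madd
static int64_t MsubSketch(int64_t a, int64_t b, int64_t c) { return c - a * b; } // msub
static int64_t MnegSketch(int64_t a, int64_t b) { return -(a * b); }             // mneg
// The float variants round once where fmul+fadd would round twice, which is why
// CheckCondition only admits them under CGOptions::IsFastMath().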
= is64Bits ? curMop2NewMopTable[arithType][1] : curMop2NewMopTable[arithType][0]; + Insn *newInsn = nullptr; + if (arithType == kNeg || arithType == kFNeg) { + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2)); + } else { + Operand &opnd3 = (validOpndIdx == kInsnSecondOpnd) ? currInsn.GetOperand(kInsnThirdOpnd) + : currInsn.GetOperand(kInsnSecondOpnd); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2, opnd3)); + } + CHECK_FATAL(newInsn != nullptr, "must create newInsn"); + currBB.ReplaceInsn(currInsn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(currInsn, *newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &currInsn, newInsn); + } +} + +void SimplifyMulArithmeticPattern::Run(BB &bb, Insn &insn) +{ + SetArithType(insn); + if (arithType == kAdd || arithType == kFAdd) { + validOpndIdx = kInsnSecondOpnd; + if (CheckCondition(insn)) { + DoOptimize(bb, insn); + return; + } else { + validOpndIdx = kInsnThirdOpnd; + } + } + if (!CheckCondition(insn)) { + return; + } + DoOptimize(bb, insn); +} + +void ElimSpecificExtensionPattern::SetSpecificExtType(const Insn &currInsn) +{ + MOperator mOp = currInsn.GetMachineOpcode(); + switch (mOp) { + case MOP_xsxtb32: { + is64Bits = false; + extTypeIdx = SXTB; + break; + } + case MOP_xsxtb64: { + is64Bits = true; + extTypeIdx = SXTB; + break; + } + case MOP_xsxth32: { + is64Bits = false; + extTypeIdx = SXTH; + break; + } + case MOP_xsxth64: { + is64Bits = true; + extTypeIdx = SXTH; + break; + } + case MOP_xsxtw64: { + is64Bits = true; + extTypeIdx = SXTW; + break; + } + case MOP_xuxtb32: { + is64Bits = false; + extTypeIdx = UXTB; + break; + } + case MOP_xuxth32: { + is64Bits = false; + extTypeIdx = UXTH; + break; + } + case MOP_xuxtw64: { + is64Bits = true; + extTypeIdx = UXTW; + break; + } + default: { + extTypeIdx = EXTUNDEF; + } + } +} + +void ElimSpecificExtensionPattern::SetOptSceneType() +{ + if (prevInsn->IsCall()) { + sceneType = kSceneMov; + return; + } + MOperator preMop = prevInsn->GetMachineOpcode(); + switch (preMop) { + case MOP_wldr: + case MOP_wldrb: + case MOP_wldrsb: + case MOP_wldrh: + case MOP_wldrsh: + case MOP_xldrsw: { + sceneType = kSceneLoad; + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + sceneType = kSceneMov; + break; + } + case MOP_xsxtb32: + case MOP_xsxtb64: + case MOP_xsxth32: + case MOP_xsxth64: + case MOP_xsxtw64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + sceneType = kSceneSameExt; + break; + } + default: { + sceneType = kSceneUndef; + } + } +} + +void ElimSpecificExtensionPattern::ReplaceExtWithMov(Insn &currInsn) +{ + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(currInsn.GetOperand(kInsnFirstOpnd)); + MOperator newMop = is64Bits ? 
MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, currDstOpnd, prevDstOpnd); + currBB->ReplaceInsn(currInsn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(currInsn, newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &currInsn, &newInsn); + } +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterMov(Insn &insn) +{ + if (&insn == currBB->GetFirstInsn()) { + return; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &currSrcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + MOperator currMop = insn.GetMachineOpcode(); + /* example 2) [mov w0, R0] is return value of call and return size is not of range */ + if (prevInsn->IsCall() && (currSrcOpnd.GetRegisterNumber() == R0 || currSrcOpnd.GetRegisterNumber() == V0) && + currDstOpnd.GetRegisterNumber() == currSrcOpnd.GetRegisterNumber()) { + uint32 retSize = prevInsn->GetRetSize(); + if (retSize > 0 && + ((currMop == MOP_xuxtb32 && retSize <= k1ByteSize) || (currMop == MOP_xuxth32 && retSize <= k2ByteSize) || + (currMop == MOP_xuxtw64 && retSize <= k4ByteSize))) { + ReplaceExtWithMov(insn); + } + return; + } + if (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) { + return; + } + auto &immMovOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + int64 value = immMovOpnd.GetValue(); + uint64 minRange = extValueRangeTable[extTypeIdx][0]; + uint64 maxRange = extValueRangeTable[extTypeIdx][1]; + if (currMop == MOP_xsxtb32 || currMop == MOP_xsxth32) { + /* value should be in valid range */ + if (static_cast(value) >= minRange && static_cast(value) <= maxRange && + immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) { + ReplaceExtWithMov(insn); + } + } else if (currMop == MOP_xuxtb32 || currMop == MOP_xuxth32) { + if (!(static_cast(value) & minRange)) { + ReplaceExtWithMov(insn); + } + } else if (currMop == MOP_xuxtw64) { + ReplaceExtWithMov(insn); + } else { + /* MOP_xsxtb64 & MOP_xsxth64 & MOP_xsxtw64 */ + if (!(static_cast(value) & minRange) && immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) { + ReplaceExtWithMov(insn); + } + } +} + +bool ElimSpecificExtensionPattern::IsValidLoadExtPattern(Insn &currInsn, MOperator oldMop, MOperator newMop) const +{ + if (oldMop == newMop) { + return true; + } + auto *aarFunc = static_cast(cgFunc); + auto *memOpnd = static_cast(prevInsn->GetMemOpnd()); + DEBUG_ASSERT(!prevInsn->IsStorePair(), "do not do ElimSpecificExtensionPattern for str pair"); + DEBUG_ASSERT(!prevInsn->IsLoadPair(), "do not do ElimSpecificExtensionPattern for ldr pair"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && + !aarFunc->IsOperandImmValid(newMop, memOpnd, kInsnSecondOpnd)) { + return false; + } + uint32 shiftAmount = memOpnd->ShiftAmount(); + if (shiftAmount == 0) { + return true; + } + const InsnDesc *md = &AArch64CG::kMd[newMop]; + uint32 memSize = md->GetOperandSize() / k8BitSize; + uint32 validShiftAmount = + ((memSize == k8BitSize) + ? k3BitSize + : ((memSize == k4BitSize) ? k2BitSize : ((memSize == k2BitSize) ? 
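// Illustrative sketch (not part of the patch): the extension after "mov wn, #imm" is
// dead exactly when the immediate is a fixed point of the extension; the range tests
// above reduce to:
#include <cstdint>
static bool SignExtIsRedundant(int64_t v, unsigned srcBits) // 8 (sxtb), 16 (sxth), 32 (sxtw)
{
    const int64_t min = -(int64_t{1} << (srcBits - 1));     // e.g. -128 for sxtb
    const int64_t max = (int64_t{1} << (srcBits - 1)) - 1;  // e.g. 127 for sxtb
    return v >= min && v <= max;
}
static bool ZeroExtIsRedundant(uint64_t v, unsigned srcBits)
{
    return (v >> srcBits) == 0; // no bits set above the extended width
}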
k1BitSize : k0BitSize))); + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +MOperator ElimSpecificExtensionPattern::SelectNewLoadMopByBitSize(MOperator lowBitMop) const +{ + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + switch (lowBitMop) { + case MOP_wldrsb: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsb; + } + case MOP_wldrsh: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsh; + } + default: + break; + } + return lowBitMop; +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) +{ + if (extTypeIdx == EXTUNDEF) { + return; + } + MOperator prevOrigMop = prevInsn->GetMachineOpcode(); + for (uint8 i = 0; i < kPrevLoadPatternNum; i++) { + DEBUG_ASSERT(extTypeIdx < SpecificExtTypeSize, "extTypeIdx must be lower than SpecificExtTypeSize"); + if (prevOrigMop != loadMappingTable[extTypeIdx][i][0]) { + continue; + } + MOperator prevNewMop = loadMappingTable[extTypeIdx][i][1]; + if (!IsValidLoadExtPattern(insn, prevOrigMop, prevNewMop)) { + return; + } + if (is64Bits && extTypeIdx >= SXTB && extTypeIdx <= SXTW) { + prevNewMop = SelectNewLoadMopByBitSize(prevNewMop); + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* to avoid {mov [64], [32]} in the case of big endian */ + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + + auto *newMemOp = GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, prevNewMop); + + if (newMemOp == nullptr) { + return; + } + + auto *aarCGSSAInfo = static_cast(ssaInfo); + if (CG_PEEP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (prevOrigMop != prevNewMop) { + LogInfo::MapleLogger() << "======= OrigPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + } + + prevInsn->SetMemOpnd(newMemOp); + prevInsn->SetMOP(AArch64CG::kMd[prevNewMop]); + + if ((prevOrigMop != prevNewMop) && CG_PEEP_DUMP) { + LogInfo::MapleLogger() << "======= NewPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + + MOperator movMop = is64Bits ? 
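// Illustrative sketch (not part of the patch): ElimExtensionAfterLoad folds the
// extension into the load itself, e.g. "ldrb w0,[x1]; sxtb w0,w0" -> "ldrsb w0,[x1]",
// then rewrites the extension as a plain mov. IsValidLoadExtPattern additionally
// re-validates the register-offset shift, which must equal log2 of the access size:
static unsigned ValidShiftForAccessBytes(unsigned accessBytes)
{
    switch (accessBytes) {
        case 8: return 3;  // [base, index, LSL #3] for an 8-byte access
        case 4: return 2;
        case 2: return 1;
        default: return 0; // byte access: no shift allowed
    }
}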
MOP_xmovrr : MOP_wmovrr; + Insn &newMovInsn = cgFunc->GetInsnBuilder()->BuildInsn(movMop, insn.GetOperand(kInsnFirstOpnd), + prevInsn->GetOperand(kInsnFirstOpnd)); + currBB->ReplaceInsn(insn, newMovInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newMovInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + LogInfo::MapleLogger() << "======= ReplacedInsn :\n"; + insn.Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(insn); + LogInfo::MapleLogger() << "======= NewInsn :\n"; + newMovInsn.Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(newMovInsn); + } + } +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterSameExt(Insn &insn) +{ + if (extTypeIdx == EXTUNDEF) { + return; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator currMop = insn.GetMachineOpcode(); + for (uint8 i = 0; i < kSameExtPatternNum; i++) { + DEBUG_ASSERT(extTypeIdx < SpecificExtTypeSize, "extTypeIdx must be lower than SpecificExtTypeSize"); + if (sameExtMappingTable[extTypeIdx][i][0] == MOP_undef || sameExtMappingTable[extTypeIdx][i][1] == MOP_undef) { + continue; + } + if (prevMop == sameExtMappingTable[extTypeIdx][i][0] && currMop == sameExtMappingTable[extTypeIdx][i][1]) { + ReplaceExtWithMov(insn); + } + } +} + +bool ElimSpecificExtensionPattern::CheckCondition(Insn &insn) +{ + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = GetDefInsn(useReg); + InsnSet useInsns = GetAllUseInsn(useReg); + if ((prevInsn == nullptr) || (useInsns.size() != 1)) { + return false; + } + SetOptSceneType(); + SetSpecificExtType(insn); + if (sceneType == kSceneUndef) { + return false; + } + return true; +} + +void ElimSpecificExtensionPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + if (sceneType == kSceneMov) { + ElimExtensionAfterMov(insn); + } else if (sceneType == kSceneLoad) { + ElimExtensionAfterLoad(insn); + } else if (sceneType == kSceneSameExt) { + ElimExtensionAfterSameExt(insn); + } +} + +void OneHoleBranchPattern::FindNewMop(const BB &bb, const Insn &insn) +{ + if (&insn != bb.GetLastInsn()) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wcbz: + newOp = MOP_wtbnz; + break; + case MOP_wcbnz: + newOp = MOP_wtbz; + break; + case MOP_xcbz: + newOp = MOP_xtbnz; + break; + case MOP_xcbnz: + newOp = MOP_xtbz; + break; + default: + break; + } +} + +/* + * pattern1: + * uxtb w0, w1 <-----(ValidBitsNum <= 8) + * cbz w0, .label + * ===> + * cbz w1, .label + * + * pattern2: + * uxtb w2, w1 <-----(ValidBitsNum == 1) + * eor w3, w2, #1 + * cbz w3, .label + * ===> + * tbnz w1, #0, .label + */ +void OneHoleBranchPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + LabelOperand &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + bool pattern1 = (prevInsn->GetMachineOpcode() == MOP_xuxtb32) && + (static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() <= k8BitSize || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetValidBitsNum() <= k8BitSize); + if (pattern1) { + Insn &newCbzInsn = + cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), prevInsn->GetOperand(kInsnSecondOpnd), label); + bb.ReplaceInsn(insn, newCbzInsn); + ssaInfo->ReplaceInsn(insn, newCbzInsn); + optSuccess = true; + SetCurrInsn(&newCbzInsn); + if (CG_PEEP_DUMP) { + 
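// Illustrative sketch (not part of the patch): pattern2 above rests on a boolean
// identity. For a value with a single valid bit, (x ^ 1) == 0 holds iff bit 0 of x
// is set, so "eor w3, w2, #1; cbz w3, .L" is "tbnz w1, #0, .L". Sanity check:
#include <cassert>
#include <cstdint>
int main()
{
    for (uint32_t x = 0; x <= 1; ++x) {
        assert(((x ^ 1u) == 0) == ((x & 1u) != 0));
    }
    return 0;
}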
std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &newCbzInsn, nullptr); + } + return; + } + bool pattern2 = (prevInsn->GetMachineOpcode() == MOP_xeorrri13 || prevInsn->GetMachineOpcode() == MOP_weorrri12) && + (static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue() == 1); + if (pattern2) { + if (!CheckPrePrevInsn()) { + return; + } + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false); + auto ®Operand = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); + Insn &newTbzInsn = cgFunc->GetInsnBuilder()->BuildInsn(newOp, regOperand, oneHoleOpnd, label); + bb.ReplaceInsn(insn, newTbzInsn); + ssaInfo->ReplaceInsn(insn, newTbzInsn); + optSuccess = true; + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + prevs.emplace_back(prePrevInsn); + DumpAfterPattern(prevs, &newTbzInsn, nullptr); + } + } +} + +bool OneHoleBranchPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + FindNewMop(*insn.GetBB(), insn); + if (newOp == MOP_undef) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return false; + } + return true; +} + +bool OneHoleBranchPattern::CheckPrePrevInsn() +{ + auto &useReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + prePrevInsn = GetDefInsn(useReg); + if (prePrevInsn == nullptr) { + return false; + } + if (prePrevInsn->GetMachineOpcode() != MOP_xuxtb32 || + static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() != 1) { + return false; + } + if (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + return true; +} + +void OrrToMovPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + RegOperand *reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *reg1, *reg2); + bb.ReplaceInsn(insn, newInsn); + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &newInsn, nullptr); + } +} + +bool OrrToMovPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wiorrri12 && curMop != MOP_xiorrri13) { + return false; + } + MOperator thisMop = insn.GetMachineOpcode(); + Operand *opndOfOrr = nullptr; + switch (thisMop) { + case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_wmovrr; + break; + } + case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. 
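// Illustrative sketch (not part of the patch): OrrToMovPattern is the identity
// x | 0 == x, so "orr xd, xn, #0" can always be replaced by "mov xd, xn"; the
// CheckCondition below therefore only fires when the immediate is zero.
#include <cassert>
#include <cstdint>
int main()
{
    const uint64_t vals[] = {0u, 1u, 0xDEADBEEFu, ~0ull};
    for (uint64_t x : vals) {
        assert((x | 0u) == x);
    }
    return 0;
}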
*/ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_xmovrr; + break; + } + default: + return false; + } + CHECK_FATAL(opndOfOrr->IsIntImmediate(), "expects immediate operand"); + ImmOperand *immOpnd = static_cast(opndOfOrr); + if (immOpnd->GetValue() != 0) { + return false; + } + return true; +} + +void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + /* + * e.g. + * execute before & after RA: manager->NormalPatternOpt<>(true) + * execute before RA: manager->NormalPatternOpt<>(!cgFunc->IsAfterRegAlloc()) + * execute after RA: manager->NormalPatternOpt<>(cgFunc->IsAfterRegAlloc()) + */ + case MOP_xubfxrri6i6: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xmovzri16: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcmpri: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wstrb: + case MOP_wldrb: + case MOP_wstrh: + case MOP_wldrh: + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: + case MOP_qldr: + case MOP_qstr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xvmovrv: + case MOP_xvmovrd: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xsbfxrri6i6: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wsdivrrr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xbl: { + if (JAVALANG) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } + if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } + break; + } + default: + break; + } + /* skip if it is not a read barrier call. 
*/ + if (GetReadBarrierName(insn) != "") { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } +} +/* ======== CGPeepPattern End ======== */ + +void AArch64PeepHole::InitOpts() +{ + optimizations.resize(kPeepholeOptsNum); + optimizations[kEliminateSpecifcSXTOpt] = optOwnMemPool->New(cgFunc); + optimizations[kEliminateSpecifcUXTOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCsetCbzToBeqOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToTstOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCbzBranchesToTstOpt] = optOwnMemPool->New(cgFunc); + optimizations[kZeroCmpBranchesOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCselZeroOneToCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpCsetEorCbzOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAddLdrOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCsetEorOpt] = optOwnMemPool->New(cgFunc); + optimizations[kMoveCmpOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole::Run(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xsxtb32: + case MOP_xsxth32: + case MOP_xsxtb64: + case MOP_xsxth64: + case MOP_xsxtw64: { + (static_cast(optimizations[kEliminateSpecifcSXTOpt]))->Run(bb, insn); + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + (static_cast(optimizations[kEliminateSpecifcUXTOpt]))->Run(bb, insn); + break; + } + case MOP_wcbnz: + case MOP_xcbnz: { + (static_cast(optimizations[kCsetCbzToBeqOpt]))->Run(bb, insn); + break; + } + case MOP_wcbz: + case MOP_xcbz: { + (static_cast(optimizations[kCsetCbzToBeqOpt]))->Run(bb, insn); + break; + } + case MOP_xandrrr: + case MOP_wandrrr: + case MOP_wandrri12: + case MOP_xandrri13: { + (static_cast(optimizations[kAndCmpCsetEorCbzOpt]))->Run(bb, insn); + (static_cast(optimizations[kAndCmpBranchesToTstOpt]))->Run(bb, insn); + (static_cast(optimizations[kAndCbzBranchesToTstOpt]))->Run(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + (static_cast(optimizations[kCsetEorOpt]))->Run(bb, insn); + (static_cast(optimizations[kAndCmpBranchesToCsetOpt]))->Run(bb, insn); + break; + } + case MOP_xmovri64: + case MOP_wmovri32: { + static_cast(optimizations[kMoveCmpOpt])->Run(bb, insn); + break; + } + case MOP_xaddrrr: { + (static_cast(optimizations[kAddLdrOpt]))->Run(bb, insn); + break; + } + case MOP_wcselrrrc: + case MOP_xcselrrrc: { + (static_cast(optimizations[kCselZeroOneToCsetOpt]))->Run(bb, insn); + break; + } + default: + break; + } + if (&insn == bb.GetLastInsn()) { + (static_cast(optimizations[kZeroCmpBranchesOpt]))->Run(bb, insn); + } +} + +void AArch64PeepHole0::InitOpts() +{ + optimizations.resize(kPeepholeOptsNum); + optimizations[kRemoveIdenticalLoadAndStoreOpt] = optOwnMemPool->New(cgFunc); + optimizations[kCmpCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOptAdd] = optOwnMemPool->New(cgFunc); + optimizations[kDeleteMovAfterCbzOrCbnzOpt] = optOwnMemPool->New(cgFunc); + optimizations[kRemoveSxtBeforeStrOpt] = optOwnMemPool->New(cgFunc); + optimizations[kRemoveMovingtoSameRegOpt] = optOwnMemPool->New(cgFunc); + optimizations[kEnhanceStrLdrAArch64Opt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole0::Run(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wcmpri: + case MOP_xcmpri: { + (static_cast(optimizations[kCmpCsetOpt]))->Run(bb, insn); + break; + } + case MOP_xaddrrr: { + 
(static_cast(optimizations[kComplexMemOperandOptAdd]))->Run(bb, insn); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + (static_cast(optimizations[kDeleteMovAfterCbzOrCbnzOpt]))->Run(bb, insn); + break; + } + case MOP_wstrh: + case MOP_wstrb: { + (static_cast(optimizations[kRemoveSxtBeforeStrOpt]))->Run(bb, insn); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + (static_cast(optimizations[kRemoveMovingtoSameRegOpt]))->Run(bb, insn); + break; + } + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: { + if (thisMop == MOP_wstr || thisMop == MOP_xstr) { + (static_cast(optimizations[kRemoveIdenticalLoadAndStoreOpt]))->Run(bb, insn); + } + (static_cast(optimizations[kEnhanceStrLdrAArch64Opt]))->Run(bb, insn); + break; + } + default: + break; + } +} + +void AArch64PrePeepHole::InitOpts() +{ + optimizations.resize(kPeepholeOptsNum); + optimizations[kOneHoleBranchesPreOpt] = optOwnMemPool->New(cgFunc); + optimizations[kReplaceOrrToMovOpt] = optOwnMemPool->New(cgFunc); + optimizations[kReplaceCmpToCmnOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandPreOptAdd] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOptLSL] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOptLabel] = optOwnMemPool->New(cgFunc); + optimizations[kDuplicateExtensionOpt] = optOwnMemPool->New(cgFunc); + optimizations[kEnhanceStrLdrAArch64Opt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PrePeepHole::Run(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wiorrri12: + case MOP_xiorrri13: { + (static_cast(optimizations[kReplaceOrrToMovOpt]))->Run(bb, insn); + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + (static_cast(optimizations[kReplaceCmpToCmnOpt]))->Run(bb, insn); + break; + } + case MOP_xadrpl12: { + (static_cast(optimizations[kComplexMemOperandOpt]))->Run(bb, insn); + break; + } + case MOP_xaddrrr: { + (static_cast(optimizations[kComplexMemOperandPreOptAdd]))->Run(bb, insn); + break; + } + case MOP_xaddrrrs: { + (static_cast(optimizations[kComplexMemOperandOptLSL]))->Run(bb, insn); + break; + } + case MOP_xsxtb32: + case MOP_xsxth32: + case MOP_xsxtb64: + case MOP_xsxth64: + case MOP_xsxtw64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + (static_cast(optimizations[kDuplicateExtensionOpt]))->Run(bb, insn); + break; + } + case MOP_xldli: { + (static_cast(optimizations[kComplexMemOperandOptLabel]))->Run(bb, insn); + break; + } + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: { + (static_cast(optimizations[kEnhanceStrLdrAArch64Opt]))->Run(bb, insn); + break; + } + default: + break; + } + if (&insn == bb.GetLastInsn()) { + (static_cast(optimizations[kOneHoleBranchesPreOpt]))->Run(bb, insn); + } +} + +void AArch64PrePeepHole1::InitOpts() +{ + optimizations.resize(kPeepholeOptsNum); + optimizations[kOneHoleBranchesOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToTbzOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexExtendWordLslOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PrePeepHole1::Run(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xsxtw64: + case 
MOP_xuxtw64: { + (static_cast<ComplexExtendWordLslAArch64*>(optimizations[kComplexExtendWordLslOpt]))->Run(bb, insn); + break; + } + default: + break; + } + if (&insn == bb.GetLastInsn()) { + switch (thisMop) { + case MOP_wcbz: + case MOP_wcbnz: + case MOP_xcbz: + case MOP_xcbnz: { + (static_cast<OneHoleBranchesAArch64*>(optimizations[kOneHoleBranchesOpt]))->Run(bb, insn); + break; + } + case MOP_beq: + case MOP_bne: { + (static_cast<AndCmpBranchesToTbzAArch64*>(optimizations[kAndCmpBranchesToTbzOpt]))->Run(bb, insn); + break; + } + default: + break; + } + } +} + +bool RemoveIdenticalLoadAndStorePattern::CheckCondition(Insn &insn) +{ + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + return true; +} + +void RemoveIdenticalLoadAndStorePattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + MOperator mop1 = insn.GetMachineOpcode(); + MOperator mop2 = nextInsn->GetMachineOpcode(); + if ((mop1 == MOP_wstr && mop2 == MOP_wstr) || (mop1 == MOP_xstr && mop2 == MOP_xstr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(insn); + } + } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(*nextInsn); + } + } +} + +bool RemoveIdenticalLoadAndStorePattern::IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const +{ + regno_t regNO1 = static_cast<RegOperand&>(insn1.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + regno_t regNO2 = static_cast<RegOperand&>(insn2.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (regNO1 != regNO2) { + return false; + } + /* Match only [base + offset] */ + auto &memOpnd1 = static_cast<MemOperand&>(insn1.GetOperand(kInsnSecondOpnd)); + if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + return false; + } + auto &memOpnd2 = static_cast<MemOperand&>(insn2.GetOperand(kInsnSecondOpnd)); + if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd2.IsIntactIndexed()) { + return false; + } + Operand *base1 = memOpnd1.GetBaseRegister(); + Operand *base2 = memOpnd2.GetBaseRegister(); + if (!((base1 != nullptr) && base1->IsRegister()) || !((base2 != nullptr) && base2->IsRegister())) { + return false; + } + + regno_t baseRegNO1 = static_cast<RegOperand*>(base1)->GetRegisterNumber(); + /* First insn re-writes its own base addr: reg1 <- [ reg1 + offset ] */ + if (baseRegNO1 == regNO1) { + return false; + } + + regno_t baseRegNO2 = static_cast<RegOperand*>(base2)->GetRegisterNumber(); + if (baseRegNO1 != baseRegNO2) { + return false; + } + + return memOpnd1.GetOffsetImmediate()->GetOffsetValue() == memOpnd2.GetOffsetImmediate()->GetOffsetValue(); +} + +void RemoveIdenticalLoadAndStoreAArch64::Run(BB &bb, Insn &insn) +{ + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator mop1 = insn.GetMachineOpcode(); + MOperator mop2 = nextInsn->GetMachineOpcode(); + if ((mop1 == MOP_wstr && mop2 == MOP_wstr) || (mop1 == MOP_xstr && mop2 == MOP_xstr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(insn); + } + } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(*nextInsn); + } + } +} + +bool RemoveIdenticalLoadAndStoreAArch64::IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const +{ + regno_t regNO1 = static_cast<RegOperand&>(insn1.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + regno_t regNO2 = static_cast<RegOperand&>(insn2.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (regNO1 != regNO2) { + return false; + } + /* Match only [base + offset] */ + 
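// Illustrative sketch (not part of the patch; field names hypothetical): the identity
// test requires both accesses to be intact-indexed [base + #ofs] with the same base,
// offset and value register, and rejects a first insn that rewrote its own base
// (reg1 <- [reg1 + ofs]). Compactly:
struct AccessSketch { unsigned valReg, baseReg; long ofs; bool intactBOi; };
static bool SameSlotSketch(const AccessSketch &a, const AccessSketch &b)
{
    return a.intactBOi && b.intactBOi && a.valReg == b.valReg &&
           a.baseReg == b.baseReg && a.ofs == b.ofs && a.baseReg != a.valReg;
}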
auto &memOpnd1 = static_cast<MemOperand&>(insn1.GetOperand(kInsnSecondOpnd)); + if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + return false; + } + auto &memOpnd2 = static_cast<MemOperand&>(insn2.GetOperand(kInsnSecondOpnd)); + if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd2.IsIntactIndexed()) { + return false; + } + Operand *base1 = memOpnd1.GetBaseRegister(); + Operand *base2 = memOpnd2.GetBaseRegister(); + if (!((base1 != nullptr) && base1->IsRegister()) || !((base2 != nullptr) && base2->IsRegister())) { + return false; + } + + regno_t baseRegNO1 = static_cast<RegOperand*>(base1)->GetRegisterNumber(); + /* First insn re-writes its own base addr: reg1 <- [ reg1 + offset ] */ + if (baseRegNO1 == regNO1) { + return false; + } + + regno_t baseRegNO2 = static_cast<RegOperand*>(base2)->GetRegisterNumber(); + if (baseRegNO1 != baseRegNO2) { + return false; + } + + return memOpnd1.GetOffsetImmediate()->GetOffsetValue() == memOpnd2.GetOffsetImmediate()->GetOffsetValue(); +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) +{ + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto &reg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)); + auto &reg2 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it casts i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) +{ + /* remove mov x0,x0 when it casts i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void RemoveMovingtoSameRegAArch64::Run(BB &bb, Insn &insn) +{ + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto &reg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)); + auto &reg2 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it casts i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb.RemoveInsn(insn); + } +} + +bool EnhanceStrLdrAArch64::CheckOperandIsDeadFromInsn(const RegOperand &regOpnd, Insn &insn) { + for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { + auto &opnd = insn.GetOperand(i); + if (!insn.GetDesc()->GetOpndDes(i)->IsRegDef()) { + continue; + } + // regOpnd is redefined at curInsn + if (static_cast<RegOperand&>(opnd).GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + return !IfOperandIsLiveAfterInsn(regOpnd, insn); +} + +ImmOperand *EnhanceStrLdrAArch64::GetInsnAddOrSubNewOffset(Insn &insn, ImmOperand &offset) +{ + int64 val = 0; + VaryType vary = offset.GetVary(); + auto mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xaddrri12 || mOp == MOP_xsubrri12) { + auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd)); + val = immOpnd.GetValue(); + CHECK_FATAL(!(vary == kUnAdjustVary && immOpnd.GetVary() == kUnAdjustVary), "NIY, cannot deal with this case!"); + vary = immOpnd.GetVary(); + } else { + auto &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast<BitShiftOperand&>(insn.GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + val = (immOpnd.GetValue() << shiftOpnd.GetShiftAmount()); + } + + if (mOp == MOP_xsubrri12 || mOp == MOP_xsubrri24) { + val = -val; + } + val += offset.GetValue(); + auto &newImm = 
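// Illustrative sketch (not part of the patch): the offset folding performed by
// GetInsnAddOrSubNewOffset, as plain arithmetic. A 24-bit add/sub is encoded as an
// imm12 shifted left by 12:
#include <cstdint>
static int64_t FoldAddSubIntoOffset(int64_t ofs, int64_t imm12, bool shifted12, bool isSub)
{
    int64_t val = shifted12 ? (imm12 << 12) : imm12;
    return ofs + (isSub ? -val : val);
}
// e.g. "add x16, x0, #2, LSL #12; ldr x1, [x16, #8]" folds to offset 0x2008 off x0,
// provided the result is still encodable for the load.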
static_cast(cgFunc).GetOrCreateOfstOpnd(val, k64BitSize); + newImm.SetVary(vary); + return &newImm; +} + +void EnhanceStrLdrAArch64::OptimizeAddrBOI(Insn &insn, MemOperand &memOpnd, Insn &prevInsn) +{ + auto *oriBase = memOpnd.GetBaseRegister(); + auto *oriOffset = memOpnd.GetOffsetOperand(); + auto &defOpnd = static_cast(prevInsn.GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetRegisterNumber() != oriBase->GetRegisterNumber() || !CheckOperandIsDeadFromInsn(defOpnd, insn)) { + return; + } + auto *newBase = static_cast(&prevInsn.GetOperand(kInsnSecondOpnd)); + auto *newOffset = GetInsnAddOrSubNewOffset(prevInsn, *memOpnd.GetOffsetOperand()); + if (newOffset->GetValue() < 0) { + return; // obj dump cannot deal str x19, [x29,#-16] + } + + memOpnd.SetBaseRegister(*newBase); + memOpnd.SetOffsetOperand(*newOffset); + if (!static_cast(cgFunc).IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, kInsnSecondOpnd)) { + // If new offset is invalid, undo it + memOpnd.SetBaseRegister(*oriBase); + memOpnd.SetOffsetOperand(*oriOffset); + return; + } + memOpnd.SetAddrMode(MemOperand::kAddrModeBOi); + prevInsn.GetBB()->RemoveInsn(prevInsn); +} + +void EnhanceStrLdrAArch64::OptimizeAddrBOrXShiftExtend(Insn &insn, MemOperand &memOpnd, Insn &shiftExtendInsn) +{ + auto mOp = shiftExtendInsn.GetMachineOpcode(); + if (mOp != MOP_xuxtw64 && mOp != MOP_xsxtw64 && mOp != MOP_xlslrri6) { + return; + } + auto *oriIndex = memOpnd.GetIndexRegister(); + auto &defOpnd = static_cast(shiftExtendInsn.GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetRegisterNumber() != oriIndex->GetRegisterNumber() || !CheckOperandIsDeadFromInsn(defOpnd, insn)) { + return; + } + auto &newIndex = static_cast(shiftExtendInsn.GetOperand(kInsnSecondOpnd)); + bool isSigned = (mOp == MOP_xsxtw64); + uint32 shift = 0; + if (mOp == MOP_xlslrri6) { + shift = static_cast(static_cast(shiftExtendInsn.GetOperand(kInsnThirdOpnd)).GetValue()); + } + if (shift < k4BitSize) { + auto *newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOrX, + memOpnd.GetSize(), *memOpnd.GetBaseRegister(), newIndex, shift, isSigned); + insn.SetOperand(kInsnSecondOpnd, *newMemOpnd); + shiftExtendInsn.GetBB()->RemoveInsn(shiftExtendInsn); + } +} + +void EnhanceStrLdrAArch64::OptimizeAddrBOrX(Insn &insn, MemOperand &memOpnd, Insn &prevInsn) +{ + if (memOpnd.GetOffsetOperand()->GetValue() != 0 || memOpnd.GetOffsetOperand()->GetVary() == kUnAdjustVary) { + return; + } + auto *oriBase = memOpnd.GetBaseRegister(); + auto &defOpnd = static_cast(prevInsn.GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetRegisterNumber() != oriBase->GetRegisterNumber() || !CheckOperandIsDeadFromInsn(defOpnd, insn)) { + return; + } + auto *newBase = static_cast(&prevInsn.GetOperand(kInsnSecondOpnd)); + auto *newIndex = static_cast(&prevInsn.GetOperand(kInsnThirdOpnd)); + + memOpnd.SetBaseRegister(*newBase); + memOpnd.SetIndexRegister(*newIndex); + memOpnd.SetAddrMode(MemOperand::kAddrModeBOrX); + auto *prevShiftExtendInsn = prevInsn.GetPreviousMachineInsn(); + if (prevShiftExtendInsn != nullptr) { + OptimizeAddrBOrXShiftExtend(insn, memOpnd, *prevShiftExtendInsn); + } + prevInsn.GetBB()->RemoveInsn(prevInsn); +} + +void EnhanceStrLdrAArch64::OptimizeWithAddrrrs(Insn &insn, MemOperand &memOpnd, Insn &addInsn) +{ + if (memOpnd.GetOffsetOperand()->GetValue() != 0 || memOpnd.GetOffsetOperand()->GetVary() != kNotVary) { + return; + } + auto *oriBase = memOpnd.GetBaseRegister(); + auto &defOpnd = static_cast(addInsn.GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetRegisterNumber() != 
oriBase->GetRegisterNumber() || !CheckOperandIsDeadFromInsn(defOpnd, insn)) { + return; + } + auto &newBase = static_cast(addInsn.GetOperand(kInsnSecondOpnd)); + auto &newIndex = static_cast(addInsn.GetOperand(kInsnThirdOpnd)); + auto &shift = static_cast(addInsn.GetOperand(kInsnFourthOpnd)); + if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + return; + } + auto *newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOrX, + memOpnd.GetSize(), newBase, newIndex, shift.GetShiftAmount()); + insn.SetOperand(kInsnSecondOpnd, *newMemOpnd); + addInsn.GetBB()->RemoveInsn(addInsn); +} + +void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) +{ + Operand &opnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(opnd.IsMemoryAccessOperand(), "Unexpected operand in EnhanceStrLdrAArch64"); + auto &memOpnd = static_cast(opnd); + if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.GetOffsetImmediate()->IsImmOffset()) { + return; + } + + auto *prev = insn.GetPreviousMachineInsn(); + while (prev != nullptr) { + if (prev->GetMachineOpcode() == MOP_xmovrr) { + auto &defOpnd = static_cast(prev->GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetRegisterNumber() != memOpnd.GetBaseRegister()->GetRegisterNumber() || + !CheckOperandIsDeadFromInsn(defOpnd, insn)) { + return; + } + memOpnd.SetBaseRegister(static_cast(prev->GetOperand(kInsnSecondOpnd))); + auto *tmpInsn = prev; + prev = prev->GetPreviousMachineInsn(); + tmpInsn->GetBB()->RemoveInsn(*tmpInsn); + continue; + } + break; + } + if (prev == nullptr) { + return; + } + auto prevMop = prev->GetMachineOpcode(); + if (prevMop == MOP_xaddrri12 || prevMop == MOP_xsubrri12 || prevMop == MOP_xaddrri24 || prevMop == MOP_xsubrri24) { + OptimizeAddrBOI(insn, memOpnd, *prev); + } else if (prevMop == MOP_xaddrrr) { + OptimizeAddrBOrX(insn, memOpnd, *prev); + } else if (prevMop == MOP_xaddrrrs) { + OptimizeWithAddrrrs(insn, memOpnd, *prev); + } +} + +bool IsSameRegisterOperation(const RegOperand &desMovOpnd, const RegOperand &uxtDestOpnd, const RegOperand &uxtFromOpnd) +{ + return ((desMovOpnd.GetRegisterNumber() == uxtDestOpnd.GetRegisterNumber()) && + (uxtDestOpnd.GetRegisterNumber() == uxtFromOpnd.GetRegisterNumber())); +} + +bool CombineContiLoadAndStorePattern::IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, + int64 baseOfst) const +{ + uint32 opndNum = insn.GetOperandSize(); + bool sameMemAccess = false; /* both store or load */ + if (insn.IsStore() == isStore) { + sameMemAccess = true; + } + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOperand = static_cast(opnd); + RegOperand *base = memOperand.GetBaseRegister(); + /* need check offset as well */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? R29 : RSP; + if (!sameMemAccess && base != nullptr) { + regno_t curBaseRegNO = base->GetRegisterNumber(); + int64 memBarrierRange = static_cast(insn.IsLoadStorePair() ? 
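// Illustrative sketch (not part of the patch): OptimizeWithAddrrrs folds a shifted
// register add into the addressing mode when the add result dies at the access and
// the existing offset is zero:
//   add x1, x2, x3, LSL #3        ldr x0, [x2, x3, LSL #3]
//   ldr x0, [x1]             ==>
// The address arithmetic being relied on:
#include <cassert>
#include <cstdint>
int main()
{
    uint64_t x2 = 0x1000, x3 = 5;
    uint64_t x1 = x2 + (x3 << 3); // add x1, x2, x3, LSL #3
    assert(x1 == x2 + x3 * 8);    // same address as [x2, x3, LSL #3]
    return 0;
}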
k16BitSize : k8BitSize); + if (!(curBaseRegNO == regNO && memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && + memOperand.GetOffsetImmediate() != nullptr && + (memOperand.GetOffsetImmediate()->GetOffsetValue() <= (baseOfst - memBarrierRange) || + memOperand.GetOffsetImmediate()->GetOffsetValue() >= (baseOfst + memBarrierRange)))) { + return true; + } + } + /* do not trust the following situation : + * str x1, [x9] + * str x6, [x2] + * str x3, [x9, #8] + */ + if (isStore && regNO != stackBaseRegNO && base != nullptr && base->GetRegisterNumber() != stackBaseRegNO && + base->GetRegisterNumber() != regNO) { + return true; + } + if (isStore && base != nullptr && base->GetRegisterNumber() == regNO) { + if (memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && + memOperand.GetOffsetImmediate() != nullptr) { + int64 curOffset = memOperand.GetOffsetImmediate()->GetOffsetValue(); + if (memOperand.GetSize() == k64BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? k16BitSize : k8BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - memBarrierRange) { + return true; + } + } else if (memOperand.GetSize() == k32BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? k8BitSize : k4BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - memBarrierRange) { + return true; + } + } + } + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister()) { + if (!isStore && static_cast(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, regno_t destRegNO, regno_t memBaseRegNO, + int64 baseOfst) +{ + std::vector prevContiInsns; + bool isStr = insn.IsStore(); + for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsRegDefined(memBaseRegNO)) { + return prevContiInsns; + } + if (IsRegNotSameMemUseInInsn(*curInsn, memBaseRegNO, insn.IsStore(), static_cast(baseOfst))) { + return prevContiInsns; + } + /* return continuous STD/LDR insn */ + if (((isStr && curInsn->IsStore()) || (!isStr && curInsn->IsLoad())) && !curInsn->IsLoadStorePair()) { + auto *memOperand = static_cast(curInsn->GetMemOpnd()); + /* do not combine ldr r0, label */ + if (memOperand != nullptr) { + auto *BaseRegOpnd = static_cast(memOperand->GetBaseRegister()); + DEBUG_ASSERT(BaseRegOpnd == nullptr || !BaseRegOpnd->IsVirtualRegister(), + "physical register has not been allocated?"); + if (memOperand->GetAddrMode() == MemOperand::kAddrModeBOi && + BaseRegOpnd->GetRegisterNumber() == memBaseRegNO) { + prevContiInsns.emplace_back(curInsn); + } + } + } + /* check insn that changes the data flow */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? 
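// Illustrative sketch (not part of the patch): the aliasing guard above treats any
// access whose offset lies strictly inside the pair window as a potential clobber;
// only accesses wholly outside [baseOfst - range, baseOfst + range] are safe, with
// range = 16 bytes for a pair and 8 for a single 64-bit access:
static bool MayOverlapSketch(long curOfst, long baseOfst, long range)
{
    return curOfst > baseOfst - range && curOfst < baseOfst + range;
}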
R29 : RSP; + /* ldr x8, [x21, #8] + * call foo() + * ldr x9, [x21, #16] + * although x21 is a calleeSave register, there is no guarantee data in memory [x21] is not changed + */ + if (curInsn->IsCall() && + (!AArch64Abi::IsCalleeSavedReg(static_cast(destRegNO)) || memBaseRegNO != stackBaseRegNO)) { + return prevContiInsns; + } + /* store opt should not cross call due to stack args */ + if (curInsn->IsCall() && isStr) { + return prevContiInsns; + } + if (curInsn->GetMachineOpcode() == MOP_asm) { + return prevContiInsns; + } + if (curInsn->ScanReg(destRegNO)) { + return prevContiInsns; + } + } + return prevContiInsns; +} + +Insn *CombineContiLoadAndStorePattern::FindValidSplitAddInsn(Insn &curInsn, RegOperand &baseOpnd) const +{ + Insn *splitAdd = nullptr; + for (Insn *cursor = curInsn.GetPrev(); cursor != nullptr; cursor = cursor->GetPrev()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->IsCall()) { + break; + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + MOperator mOp = cursor->GetMachineOpcode(); + if (mOp != MOP_xaddrri12 && mOp != MOP_waddrri12) { + continue; + } + auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); + if (destOpnd.GetRegisterNumber() != R16 || destOpnd.GetSize() != baseOpnd.GetSize()) { + continue; + } + auto &useOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + /* + * split add as following: + * add R16, R0, #2, LSL #12 + * add R16, R16, #1536 + */ + if (useOpnd.GetRegisterNumber() != baseOpnd.GetRegisterNumber()) { + if (useOpnd.GetRegisterNumber() == R16) { + Insn *defInsn = cursor->GetPrev(); + CHECK_FATAL(defInsn, "invalid defInsn"); + CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || + defInsn->GetMachineOpcode() == MOP_waddrri24, + "split with wrong add"); + auto &opnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (opnd.GetRegisterNumber() == baseOpnd.GetRegisterNumber()) { + splitAdd = cursor; + } + } + break; + } else { + splitAdd = cursor; + break; + } + } + return splitAdd; +} + +bool CombineContiLoadAndStorePattern::PlaceSplitAddInsn(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand, RegOperand &baseOpnd, + uint32 bitLen) const +{ + Insn *cursor = nullptr; + MemOperand *maxOfstMem = nullptr; + int64 maxOfstVal = 0; + MOperator mop = curInsn.GetMachineOpcode(); + OfstOperand *ofstOpnd = memOperand.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + auto &aarFunc = static_cast(*cgFunc); + for (cursor = curInsn.GetNext(); cursor != nullptr; cursor = cursor->GetNext()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->GetMachineOpcode() == mop && (cursor->IsLoad() || cursor->IsStore())) { + auto &curMemOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + RegOperand *curBaseOpnd = curMemOpnd.GetBaseRegister(); + if (curMemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && RegOperand::IsSameReg(baseOpnd, *curBaseOpnd)) { + OfstOperand *curOfstOpnd = curMemOpnd.GetOffsetImmediate(); + CHECK_FATAL(curOfstOpnd, "invalid OfstOperand"); + if (curOfstOpnd->GetOffsetValue() > ofstVal && + (curOfstOpnd->GetOffsetValue() - ofstVal) < MemOperand::GetMaxPairPIMM(bitLen) && + !aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), &curMemOpnd, kInsnThirdOpnd)) { + maxOfstMem = &curMemOpnd; + maxOfstVal = curOfstOpnd->GetOffsetValue(); + } + } + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + if (cursor->IsRegDefined(R16)) { + break; + } + } + MemOperand *newMemOpnd = nullptr; + 
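// Illustrative sketch (not part of the patch): FindValidSplitAddInsn looks for a
// prior offset split through the scratch register R16, e.g.
//   add x16, x0, #2, LSL #12   // +0x2000
//   add x16, x16, #1536        // +0x600
// so the combined stp/ldp can be rebased on x16. The rebase arithmetic (names
// hypothetical):
static long RebasedOffsetSketch(long ofs, long immHi12, long immLo)
{
    // immHi12 is the 12-bit immediate of the "add ..., LSL #12" instruction
    return ofs - ((immHi12 << 12) + immLo);
}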
if (maxOfstMem == nullptr) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), false, + &combineInsn, true); + } else { + RegOperand *addResOpnd = aarFunc.GetBaseRegForSplit(R16); + ImmOperand &immAddend = + aarFunc.SplitAndGetRemained(*maxOfstMem, bitLen, addResOpnd, maxOfstVal, false, &combineInsn, true); + newMemOpnd = &aarFunc.CreateReplacementMemOperand(bitLen, *addResOpnd, ofstVal - immAddend.GetValue()); + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), false, + &combineInsn, true); + } else { + aarFunc.SelectAddAfterInsn(*addResOpnd, baseOpnd, immAddend, PTY_i64, false, combineInsn); + } + } + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + return false; + } + combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd); + return true; +} + +bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand) const +{ + auto *baseRegOpnd = static_cast(memOperand.GetBaseRegister()); + auto *ofstOpnd = static_cast(memOperand.GetOffsetImmediate()); + DEBUG_ASSERT(baseRegOpnd && ofstOpnd, "get baseOpnd and ofstOpnd failed"); + CHECK_FATAL(combineInsn.GetOperand(kInsnFirstOpnd).GetSize() == combineInsn.GetOperand(kInsnSecondOpnd).GetSize(), + "the size must equal"); + if (baseRegOpnd->GetRegisterNumber() == R16) { + return false; + } + Insn *splitAdd = FindValidSplitAddInsn(combineInsn, *baseRegOpnd); + const InsnDesc *md = &AArch64CG::kMd[combineInsn.GetMachineOpcode()]; + auto *opndProp = md->opndMD[kInsnFirstOpnd]; + auto &aarFunc = static_cast(*cgFunc); + if (splitAdd == nullptr) { + if (combineInsn.IsLoadStorePair()) { + if (ofstOpnd->GetOffsetValue() < 0) { + return false; /* do not split */ + } + } + /* create and place addInsn */ + return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize()); + } else { + auto &newBaseReg = static_cast(splitAdd->GetOperand(kInsnFirstOpnd)); + auto &addImmOpnd = static_cast(splitAdd->GetOperand(kInsnThirdOpnd)); + int64 addVal = 0; + if (static_cast(splitAdd->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R16) { + Insn *defInsn = splitAdd->GetPrev(); + CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || defInsn->GetMachineOpcode() == MOP_waddrri24, + "split with wrong add"); + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); + addVal = (immOpnd.GetValue() << shiftOpnd.GetShiftAmount()) + addImmOpnd.GetValue(); + } else { + addVal = addImmOpnd.GetValue(); + } + auto *newOfstOpnd = + &aarFunc.CreateOfstOpnd(static_cast(ofstOpnd->GetOffsetValue() - addVal), ofstOpnd->GetSize()); + auto *newMemOpnd = aarFunc.CreateMemOperand(MemOperand::kAddrModeBOi, opndProp->GetSize(), newBaseReg, nullptr, + newOfstOpnd, memOperand.GetSymbol()); + if (!(static_cast(*cgFunc).IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, + kInsnThirdOpnd))) { + return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize()); + } + combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd); + return true; + } +} + +bool CombineContiLoadAndStorePattern::CheckCondition(Insn &insn) +{ + memOpnd = static_cast(insn.GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "get mem operand failed"); + if (memOpnd->GetAddrMode() != 
MemOperand::kAddrModeBOi) { + return false; + } + if (!doAggressiveCombine) { + return false; + } + return true; +} + +/* Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp */ +void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "unexpect operand"); + auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto *baseRegOpnd = static_cast(memOpnd->GetBaseRegister()); + OfstOperand *offsetOpnd = memOpnd->GetOffsetImmediate(); + CHECK_FATAL(offsetOpnd != nullptr, "offset opnd lost"); + DEBUG_ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(), + "physical register has not been allocated?"); + std::vector prevContiInsnVec = FindPrevStrLdr( + insn, destOpnd.GetRegisterNumber(), baseRegOpnd->GetRegisterNumber(), offsetOpnd->GetOffsetValue()); + for (auto prevContiInsn : prevContiInsnVec) { + DEBUG_ASSERT(prevContiInsn != nullptr, "get previous consecutive instructions failed"); + auto *prevMemOpnd = static_cast(prevContiInsn->GetMemOpnd()); + if (memOpnd->GetIndexOpt() != prevMemOpnd->GetIndexOpt()) { + continue; + } + OfstOperand *prevOffsetOpnd = prevMemOpnd->GetOffsetImmediate(); + CHECK_FATAL(offsetOpnd != nullptr && prevOffsetOpnd != nullptr, "both conti str/ldr have no offset"); + auto &prevDestOpnd = static_cast(prevContiInsn->GetOperand(kInsnFirstOpnd)); + uint32 memSize = insn.GetMemoryByteSize(); + uint32 prevMemSize = prevContiInsn->GetMemoryByteSize(); + if (prevDestOpnd.GetRegisterType() != destOpnd.GetRegisterType()) { + continue; + } + int64 offsetVal = offsetOpnd->GetOffsetValue(); + int64 prevOffsetVal = prevOffsetOpnd->GetOffsetValue(); + auto diffVal = std::abs(offsetVal - prevOffsetVal); + regno_t destRegNO = destOpnd.GetRegisterNumber(); + regno_t prevDestRegNO = prevDestOpnd.GetRegisterNumber(); + if (insn.IsStore() && memOpnd->IsStackArgMem() && prevMemOpnd->IsStackArgMem() && + (memSize == k4ByteSize || memSize == k8ByteSize) && diffVal == k8BitSize && + (prevMemSize == k4ByteSize || prevMemSize == k8ByteSize) && + (destOpnd.GetValidBitsNum() == memSize * k8BitSize) && + (prevDestOpnd.GetValidBitsNum() == prevMemSize * k8BitSize)) { + RegOperand &newDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(destRegNO), k64BitSize, destOpnd.GetRegisterType()); + RegOperand &newPrevDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(prevDestRegNO), k64BitSize, prevDestOpnd.GetRegisterType()); + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + MOperator mopPair = (destOpnd.GetRegisterType() == kRegTyInt) ? MOP_xstp : MOP_dstp; + if ((static_cast(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, kInsnThirdOpnd))) { + Insn &combineInsn = + (offsetVal < prevOffsetVal) + ? 
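// Illustrative sketch (not part of the patch): two str/ldr combine into one stp/ldp
// only when the accesses are exactly adjacent, i.e. the offset difference equals the
// access size (4 bytes for w-regs, 8 for x-regs, 16 for q-regs); the lower-addressed
// access then supplies the pair's memory operand.
#include <cstdlib>
static bool CanPairSketch(long ofs1, long ofs2, long accessBytes)
{
    return std::labs(ofs1 - ofs2) == accessBytes;
}
// e.g. "str x1, [sp, #8]; str x2, [sp, #16]" pairs into "stp x1, x2, [sp, #8]".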
cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newDest, newPrevDest, *combineMemOpnd) + : cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newPrevDest, newDest, *combineMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + if (memSize != prevMemSize || thisMop != prevContiInsn->GetMachineOpcode() || + prevDestOpnd.GetSize() != destOpnd.GetSize()) { + continue; + } + /* do combination str/ldr -> stp/ldp */ + if ((insn.IsStore() || destRegNO != prevDestRegNO) || (destRegNO == RZR && prevDestRegNO == RZR)) { + if ((memSize == k8ByteSize && diffVal == k8BitSize) || (memSize == k4ByteSize && diffVal == k4BitSize) || + (memSize == k16ByteSize && diffVal == k16BitSize)) { + MOperator mopPair = GetMopPair(thisMop); + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + Insn &combineInsn = + (offsetVal < prevOffsetVal) + ? cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, prevDestOpnd, *combineMemOpnd) + : cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, destOpnd, *combineMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + if (!(static_cast(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, + kInsnThirdOpnd)) && + !SplitOfstWithAddToCombine(insn, combineInsn, *combineMemOpnd)) { + bb.RemoveInsn(combineInsn); + return; + } + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + /* do combination strb/ldrb -> strh/ldrh -> str/ldr */ + if (destRegNO == prevDestRegNO && destRegNO == RZR && prevDestRegNO == RZR) { + if ((memSize == k1ByteSize && diffVal == k1ByteSize) || (memSize == k2ByteSize && diffVal == k2ByteSize)) { + MOperator mopPair = GetMopHigherByte(thisMop); + if (offsetVal < prevOffsetVal) { + if (static_cast(*cgFunc).IsOperandImmValid(mopPair, memOpnd, kInsnSecondOpnd)) { + Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, *memOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } else { + if (static_cast(*cgFunc).IsOperandImmValid(mopPair, prevMemOpnd, + kInsnSecondOpnd)) { + Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, *prevMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + } + } + } +} + +MOperator CombineContiLoadAndStorePattern::GetMopHigherByte(MOperator mop) const +{ + switch (mop) { + case MOP_wldrb: + return MOP_wldrh; + case MOP_wstrb: + return MOP_wstrh; + case MOP_wldrh: + return MOP_wldr; + case MOP_wstrh: + return MOP_wstr; + default: + DEBUG_ASSERT(false, "should not run here"); + return MOP_undef; + } +} + +void CombineContiLoadAndStorePattern::RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const +{ + /* keep the comment */ + Insn *nn = prevInsn.GetNextMachineInsn(); + std::string newComment = ""; + MapleString comment = insn.GetComment(); + if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) { + newComment += comment.c_str(); + } + comment = prevInsn.GetComment(); + if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) { + newComment = newComment + " " + comment.c_str(); + } + if (newComment.c_str() != nullptr && strlen(newComment.c_str()) > 0) { + DEBUG_ASSERT(nn != nullptr, "nn should not be nullptr"); + nn->SetComment(newComment); + } + bb.RemoveInsn(insn); + bb.RemoveInsn(prevInsn); +} + +void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn) +{ + MOperator thisMop = 
+
+void EliminateSpecifcSXTAArch64::Run(BB &bb, Insn &insn)
+{
+    MOperator thisMop = insn.GetMachineOpcode();
+    Insn *prevInsn = insn.GetPrev();
+    while (prevInsn != nullptr && !prevInsn->GetMachineOpcode()) {
+        prevInsn = prevInsn->GetPrev();
+    }
+    if (prevInsn == nullptr) {
+        return;
+    }
+    auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &regOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (&insn != bb.GetFirstInsn() && regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() &&
+        prevInsn->IsMachineInstruction()) {
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd);
+            if (opnd.IsIntImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                int64 value = immOpnd.GetValue();
+                if (thisMop == MOP_xsxtb32) {
+                    /* value should be in range between -128 and 127 */
+                    if (value >= static_cast<int64>(0xFFFFFFFFFFFFFF80) && value <= 0x7F &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        bb.RemoveInsn(insn);
+                    }
+                } else if (thisMop == MOP_xsxth32) {
+                    /* value should be in range between -32768 and 32767 */
+                    if (value >= static_cast<int64>(0xFFFFFFFFFFFF8000) && value <= 0x7FFF &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        bb.RemoveInsn(insn);
+                    }
+                } else {
+                    uint64 flag = 0xFFFFFFFFFFFFFF80; /* initialize the flag with fifty-seven 1s at top */
+                    if (thisMop == MOP_xsxth64) {
+                        flag = 0xFFFFFFFFFFFF8000; /* specify the flag with forty-nine 1s at top in this case */
+                    } else if (thisMop == MOP_xsxtw64) {
+                        flag = 0xFFFFFFFF80000000; /* specify the flag with thirty-three 1s at top in this case */
+                    }
+                    if (!(static_cast<uint64>(value) & flag) &&
+                        immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) {
+                        auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc);
+                        RegOperand &dstOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+                            static_cast<AArch64reg>(dstMovOpnd.GetRegisterNumber()), k64BitSize,
+                            dstMovOpnd.GetRegisterType());
+                        prevInsn->SetOperand(kInsnFirstOpnd, dstOpnd);
+                        prevInsn->SetMOP(AArch64CG::kMd[MOP_xmovri64]);
+                        bb.RemoveInsn(insn);
+                    }
+                }
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrsb) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            if (thisMop == MOP_xsxtb32) {
+                bb.RemoveInsn(insn);
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrsh) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            if (thisMop == MOP_xsxth32) {
+                bb.RemoveInsn(insn);
+            }
+        }
+    }
+}
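+
+/*
+ * Illustrative case for the sign-extension elimination above (hypothetical
+ * registers): the extension is redundant when the immediate already fits.
+ *   mov  w0, #5                mov w0, #5
+ *   sxtb w0, w0         ==>    (sxtb removed)
+ */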
+
+void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn)
+{
+    MOperator thisMop = insn.GetMachineOpcode();
+    Insn *prevInsn = insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return;
+    }
+    auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &regOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (prevInsn->IsCall() && prevInsn->GetIsCallReturnUnsigned() &&
+        regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() &&
+        (regOpnd1.GetRegisterNumber() == R0 || regOpnd1.GetRegisterNumber() == V0)) {
+        uint32 retSize = prevInsn->GetRetSize();
+        if (retSize > 0 &&
+            ((thisMop == MOP_xuxtb32 && retSize <= k1ByteSize) || (thisMop == MOP_xuxth32 && retSize <= k2ByteSize) ||
+             (thisMop == MOP_xuxtw64 && retSize <= k4ByteSize))) {
+            bb.RemoveInsn(insn);
+        }
+        return;
+    }
+    if (&insn == bb.GetFirstInsn() || regOpnd0.GetRegisterNumber() != regOpnd1.GetRegisterNumber() ||
+        !prevInsn->IsMachineInstruction()) {
+        return;
+    }
+    if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) {
+        return;
+    }
+    if (thisMop == MOP_xuxtb32) {
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) {
+                return;
+            }
+            Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd);
+            if (opnd.IsIntImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                int64 value = immOpnd.GetValue();
+                /* check the top 56 bits of value */
+                if (!(static_cast<uint64>(value) & 0xFFFFFFFFFFFFFF00)) {
+                    bb.RemoveInsn(insn);
+                }
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrb) {
+            auto &dstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            bb.RemoveInsn(insn);
+        }
+    } else if (thisMop == MOP_xuxth32) {
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) {
+            auto &dstMovOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) {
+                return;
+            }
+            Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd);
+            if (opnd.IsIntImmediate()) {
+                auto &immOpnd = static_cast<ImmOperand&>(opnd);
+                int64 value = immOpnd.GetValue();
+                if (!(static_cast<uint64>(value) & 0xFFFFFFFFFFFF0000)) {
+                    bb.RemoveInsn(insn);
+                }
+            }
+        } else if (prevInsn->GetMachineOpcode() == MOP_wldrh) {
+            auto &dstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) {
+                return;
+            }
+            bb.RemoveInsn(insn);
+        }
+    } else {
+        /* thisMop == MOP_xuxtw64 */
+        if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_wldrsb ||
+            prevInsn->GetMachineOpcode() == MOP_wldrb || prevInsn->GetMachineOpcode() == MOP_wldrsh ||
+            prevInsn->GetMachineOpcode() == MOP_wldrh || prevInsn->GetMachineOpcode() == MOP_wldr) {
+            auto &dstOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+            if (!IsSameRegisterOperation(dstOpnd, regOpnd1, regOpnd0)) {
+                return;
+            }
+            /* 32-bit ldr does zero-extension by default, so this conversion can be skipped */
+            bb.RemoveInsn(insn);
+        }
+    }
+}
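+
+/*
+ * Illustrative case for the zero-extension elimination above (hypothetical
+ * registers): ldrb already zero-extends, so the uxtb contributes nothing.
+ *   ldrb w1, [x0]              ldrb w1, [x0]
+ *   uxtb w1, w1         ==>    (uxtb removed)
+ */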
+
+bool FmovRegPattern::CheckCondition(Insn &insn)
+{
+    nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return false;
+    }
+    if (&insn == insn.GetBB()->GetFirstInsn()) {
+        return false;
+    }
+    prevInsn = insn.GetPrev();
+    auto &curSrcRegOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &prevSrcRegOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    /* same src freg */
+    if (curSrcRegOpnd.GetRegisterNumber() != prevSrcRegOpnd.GetRegisterNumber()) {
+        return false;
+    }
+    return true;
+}
+
+void FmovRegPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    MOperator newMop;
+    uint32 doOpt = 0;
+    if (prevMop == MOP_xvmovrv && thisMop == MOP_xvmovrv) {
+        doOpt = k32BitSize;
+        newMop = MOP_wmovrr;
+    } else if (prevMop == MOP_xvmovrd && thisMop == MOP_xvmovrd) {
+        doOpt = k64BitSize;
+        newMop = MOP_xmovrr;
+    }
+    if (doOpt == 0) {
+        return;
+    }
+    auto &curDstRegOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    regno_t curDstReg = curDstRegOpnd.GetRegisterNumber();
+    /* optimize case 1 */
+    auto &prevDstRegOpnd = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    regno_t prevDstReg = prevDstRegOpnd.GetRegisterNumber();
+    auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    RegOperand &dst =
+        aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(curDstReg), doOpt, kRegTyInt);
+    RegOperand &src =
+        aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(prevDstReg), doOpt, kRegTyInt);
+    Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, dst, src);
+    bb.InsertInsnBefore(insn, newInsn);
+    bb.RemoveInsn(insn);
+    RegOperand &newOpnd =
+        aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(prevDstReg), doOpt, kRegTyInt);
+    uint32 opndNum = nextInsn->GetOperandSize();
+    for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) {
+        Operand &opnd = nextInsn->GetOperand(opndIdx);
+        if (opnd.IsMemoryAccessOperand()) {
+            auto &memOpnd = static_cast<MemOperand&>(opnd);
+            Operand *base = memOpnd.GetBaseRegister();
+            if (base != nullptr) {
+                if (base->IsRegister()) {
+                    auto *reg = static_cast<RegOperand*>(base);
+                    if (reg->GetRegisterNumber() == curDstReg) {
+                        memOpnd.SetBaseRegister(newOpnd);
+                    }
+                }
+            }
+            Operand *offset = memOpnd.GetIndexRegister();
+            if (offset != nullptr) {
+                if (offset->IsRegister()) {
+                    auto *reg = static_cast<RegOperand*>(offset);
+                    if (reg->GetRegisterNumber() == curDstReg) {
+                        memOpnd.SetIndexRegister(newOpnd);
+                    }
+                }
+            }
+        } else if (opnd.IsRegister()) {
+            /* Check if it is a source operand. */
+            auto *regProp = nextInsn->GetDesc()->opndMD[opndIdx];
+            if (regProp->IsUse()) {
+                auto &reg = static_cast<RegOperand&>(opnd);
+                if (reg.GetRegisterNumber() == curDstReg) {
+                    nextInsn->SetOperand(opndIdx, newOpnd);
+                }
+            }
+        }
+    }
+}
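+
+/*
+ * Illustrative case for FmovRegPattern (hypothetical registers): the second
+ * transfer from the same FP source becomes a cheap GPR move, and uses of w2
+ * in the following insn are rewritten to w1.
+ *   fmov w1, s0                fmov w1, s0
+ *   fmov w2, s0         ==>    mov  w2, w1
+ */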
+
+bool SbfxOptPattern::CheckCondition(Insn &insn)
+{
+    nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return false;
+    }
+    auto &curDstRegOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    uint32 opndNum = nextInsn->GetOperandSize();
+    const InsnDesc *md = nextInsn->GetDesc();
+    for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) {
+        Operand &opnd = nextInsn->GetOperand(opndIdx);
+        /* Check if it is a source operand. */
+        if (opnd.IsMemoryAccessOperand() || opnd.IsList()) {
+            return false;
+        } else if (opnd.IsRegister()) {
+            auto &reg = static_cast<RegOperand&>(opnd);
+            auto *regProp = md->opndMD[opndIdx];
+            if (reg.GetRegisterNumber() == curDstRegOpnd.GetRegisterNumber()) {
+                if (reg.GetSize() != k32BitSize) {
+                    return false;
+                }
+                if (regProp->IsDef()) {
+                    toRemove = true;
+                } else {
+                    (void)cands.emplace_back(opndIdx);
+                }
+            }
+        }
+    }
+    return cands.size() != 0;
+}
+
+void SbfxOptPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    auto &srcRegOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    RegOperand &newReg = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreatePhysicalRegisterOperand(
+        static_cast<AArch64reg>(srcRegOpnd.GetRegisterNumber()), k32BitSize, srcRegOpnd.GetRegisterType());
+    // replace the use points of the operand in nextInsn
+    for (auto i : cands) {
+        nextInsn->SetOperand(i, newReg);
+    }
+    if (toRemove) {
+        bb.RemoveInsn(insn);
+    }
+}
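+
+/*
+ * A sketch for SbfxOptPattern, assuming the triggering insn is a sign-extract
+ * whose destination is only consumed as a 32-bit value (registers are
+ * hypothetical):
+ *   sbfx x1, x0, #0, #32        (sbfx removed when it defines no other use)
+ *   add  w2, w1, w3       ==>   add w2, w0, w3
+ */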
+
+bool CbnzToCbzPattern::CheckCondition(Insn &insn)
+{
+    MOperator curMop = insn.GetMachineOpcode();
+    if (curMop != MOP_wcbnz && curMop != MOP_xcbnz) {
+        return false;
+    }
+    /* reg has to be R0, since the return value is in R0 */
+    auto &regOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    if (regOpnd0.GetRegisterNumber() != R0) {
+        return false;
+    }
+    nextBB = insn.GetBB()->GetNext();
+    /* Make sure nextBB can only be reached by bb */
+    if (nextBB->GetPreds().size() > 1 || !nextBB->GetEhPreds().empty()) {
+        return false;
+    }
+    /* Next insn should be a mov R0 = 0 */
+    movInsn = nextBB->GetFirstMachineInsn();
+    if (movInsn == nullptr) {
+        return false;
+    }
+    MOperator movInsnMop = movInsn->GetMachineOpcode();
+    if (movInsnMop != MOP_wmovri32 && movInsnMop != MOP_xmovri64) {
+        return false;
+    }
+    auto &movDest = static_cast<RegOperand&>(movInsn->GetOperand(kInsnFirstOpnd));
+    if (movDest.GetRegisterNumber() != R0) {
+        return false;
+    }
+    auto &movImm = static_cast<ImmOperand&>(movInsn->GetOperand(kInsnSecondOpnd));
+    if (movImm.GetValue() != 0) {
+        return false;
+    }
+    Insn *nextBrInsn = movInsn->GetNextMachineInsn();
+    if (nextBrInsn == nullptr) {
+        return false;
+    }
+    if (nextBrInsn->GetMachineOpcode() != MOP_xuncond) {
+        return false;
+    }
+    brInsn = nextBrInsn;
+    /* Does nextBB branch to the return-bb? */
+    if (nextBB->GetSuccs().size() != 1) {
+        return false;
+    }
+    return true;
+}
+
+void CbnzToCbzPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    BB *targetBB = nullptr;
+    auto it = bb.GetSuccsBegin();
+    if (*it == nextBB) {
+        ++it;
+    }
+    targetBB = *it;
+    /* Make sure that when nextBB is empty, targetBB is the fallthru of bb. */
+    if (targetBB != nextBB->GetNext()) {
+        return;
+    }
+    BB *nextBBTarget = *(nextBB->GetSuccsBegin());
+    if (nextBBTarget->GetKind() != BB::kBBReturn) {
+        return;
+    }
+    /* Control flow looks nice, instruction looks nice */
+    Operand &brTarget = brInsn->GetOperand(kInsnFirstOpnd);
+    insn.SetOperand(kInsnSecondOpnd, brTarget);
+    if (thisMop == MOP_wcbnz) {
+        insn.SetMOP(AArch64CG::kMd[MOP_wcbz]);
+    } else {
+        insn.SetMOP(AArch64CG::kMd[MOP_xcbz]);
+    }
+    nextBB->RemoveInsn(*movInsn);
+    nextBB->RemoveInsn(*brInsn);
+    /* nextBB is now a fallthru bb, not a goto bb */
+    nextBB->SetKind(BB::kBBFallthru);
+    /*
+     * fix control flow: we have bb, nextBB, targetBB, nextBBTarget;
+     * connect bb -> nextBBTarget and erase targetBB
+     */
+    it = bb.GetSuccsBegin();
+    CHECK_FATAL(it != bb.GetSuccsEnd(), "succs is empty.");
+    if (*it == targetBB) {
+        bb.EraseSuccs(it);
+        bb.PushFrontSuccs(*nextBBTarget);
+    } else {
+        ++it;
+        bb.EraseSuccs(it);
+        bb.PushBackSuccs(*nextBBTarget);
+    }
+    for (auto targetBBIt = targetBB->GetPredsBegin(); targetBBIt != targetBB->GetPredsEnd(); ++targetBBIt) {
+        if (*targetBBIt == &bb) {
+            targetBB->ErasePreds(targetBBIt);
+            break;
+        }
+    }
+    for (auto nextIt = nextBBTarget->GetPredsBegin(); nextIt != nextBBTarget->GetPredsEnd(); ++nextIt) {
+        if (*nextIt == nextBB) {
+            nextBBTarget->ErasePreds(nextIt);
+            break;
+        }
+    }
+    nextBBTarget->PushBackPreds(bb);
+
+    /* nextBB has no target, originally just a branch target */
+    nextBB->EraseSuccs(nextBB->GetSuccsBegin());
+    DEBUG_ASSERT(nextBB->GetSuccs().empty(), "peep: branch target incorrect");
+    /* Now make nextBB fall through to targetBB */
+    nextBB->PushFrontSuccs(*targetBB);
+    targetBB->PushBackPreds(*nextBB);
+}
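+
+/*
+ * Illustrative control-flow rewrite for CbnzToCbzPattern (hypothetical
+ * labels): when the fallthrough BB only sets R0 to zero and jumps to the
+ * return BB, the branch polarity is flipped and the mov/b pair is removed.
+ *   cbnz w0, .L_target          cbz w0, .L_ret
+ *   mov  w0, #0           ==>   (then falls through to .L_target)
+ *   b    .L_ret
+ */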
+
+void CsetCbzToBeqOptAArch64::Run(BB &bb, Insn &insn)
+{
+    Insn *insn1 = insn.GetPreviousMachineInsn();
+    if (insn1 == nullptr) {
+        return;
+    }
+    /* prevInsn must be a "cset" insn */
+    MOperator opCode1 = insn1->GetMachineOpcode();
+    if (opCode1 != MOP_xcsetrc && opCode1 != MOP_wcsetrc) {
+        return;
+    }
+
+    auto &tmpRegOp1 = static_cast<RegOperand&>(insn1->GetOperand(kInsnFirstOpnd));
+    regno_t baseRegNO1 = tmpRegOp1.GetRegisterNumber();
+    auto &tmpRegOp2 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    regno_t baseRegNO2 = tmpRegOp2.GetRegisterNumber();
+    if (baseRegNO1 != baseRegNO2) {
+        return;
+    }
+    /* If the reg will be used later, we shouldn't optimize the cset insn here */
+    if (IfOperandIsLiveAfterInsn(tmpRegOp2, insn)) {
+        return;
+    }
+    MOperator opCode = insn.GetMachineOpcode();
+    bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz);
+    Operand &rflag = static_cast<AArch64CGFunc*>(&cgFunc)->GetOrCreateRflag();
+    auto &label = static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &cond = static_cast<CondOperand&>(insn1->GetOperand(kInsnSecondOpnd));
+    MOperator jmpOperator = SelectMOperator(cond.GetCode(), reverse);
+    CHECK_FATAL((jmpOperator != MOP_undef), "unknown condition code");
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(jmpOperator, rflag, label);
+    bb.RemoveInsn(*insn1);
+    bb.ReplaceInsn(insn, newInsn);
+}
+
+MOperator CsetCbzToBeqOptAArch64::SelectMOperator(ConditionCode condCode, bool inverse) const
+{
+    switch (condCode) {
+        case CC_NE:
+            return inverse ? MOP_beq : MOP_bne;
+        case CC_EQ:
+            return inverse ? MOP_bne : MOP_beq;
+        case CC_MI:
+            return inverse ? MOP_bpl : MOP_bmi;
+        case CC_PL:
+            return inverse ? MOP_bmi : MOP_bpl;
+        case CC_VS:
+            return inverse ? MOP_bvc : MOP_bvs;
+        case CC_VC:
+            return inverse ? MOP_bvs : MOP_bvc;
+        case CC_HI:
+            return inverse ? MOP_bls : MOP_bhi;
+        case CC_LS:
+            return inverse ? MOP_bhi : MOP_bls;
+        case CC_GE:
+            return inverse ? MOP_blt : MOP_bge;
+        case CC_LT:
+            return inverse ? MOP_bge : MOP_blt;
+        case CC_HS:
+            return inverse ? MOP_blo : MOP_bhs;
+        case CC_LO:
+            return inverse ? MOP_bhs : MOP_blo;
+        case CC_LE:
+            return inverse ? MOP_bgt : MOP_ble;
+        case CC_GT:
+            return inverse ? MOP_ble : MOP_bgt;
+        case CC_CS:
+            return inverse ? MOP_bcc : MOP_bcs;
+        default:
+            return MOP_undef;
+    }
+}
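+
+/*
+ * Illustrative rewrite for CsetCbzToBeqOptAArch64 (hypothetical label): the
+ * materialized flag value is never needed, so branch on the flags directly.
+ *   cset w0, EQ
+ *   cbnz w0, .L1        ==>   beq .L1
+ */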
+
+bool ContiLDRorSTRToSameMEMPattern::CheckCondition(Insn &insn)
+{
+    prevInsn = insn.GetPrev();
+    while (prevInsn != nullptr && !prevInsn->GetMachineOpcode() && prevInsn != insn.GetBB()->GetFirstInsn()) {
+        prevInsn = prevInsn->GetPrev();
+    }
+    if (!insn.IsMachineInstruction() || prevInsn == nullptr) {
+        return false;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    /*
+     * store regB, regC, offset
+     * load  regA, regC, offset
+     */
+    if ((thisMop == MOP_xldr && prevMop == MOP_xstr) || (thisMop == MOP_wldr && prevMop == MOP_wstr) ||
+        (thisMop == MOP_dldr && prevMop == MOP_dstr) || (thisMop == MOP_sldr && prevMop == MOP_sstr)) {
+        loadAfterStore = true;
+    }
+    /*
+     * load regA, regC, offset
+     * load regB, regC, offset
+     */
+    if ((thisMop == MOP_xldr || thisMop == MOP_wldr || thisMop == MOP_dldr || thisMop == MOP_sldr) &&
+        prevMop == thisMop) {
+        loadAfterLoad = true;
+    }
+    if (!loadAfterStore && !loadAfterLoad) {
+        return false;
+    }
+    DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands");
+    DEBUG_ASSERT(prevInsn->GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands");
+    return true;
+}
+
+void ContiLDRorSTRToSameMEMPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    auto &memOpnd1 = static_cast<MemOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    MemOperand::AArch64AddressingMode addrMode1 = memOpnd1.GetAddrMode();
+    if (addrMode1 != MemOperand::kAddrModeBOi || (!memOpnd1.IsIntactIndexed())) {
+        return;
+    }
+
+    auto *base1 = static_cast<RegOperand*>(memOpnd1.GetBaseRegister());
+    DEBUG_ASSERT(base1 == nullptr || !base1->IsVirtualRegister(), "physical register has not been allocated?");
+    OfstOperand *offset1 = memOpnd1.GetOffsetImmediate();
+
+    auto &memOpnd2 = static_cast<MemOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+    MemOperand::AArch64AddressingMode addrMode2 = memOpnd2.GetAddrMode();
+    if (addrMode2 != MemOperand::kAddrModeBOi || (!memOpnd2.IsIntactIndexed())) {
+        return;
+    }
+
+    auto *base2 = static_cast<RegOperand*>(memOpnd2.GetBaseRegister());
+    DEBUG_ASSERT(base2 == nullptr || !base2->IsVirtualRegister(), "physical register has not been allocated?");
+    OfstOperand *offset2 = memOpnd2.GetOffsetImmediate();
+
+    if (base1 == nullptr || base2 == nullptr || offset1 == nullptr || offset2 == nullptr) {
+        return;
+    }
+
+    auto &reg1 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    auto &reg2 = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+    int64 offsetVal1 = offset1->GetOffsetValue();
+    int64 offsetVal2 = offset2->GetOffsetValue();
+    if (base1->GetRegisterNumber() != base2->GetRegisterNumber() || reg1.GetRegisterType() != reg2.GetRegisterType() ||
+        reg1.GetSize() != reg2.GetSize() || offsetVal1 != offsetVal2) {
+        return;
+    }
+    if (loadAfterStore && reg1.GetRegisterNumber() != reg2.GetRegisterNumber()) {
+        /* replace it with mov */
+        MOperator newOp = MOP_wmovrr;
+        if (reg1.GetRegisterType() == kRegTyInt) {
+            newOp = (reg1.GetSize() <= k32BitSize) ? MOP_wmovrr : MOP_xmovrr;
+        } else if (reg1.GetRegisterType() == kRegTyFloat) {
+            newOp = (reg1.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd;
+        }
+        Insn *nextInsn = insn.GetNext();
+        while (nextInsn != nullptr && !nextInsn->GetMachineOpcode() && nextInsn != bb.GetLastInsn()) {
+            nextInsn = nextInsn->GetNext();
+        }
+        bool moveSameReg = false;
+        if (nextInsn && nextInsn->GetIsSpill() && !IfOperandIsLiveAfterInsn(reg1, *nextInsn)) {
+            MOperator nextMop = nextInsn->GetMachineOpcode();
+            if ((thisMop == MOP_xldr && nextMop == MOP_xstr) || (thisMop == MOP_wldr && nextMop == MOP_wstr) ||
+                (thisMop == MOP_dldr && nextMop == MOP_dstr) || (thisMop == MOP_sldr && nextMop == MOP_sstr)) {
+                nextInsn->Insn::SetOperand(kInsnFirstOpnd, reg2);
+                moveSameReg = true;
+            }
+        }
+        if (!moveSameReg) {
+            bb.InsertInsnAfter(*prevInsn, cgFunc->GetInsnBuilder()->BuildInsn(newOp, reg1, reg2));
+        }
+        bb.RemoveInsn(insn);
+    } else if (reg1.GetRegisterNumber() == reg2.GetRegisterNumber() &&
+               base1->GetRegisterNumber() != reg2.GetRegisterNumber()) {
+        bb.RemoveInsn(insn);
+    }
+}
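+
+/*
+ * Illustrative cases for ContiLDRorSTRToSameMEMPattern (hypothetical
+ * registers/offsets): a load after a store to the same address becomes a
+ * move, and a reload into the same register disappears.
+ *   str x1, [sp, #8]            str x1, [sp, #8]
+ *   ldr x2, [sp, #8]      ==>   mov x2, x1
+ *
+ *   ldr x1, [sp, #8]            ldr x1, [sp, #8]
+ *   ldr x1, [sp, #8]      ==>   (second ldr removed)
+ */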
+
+bool RemoveIncDecRefPattern::CheckCondition(Insn &insn)
+{
+    if (insn.GetMachineOpcode() != MOP_xbl) {
+        return false;
+    }
+    prevInsn = insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    if (prevMop != MOP_xmovrr) {
+        return false;
+    }
+    auto &target = static_cast<FuncNameOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") {
+        return false;
+    }
+    if (static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 ||
+        static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != R0) {
+        return false;
+    }
+    return true;
+}
+
+void RemoveIncDecRefPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    bb.RemoveInsn(*prevInsn);
+    bb.RemoveInsn(insn);
+}
+
+#ifdef USE_32BIT_REF
+constexpr uint32 kRefSize = 32;
+#else
+constexpr uint32 kRefSize = 64;
+#endif
+
+void CselZeroOneToCsetOpt::Run(BB &bb, Insn &insn)
+{
+    Operand &trueValueOp = insn.GetOperand(kInsnSecondOpnd);
+    Operand &falseValueOp = insn.GetOperand(kInsnThirdOpnd);
+    Operand *trueTempOp = &trueValueOp;
+    Operand *falseTempOp = &falseValueOp;
+
+    /* find the fixed value in the BB */
+    if (!trueValueOp.IsIntImmediate()) {
+        trueMovInsn = FindFixedValue(trueValueOp, bb, trueTempOp, insn);
+    }
+    if (!falseValueOp.IsIntImmediate()) {
+        falseMovInsn = FindFixedValue(falseValueOp, bb, falseTempOp, insn);
+    }
+
+    DEBUG_ASSERT(trueTempOp != nullptr, "trueTempOp should not be nullptr");
+    DEBUG_ASSERT(falseTempOp != nullptr, "falseTempOp should not be nullptr");
+    /* csel to cset */
+    if ((trueTempOp->IsIntImmediate() || IsZeroRegister(*trueTempOp)) &&
+        (falseTempOp->IsIntImmediate() || IsZeroRegister(*falseTempOp))) {
+        ImmOperand *imm1 = static_cast<ImmOperand*>(trueTempOp);
+        ImmOperand *imm2 = static_cast<ImmOperand*>(falseTempOp);
+        bool inverse = imm1->IsOne() && (imm2->IsZero() || IsZeroRegister(*imm2));
+        if (inverse || ((imm1->IsZero() || IsZeroRegister(*imm1)) && imm2->IsOne())) {
+            Operand &reg = insn.GetOperand(kInsnFirstOpnd);
+            CondOperand &condOperand = static_cast<CondOperand&>(insn.GetOperand(kInsnFourthOpnd));
+            MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcsetrc : MOP_wcsetrc;
+            /* get the new cond ccCode */
+            ConditionCode ccCode = inverse ? condOperand.GetCode() : GetReverseCC(condOperand.GetCode());
+            if (ccCode == kCcLast) {
+                return;
+            }
+            AArch64CGFunc *func = static_cast<AArch64CGFunc*>(cgFunc);
+            CondOperand &cond = func->GetCondOperand(ccCode);
+            Operand &rflag = func->GetOrCreateRflag();
+            Insn &csetInsn = func->GetInsnBuilder()->BuildInsn(mopCode, reg, cond, rflag);
+            if (CGOptions::DoCGSSA() && CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) {
+                CHECK_FATAL(false, "check this case in ssa opt");
+            }
+            insn.GetBB()->ReplaceInsn(insn, csetInsn);
+        }
+    }
+}
+
+Insn *CselZeroOneToCsetOpt::FindFixedValue(Operand &opnd, BB &bb, Operand *&tempOp, const Insn &insn) const
+{
+    tempOp = &opnd;
+    bool alreadyFindCsel = false;
+    bool isRegDefined = false;
+    regno_t regno = static_cast<RegOperand&>(opnd).GetRegisterNumber();
+    FOR_BB_INSNS_REV(defInsn, &bb) {
+        if (!defInsn->IsMachineInstruction() || defInsn->IsBranch()) {
+            continue;
+        }
+        /* find the csel */
+        if (defInsn->GetId() == insn.GetId()) {
+            alreadyFindCsel = true;
+        }
+        /* find where the reg is defined */
+        if (alreadyFindCsel) {
+            isRegDefined = defInsn->IsRegDefined(regno);
+        }
+        /* if the defining insn is a mov-immediate, do this opt */
+        if (isRegDefined) {
+            MOperator thisMop = defInsn->GetMachineOpcode();
+            if (thisMop == MOP_wmovri32 || thisMop == MOP_xmovri64) {
+                if (&defInsn->GetOperand(kInsnFirstOpnd) == &opnd) {
+                    tempOp = &(defInsn->GetOperand(kInsnSecondOpnd));
+                    return defInsn;
+                }
+            } else {
+                return nullptr;
+            }
+        }
+    }
+    return nullptr;
+}
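+
+/*
+ * Illustrative rewrite for the csel pattern above (hypothetical registers
+ * holding the constants 1 and 0):
+ *   mov  w1, #1
+ *   mov  w2, #0
+ *   csel w0, w1, w2, NE    ==>   cset w0, NE
+ * (with the constants swapped, the condition is reversed instead)
+ */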
+
+void AndCmpCsetEorCbzOpt::Run(BB &bb, Insn &insn)
+{
+    if (insn.GetMachineOpcode() != MOP_wandrri12) {
+        return;
+    }
+    RegOperand &andInsnFirstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    RegOperand &andInsnSecondOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ImmOperand &andInsnThirdOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    if (andInsnFirstOpnd.GetRegisterNumber() != andInsnSecondOpnd.GetRegisterNumber() ||
+        andInsnThirdOpnd.GetValue() != 1) {
+        return;
+    }
+    Insn *cmpInsn = insn.GetNextMachineInsn();
+    if (cmpInsn == nullptr || cmpInsn->GetMachineOpcode() != MOP_wcmpri) {
+        return;
+    }
+    RegOperand &cmpInsnSecondOpnd = static_cast<RegOperand&>(cmpInsn->GetOperand(kInsnSecondOpnd));
+    ImmOperand &cmpInsnThirdOpnd = static_cast<ImmOperand&>(cmpInsn->GetOperand(kInsnThirdOpnd));
+    if (cmpInsnSecondOpnd.GetRegisterNumber() != andInsnFirstOpnd.GetRegisterNumber() ||
+        cmpInsnThirdOpnd.GetValue() != 0) {
+        return;
+    }
+    Insn *csetInsn = cmpInsn->GetNextMachineInsn();
+    if (csetInsn == nullptr || csetInsn->GetMachineOpcode() != MOP_wcsetrc) {
+        return;
+    }
+    RegOperand &csetInsnFirstOpnd = static_cast<RegOperand&>(csetInsn->GetOperand(kInsnFirstOpnd));
+    CondOperand &csetSecondOpnd = static_cast<CondOperand&>(csetInsn->GetOperand(kInsnSecondOpnd));
+    if (csetInsnFirstOpnd.GetRegisterNumber() != andInsnFirstOpnd.GetRegisterNumber() ||
+        csetSecondOpnd.GetCode() != CC_EQ) {
+        return;
+    }
+    Insn *eorInsn = csetInsn->GetNextMachineInsn();
+    if (eorInsn == nullptr || eorInsn->GetMachineOpcode() != MOP_weorrri12) {
+        return;
+    }
+    RegOperand &eorInsnFirstOpnd = static_cast<RegOperand&>(eorInsn->GetOperand(kInsnFirstOpnd));
+    RegOperand &eorInsnSecondOpnd = static_cast<RegOperand&>(eorInsn->GetOperand(kInsnSecondOpnd));
+    ImmOperand &eorInsnThirdOpnd = static_cast<ImmOperand&>(eorInsn->GetOperand(kInsnThirdOpnd));
+    if (eorInsnFirstOpnd.GetRegisterNumber() != andInsnFirstOpnd.GetRegisterNumber() ||
+        eorInsnFirstOpnd.GetRegisterNumber() != eorInsnSecondOpnd.GetRegisterNumber() ||
+        eorInsnThirdOpnd.GetValue() != 1) {
+        return;
+    }
+    Insn *cbzInsn = eorInsn->GetNextMachineInsn();
+    if (cbzInsn == nullptr || cbzInsn->GetMachineOpcode() != MOP_wcbz) {
+        return;
+    }
+    RegOperand &cbzInsnFirstOpnd = static_cast<RegOperand&>(cbzInsn->GetOperand(kInsnFirstOpnd));
+    if (cbzInsnFirstOpnd.GetRegisterNumber() != andInsnFirstOpnd.GetRegisterNumber()) {
+        return;
+    }
+    bb.RemoveInsn(*cmpInsn);
+    bb.RemoveInsn(*csetInsn);
+    bb.RemoveInsn(*eorInsn);
+    bb.RemoveInsn(*cbzInsn);
+    /* replace insn */
+    auto &label = static_cast<LabelOperand&>(cbzInsn->GetOperand(kInsnSecondOpnd));
+    ImmOperand &oneHoleOpnd = static_cast<AArch64CGFunc*>(cgFunc)->CreateImmOperand(0, k8BitSize, false);
+    bb.ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(MOP_wtbz, cbzInsnFirstOpnd, oneHoleOpnd, label));
+}
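+
+/*
+ * The five-insn chain matched above collapses into one test-bit branch
+ * (hypothetical register and label):
+ *   and  w0, w0, #1
+ *   cmp  w0, #0
+ *   cset w0, EQ
+ *   eor  w0, w0, #1
+ *   cbz  w0, .L1           ==>   tbz w0, #0, .L1
+ */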
+
+void AddLdrOpt::Run(BB &bb, Insn &insn)
+{
+    if (insn.GetMachineOpcode() != MOP_xaddrrr) {
+        return;
+    }
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    auto nextMop = nextInsn->GetMachineOpcode();
+    if (nextMop != MOP_xldr && nextMop != MOP_wldr) {
+        return;
+    }
+    RegOperand &insnFirstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    RegOperand &insnSecondOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (insnFirstOpnd.GetRegisterNumber() != insnSecondOpnd.GetRegisterNumber()) {
+        return;
+    }
+    RegOperand &ldrInsnFirstOpnd = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnFirstOpnd));
+    MemOperand &memOpnd = static_cast<MemOperand&>(nextInsn->GetOperand(kInsnSecondOpnd));
+    if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi ||
+        memOpnd.GetBaseRegister()->GetRegisterNumber() != insnFirstOpnd.GetRegisterNumber() ||
+        ldrInsnFirstOpnd.GetRegisterNumber() != insnFirstOpnd.GetRegisterNumber() ||
+        memOpnd.GetOffsetImmediate()->GetOffsetValue() != 0) {
+        return;
+    }
+    MemOperand &newMemOpnd = static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateMemOpnd(
+        MemOperand::kAddrModeBOrX, memOpnd.GetSize(), &insnFirstOpnd,
+        &static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd)), 0, false);
+    nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd);
+    bb.RemoveInsn(insn);
+}
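+
+/*
+ * Illustrative rewrite for AddLdrOpt (hypothetical registers): the address
+ * computation is folded into the load's addressing mode.
+ *   add x0, x0, x1
+ *   ldr x0, [x0]          ==>   ldr x0, [x0, x1]
+ */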
+
+void CsetEorOpt::Run(BB &bb, Insn &insn)
+{
+    if (insn.GetMachineOpcode() != MOP_xcsetrc && insn.GetMachineOpcode() != MOP_wcsetrc) {
+        return;
+    }
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr ||
+        (nextInsn->GetMachineOpcode() != MOP_weorrri12 && nextInsn->GetMachineOpcode() != MOP_xeorrri13)) {
+        return;
+    }
+    RegOperand &csetFirstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    RegOperand &eorFirstOpnd = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnFirstOpnd));
+    RegOperand &eorSecondOpnd = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnSecondOpnd));
+    ImmOperand &eorThirdOpnd = static_cast<ImmOperand&>(nextInsn->GetOperand(kInsnThirdOpnd));
+    if (eorThirdOpnd.GetValue() != 1 || eorFirstOpnd.GetRegisterNumber() != eorSecondOpnd.GetRegisterNumber() ||
+        csetFirstOpnd.GetRegisterNumber() != eorFirstOpnd.GetRegisterNumber()) {
+        return;
+    }
+    CondOperand &csetSecondOpnd = static_cast<CondOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    ConditionCode inverseCondCode = GetReverseCC(csetSecondOpnd.GetCode());
+    if (inverseCondCode == kCcLast) {
+        return;
+    }
+    auto *aarFunc = static_cast<AArch64CGFunc*>(cgFunc);
+    CondOperand &inverseCondOpnd = aarFunc->GetCondOperand(inverseCondCode);
+    insn.SetOperand(kInsnSecondOpnd, inverseCondOpnd);
+    bb.RemoveInsn(*nextInsn);
+}
+
+void MoveCmpOpt::Run(BB &bb, Insn &insn)
+{
+    if (insn.GetMachineOpcode() != MOP_xmovri64 && insn.GetMachineOpcode() != MOP_wmovri32) {
+        return;
+    }
+    ImmOperand &immOpnd = static_cast<ImmOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    if (!immOpnd.IsInBitSize(kMaxImmVal12Bits, 0) && !immOpnd.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) {
+        return;
+    }
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr ||
+        (nextInsn->GetMachineOpcode() != MOP_wcmprr && nextInsn->GetMachineOpcode() != MOP_xcmprr)) {
+        return;
+    }
+    RegOperand &cmpThirdOpnd = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnThirdOpnd));
+    RegOperand &movFirstOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    if (cmpThirdOpnd.GetRegisterNumber() != movFirstOpnd.GetRegisterNumber()) {
+        return;
+    }
+    MOperator cmpOpCode = (cmpThirdOpnd.GetSize() == k64BitSize) ? MOP_xcmpri : MOP_wcmpri;
+    Insn &newCmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+        cmpOpCode, nextInsn->GetOperand(kInsnFirstOpnd), nextInsn->GetOperand(kInsnSecondOpnd), immOpnd);
+    bb.ReplaceInsn(*nextInsn, newCmpInsn);
+    if (!IfOperandIsLiveAfterInsn(movFirstOpnd, newCmpInsn)) {
+        bb.RemoveInsn(insn);
+    }
+}
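+
+/*
+ * Illustrative rewrites for the two patterns above (hypothetical registers):
+ *   cset w0, EQ
+ *   eor  w0, w0, #1       ==>   cset w0, NE
+ * and:
+ *   mov  w1, #16
+ *   cmp  w0, w1           ==>   cmp w0, #16   (mov removed if w1 is dead)
+ */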
+
+bool InlineReadBarriersPattern::CheckCondition(Insn &insn)
+{
+    /* Inline read barriers are only enabled for GCONLY. */
+    if (!CGOptions::IsGCOnly()) {
+        return false;
+    }
+    return true;
+}
+
+void InlineReadBarriersPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    const std::string &barrierName = GetReadBarrierName(insn);
+    if (barrierName == kMccDummy) {
+        /* remove the dummy call. */
+        bb.RemoveInsn(insn);
+    } else {
+        /* replace the barrier function call with a load instruction. */
+        bool isVolatile = (barrierName == kMccLoadRefV || barrierName == kMccLoadRefVS);
+        bool isStatic = (barrierName == kMccLoadRefS || barrierName == kMccLoadRefVS);
+        /* refSize is 32 if USE_32BIT_REF is defined, otherwise 64. */
+        const uint32 refSize = kRefSize;
+        auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+        MOperator loadOp = GetLoadOperator(refSize, isVolatile);
+        RegOperand &regOp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, refSize, kRegTyInt);
+        AArch64reg addrReg = isStatic ? R0 : R1;
+        MemOperand &addr = aarch64CGFunc->CreateMemOpnd(addrReg, 0, refSize);
+        Insn &loadInsn = cgFunc->GetInsnBuilder()->BuildInsn(loadOp, regOp, addr);
+        bb.ReplaceInsn(insn, loadInsn);
+    }
+    bool isTailCall = (insn.GetMachineOpcode() == MOP_tail_call_opt_xbl);
+    if (isTailCall) {
+        /* add a 'ret' instruction for the tail-call-optimized load barrier. */
+        Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xret);
+        bb.AppendInsn(retInsn);
+        bb.SetKind(BB::kBBReturn);
+    }
+}
+
+bool ReplaceDivToMultiPattern::CheckCondition(Insn &insn)
+{
+    prevInsn = insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return false;
+    }
+    prePrevInsn = prevInsn->GetPreviousMachineInsn();
+    auto &sdivOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &sdivOpnd2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    if (sdivOpnd1.GetRegisterNumber() == sdivOpnd2.GetRegisterNumber() || sdivOpnd1.GetRegisterNumber() == R16 ||
+        sdivOpnd2.GetRegisterNumber() == R16 || prePrevInsn == nullptr) {
+        return false;
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    MOperator prePrevMop = prePrevInsn->GetMachineOpcode();
+    if (prevMop && (prevMop == MOP_wmovkri16) && prePrevMop && (prePrevMop == MOP_wmovri32)) {
+        return true;
+    }
+    return false;
+}
+
+void ReplaceDivToMultiPattern::Run(BB &bb, Insn &insn)
+{
+    if (CheckCondition(insn)) {
+        auto &sdivOpnd1 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+        auto &sdivOpnd2 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+        /* Check whether the dest operands of prevInsn and prePrevInsn are identical with sdivOpnd2. */
+        if ((&(prevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2) ||
+            (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &sdivOpnd2)) {
+            return;
+        }
+        auto &prevLsl = static_cast<BitShiftOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+        if (prevLsl.GetShiftAmount() != k16BitSize) {
+            return;
+        }
+        auto &prevImmOpnd = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+        auto &prePrevImmOpnd = static_cast<ImmOperand&>(prePrevInsn->GetOperand(kInsnSecondOpnd));
+        /*
+         * expect the immediate of the first mov to be 0x86A0 (34464); together
+         * with the movk of #1, LSL #16 this materializes the divisor 0x186A0
+         */
+        if ((prevImmOpnd.GetValue() != 1) || (prePrevImmOpnd.GetValue() != 34464)) {
+            return;
+        }
+        auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(cgFunc);
+        /* mov w16, #0x588f */
+        RegOperand &tempOpnd =
+            aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast<AArch64reg>(R16), k64BitSize, kRegTyInt);
+        /* create an immediate operand with this specific value */
+        ImmOperand &multiplierLow = aarch64CGFunc->CreateImmOperand(0x588f, k32BitSize, false);
+        Insn &multiplierLowInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovri32, tempOpnd, multiplierLow);
+        bb.InsertInsnBefore(*prePrevInsn, multiplierLowInsn);
+
+        /*
+         * movk w16, #0x4f8b, LSL #16
+         * create an immediate operand with this specific value
+         */
+        ImmOperand &multiplierHigh = aarch64CGFunc->CreateImmOperand(0x4f8b, k32BitSize, false);
+        BitShiftOperand *multiplierHighLsl = aarch64CGFunc->GetLogicalShiftLeftOperand(k16BitSize, true);
+        Insn &multiplierHighInsn =
+            cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovkri16, tempOpnd, multiplierHigh, *multiplierHighLsl);
+        bb.InsertInsnBefore(*prePrevInsn, multiplierHighInsn);
+
+        /* smull x16, w0, w16 */
+        Insn &newSmullInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, tempOpnd, sdivOpnd1, tempOpnd);
+        bb.InsertInsnBefore(*prePrevInsn, newSmullInsn);
+
+        /* asr x16, x16, #32 */
+        ImmOperand &dstLsrImmHigh = aarch64CGFunc->CreateImmOperand(k32BitSize, k32BitSize, false);
+        Insn &dstLsrInsnHigh = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmHigh);
+        bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnHigh);
+
+        /* add x16, x16, w0, SXTW */
+        Operand &sxtw = aarch64CGFunc->CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, 3);
+        Insn &addInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xxwaddrrre, tempOpnd, tempOpnd, sdivOpnd1, sxtw);
+        bb.InsertInsnBefore(*prePrevInsn, addInsn);
+
+        /* asr x16, x16, #17 */
+        ImmOperand &dstLsrImmChange = aarch64CGFunc->CreateImmOperand(17, k32BitSize, false);
+        Insn &dstLsrInsnChange =
+            cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmChange);
+        bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnChange);
+
+        /* add x2, x16, x0, LSR #31 */
+        auto &sdivOpnd0 = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+        regno_t sdivOpnd0RegNO = sdivOpnd0.GetRegisterNumber();
+        RegOperand &extSdivO0 = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+            static_cast<AArch64reg>(sdivOpnd0RegNO), k64BitSize, kRegTyInt);
+
+        regno_t sdivOpnd1RegNum = sdivOpnd1.GetRegisterNumber();
+        RegOperand &extSdivO1 = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(
+            static_cast<AArch64reg>(sdivOpnd1RegNum), k64BitSize, kRegTyInt);
+        /* the shift amount is thirty-one in this insn */
+        BitShiftOperand &addLsrOpnd = aarch64CGFunc->CreateBitShiftOperand(BitShiftOperand::kLSR, 31, 6);
+        Insn &addLsrInsn =
+            cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrrrs, extSdivO0, tempOpnd, extSdivO1, addLsrOpnd);
+        bb.InsertInsnBefore(*prePrevInsn, addLsrInsn);
+
+        /*
+         * remove insns
+         * Check whether sdivOpnd2 is used after the sdiv insn, i.e. whether it is live-out.
+         */
+        if (sdivOpnd2.GetRegisterNumber() != sdivOpnd0.GetRegisterNumber()) {
+            if (IfOperandIsLiveAfterInsn(sdivOpnd2, insn)) {
+                /* Only remove the div instruction. */
+                bb.RemoveInsn(insn);
+                return;
+            }
+        }
+
+        bb.RemoveInsn(*prePrevInsn);
+        bb.RemoveInsn(*prevInsn);
+        bb.RemoveInsn(insn);
+    }
+}
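+
+/*
+ * Illustrative effect of ReplaceDivToMultiPattern: a signed division by
+ * 0x186A0 (100000) is strength-reduced to a multiply/shift sequence through
+ * the scratch register x16 (source registers are hypothetical):
+ *   mov  w1, #0x86a0              mov  w16, #0x588f
+ *   movk w1, #0x1, LSL #16        movk w16, #0x4f8b, LSL #16
+ *   sdiv w0, w0, w1        ==>    smull x16, w0, w16
+ *                                 asr  x16, x16, #32
+ *                                 add  x16, x16, w0, SXTW
+ *                                 asr  x16, x16, #17
+ *                                 add  x0, x16, x0, LSR #31
+ */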
+
+Insn *AndCmpBranchesToCsetAArch64::FindPreviousCmp(Insn &insn) const
+{
+    regno_t defRegNO = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber();
+    for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) {
+        if (!curInsn->IsMachineInstruction()) {
+            continue;
+        }
+        if (curInsn->GetMachineOpcode() == MOP_wcmpri || curInsn->GetMachineOpcode() == MOP_xcmpri) {
+            return curInsn;
+        }
+        /*
+         * if there is any def/use of CC or of the insn's def reg between insn and curInsn,
+         * stop searching and return nullptr.
+         */
+        if (curInsn->ScanReg(defRegNO) || curInsn->ScanReg(kRFLAG)) {
+            return nullptr;
+        }
+    }
+    return nullptr;
+}
+
+void AndCmpBranchesToCsetAArch64::Run(BB &bb, Insn &insn)
+{
+    /* prevInsn must be the "cmp" insn */
+    Insn *prevInsn = FindPreviousCmp(insn);
+    if (prevInsn == nullptr) {
+        return;
+    }
+    /* prevPrevInsn must be the "and" insn */
+    Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn();
+    if (prevPrevInsn == nullptr ||
+        (prevPrevInsn->GetMachineOpcode() != MOP_wandrri12 && prevPrevInsn->GetMachineOpcode() != MOP_xandrri13)) {
+        return;
+    }
+
+    auto &csetCond = static_cast<CondOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &cmpImm = static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+    int64 cmpImmVal = cmpImm.GetValue();
+    auto &andImm = static_cast<ImmOperand&>(prevPrevInsn->GetOperand(kInsnThirdOpnd));
+    int64 andImmVal = andImm.GetValue();
+    if ((csetCond.GetCode() == CC_EQ && cmpImmVal == andImmVal) || (csetCond.GetCode() == CC_NE && cmpImmVal == 0)) {
+        /* if the flag register of "cmp" is live later, we can't remove the cmp insn. */
+        auto &flagReg = static_cast<RegOperand&>(prevInsn->GetOperand(kInsnFirstOpnd));
+        if (IfOperandIsLiveAfterInsn(flagReg, insn)) {
+            return;
+        }
+
+        auto &csetReg = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+        auto &prevInsnSecondReg = prevInsn->GetOperand(kInsnSecondOpnd);
+        bool isRegDiff = !RegOperand::IsSameRegNO(csetReg, prevInsnSecondReg);
+        if (isRegDiff && IfOperandIsLiveAfterInsn(static_cast<RegOperand&>(prevInsnSecondReg), insn)) {
+            return;
+        }
+        if (andImmVal == 1) {
+            if (!RegOperand::IsSameRegNO(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd))) {
+                return;
+            }
+            /* keep the "and" insn only. */
+            bb.RemoveInsn(insn);
+            bb.RemoveInsn(*prevInsn);
+            if (isRegDiff) {
+                prevPrevInsn->Insn::SetOperand(kInsnFirstOpnd, csetReg);
+            }
+        } else {
+            if (!RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd)) ||
+                !RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnSecondOpnd))) {
+                return;
+            }
+
+            /* andImmVal is 2 to the power of n */
+            int n = logValueAtBase2(andImmVal);
+            if (n < 0) {
+                return;
+            }
+
+            /* create the ubfx insn */
+            MOperator ubfxOp = (csetReg.GetSize() <= k32BitSize) ? MOP_wubfxrri5i5 : MOP_xubfxrri6i6;
+            if (ubfxOp == MOP_wubfxrri5i5 && static_cast<uint32>(n) >= k32BitSize) {
+                return;
+            }
+            auto &dstReg = static_cast<RegOperand&>(csetReg);
+            auto &srcReg = static_cast<RegOperand&>(prevInsnSecondReg);
+            auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc);
+            ImmOperand &bitPos = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false);
+            ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false);
+            Insn &ubfxInsn = cgFunc.GetInsnBuilder()->BuildInsn(ubfxOp, dstReg, srcReg, bitPos, bitSize);
+            bb.InsertInsnBefore(*prevPrevInsn, ubfxInsn);
+            bb.RemoveInsn(insn);
+            bb.RemoveInsn(*prevInsn);
+            bb.RemoveInsn(*prevPrevInsn);
+        }
+    }
+}
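+
+/*
+ * Illustrative rewrite for the power-of-two case above (hypothetical
+ * registers, andImmVal == 4 == 1 << 2):
+ *   and  w1, w1, #4
+ *   cmp  w1, #4
+ *   cset w0, EQ           ==>   ubfx w0, w1, #2, #1
+ */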
+
+void AndCmpBranchesToTstAArch64::Run(BB &bb, Insn &insn)
+{
+    /* nextInsn must be a "cmp" insn */
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr ||
+        (nextInsn->GetMachineOpcode() != MOP_wcmpri && nextInsn->GetMachineOpcode() != MOP_xcmpri)) {
+        return;
+    }
+    /* nextNextInsn must be a "beq" or "bne" insn */
+    Insn *nextNextInsn = nextInsn->GetNextMachineInsn();
+    if (nextNextInsn == nullptr ||
+        (nextNextInsn->GetMachineOpcode() != MOP_beq && nextNextInsn->GetMachineOpcode() != MOP_bne)) {
+        return;
+    }
+    auto &andRegOp = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    regno_t andRegNO1 = andRegOp.GetRegisterNumber();
+    auto &cmpRegOp2 = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnSecondOpnd));
+    regno_t cmpRegNO2 = cmpRegOp2.GetRegisterNumber();
+    if (andRegNO1 != cmpRegNO2) {
+        return;
+    }
+    /* If the reg will be used later, we shouldn't optimize the and insn here */
+    if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) {
+        return;
+    }
+    Operand &immOpnd = nextInsn->GetOperand(kInsnThirdOpnd);
+    DEBUG_ASSERT(immOpnd.IsIntImmediate(), "expects ImmOperand");
+    auto &defConst = static_cast<ImmOperand&>(immOpnd);
+    int64 defConstValue = defConst.GetValue();
+    if (defConstValue != 0) {
+        return;
+    }
+    /* build the tst insn */
+    Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd);
+    auto &andRegOp2 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    MOperator newOp = MOP_undef;
+    if (andOpnd3.IsRegister()) {
+        newOp = (andRegOp2.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr;
+    } else {
+        newOp = (andRegOp2.GetSize() <= k32BitSize) ? MOP_wtstri32 : MOP_xtstri64;
+    }
+    Operand &rflag = static_cast<AArch64CGFunc*>(&cgFunc)->GetOrCreateRflag();
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newOp, rflag, andRegOp2, andOpnd3);
+    if (CGOptions::DoCGSSA() && CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) {
+        CHECK_FATAL(false, "check this case in ssa opt");
+    }
+    bb.InsertInsnAfter(*nextInsn, newInsn);
+    bb.RemoveInsn(insn);
+    bb.RemoveInsn(*nextInsn);
+}
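+
+/*
+ * Illustrative rewrite for AndCmpBranchesToTstAArch64 (hypothetical
+ * registers/label); the conditional branch itself is kept:
+ *   and w0, w1, w2
+ *   cmp w0, #0            ==>   tst w1, w2
+ *   beq .L1                     beq .L1
+ */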
+
+void AndCbzBranchesToTstAArch64::Run(BB &bb, Insn &insn)
+{
+    /* nextInsn must be a "cbz" insn */
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr ||
+        (nextInsn->GetMachineOpcode() != MOP_wcbz && nextInsn->GetMachineOpcode() != MOP_xcbz)) {
+        return;
+    }
+    auto &andRegOp = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd));
+    regno_t andRegNO1 = andRegOp.GetRegisterNumber();
+    auto &cbzRegOp2 = static_cast<RegOperand&>(nextInsn->GetOperand(kInsnFirstOpnd));
+    regno_t cbzRegNO2 = cbzRegOp2.GetRegisterNumber();
+    if (andRegNO1 != cbzRegNO2) {
+        return;
+    }
+    /* If the reg will be used later, we shouldn't optimize the and insn here */
+    if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) {
+        return;
+    }
+    /* build the tst insn */
+    Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd);
+    auto &andRegOp2 = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    auto &andRegOp3 = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd));
+    MOperator newTstOp = MOP_undef;
+    if (andOpnd3.IsRegister()) {
+        newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr;
+    } else {
+        newTstOp =
+            (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstri32 : MOP_xtstri64;
+    }
+    Operand &rflag = static_cast<AArch64CGFunc*>(&cgFunc)->GetOrCreateRflag();
+    Insn &newInsnTst = cgFunc.GetInsnBuilder()->BuildInsn(newTstOp, rflag, andRegOp2, andOpnd3);
+    if (andOpnd3.IsImmediate()) {
+        if (!static_cast<ImmOperand&>(andOpnd3).IsBitmaskImmediate(andRegOp2.GetSize())) {
+            return;
+        }
+    }
+    /* build the beq insn */
+    MOperator opCode = nextInsn->GetMachineOpcode();
+    bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz);
+    auto &label = static_cast<LabelOperand&>(nextInsn->GetOperand(kInsnSecondOpnd));
+    MOperator jmpOperator = reverse ? MOP_beq : MOP_bne;
+    Insn &newInsnJmp = cgFunc.GetInsnBuilder()->BuildInsn(jmpOperator, rflag, label);
+    bb.ReplaceInsn(insn, newInsnTst);
+    bb.ReplaceInsn(*nextInsn, newInsnJmp);
+}
+
+void ZeroCmpBranchesAArch64::Run(BB &bb, Insn &insn)
+{
+    Insn *prevInsn = insn.GetPreviousMachineInsn();
+    if (!insn.IsBranch() || insn.GetOperandSize() <= kInsnSecondOpnd || prevInsn == nullptr) {
+        return;
+    }
+    if (!insn.GetOperand(kInsnSecondOpnd).IsLabel()) {
+        return;
+    }
+    LabelOperand *label = &static_cast<LabelOperand&>(insn.GetOperand(kInsnSecondOpnd));
+    RegOperand *regOpnd = nullptr;
+    RegOperand *reg0 = nullptr;
+    RegOperand *reg1 = nullptr;
+    MOperator newOp = MOP_undef;
+    ImmOperand *imm = nullptr;
+    switch (prevInsn->GetMachineOpcode()) {
+        case MOP_wcmpri:
+        case MOP_xcmpri: {
+            regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+            imm = &static_cast<ImmOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+            if (imm->GetValue() != 0) {
+                return;
+            }
+            if (insn.GetMachineOpcode() == MOP_bge) {
+                newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+            } else if (insn.GetMachineOpcode() == MOP_blt) {
+                newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+            } else {
+                return;
+            }
+            break;
+        }
+        case MOP_wcmprr:
+        case MOP_xcmprr: {
+            reg0 = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+            reg1 = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+            if (!IsZeroRegister(*reg0) && !IsZeroRegister(*reg1)) {
+                return;
+            }
+            switch (insn.GetMachineOpcode()) {
+                case MOP_bge:
+                    if (IsZeroRegister(*reg1)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+                        newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+                    } else {
+                        return;
+                    }
+                    break;
+                case MOP_ble:
+                    if (IsZeroRegister(*reg0)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+                        newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz;
+                    } else {
+                        return;
+                    }
+                    break;
+                case MOP_blt:
+                    if (IsZeroRegister(*reg1)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnSecondOpnd));
+                        newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+                    } else {
+                        return;
+                    }
+                    break;
+                case MOP_bgt:
+                    if (IsZeroRegister(*reg0)) {
+                        regOpnd = &static_cast<RegOperand&>(prevInsn->GetOperand(kInsnThirdOpnd));
+                        newOp = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz;
+                    } else {
+                        return;
+                    }
+                    break;
+                default:
+                    return;
+            }
+            break;
+        }
+        default:
+            return;
+    }
+    auto aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc);
+    ImmOperand &bitp = aarch64CGFunc->CreateImmOperand(
+        (regOpnd->GetSize() <= k32BitSize) ? (k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false);
+    bb.InsertInsnAfter(insn,
+                       cgFunc.GetInsnBuilder()->BuildInsn(newOp, *static_cast<RegOperand*>(regOpnd), bitp, *label));
+    bb.RemoveInsn(insn);
+    bb.RemoveInsn(*prevInsn);
+}
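+
+/*
+ * Illustrative rewrite for ZeroCmpBranchesAArch64 (hypothetical register and
+ * label): a signed comparison against zero reduces to a test of the sign bit.
+ *   cmp w0, #0
+ *   blt .L1          ==>   tbnz w0, #31, .L1
+ */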
+
+void ElimDuplicateExtensionAArch64::Run(BB &bb, Insn &insn)
+{
+    (void)bb;
+    Insn *prevInsn = insn.GetPreviousMachineInsn();
+    if (prevInsn == nullptr) {
+        return;
+    }
+    uint32 index;
+    uint32 upper;
+    bool is32bits = false;
+    MOperator *table = nullptr;
+    MOperator thisMop = insn.GetMachineOpcode();
+    switch (thisMop) {
+        case MOP_xsxtb32:
+            is32bits = true;
+            [[clang::fallthrough]];
+        case MOP_xsxtb64:
+            table = sextMopTable;
+            index = 0;
+            upper = kSizeOfSextMopTable;
+            break;
+        case MOP_xsxth32:
+            is32bits = true;
+            [[clang::fallthrough]];
+        case MOP_xsxth64:
+            table = sextMopTable;
+            index = 2;
+            upper = kSizeOfSextMopTable;
+            break;
+        case MOP_xsxtw64:
+            table = sextMopTable;
+            index = 4;
+            upper = kSizeOfSextMopTable;
+            break;
+        case MOP_xuxtb32:
+            is32bits = true;
+            table = uextMopTable;
+            index = 0;
+            upper = kSizeOfUextMopTable;
+            break;
+        case MOP_xuxth32:
+            is32bits = true;
+            table = uextMopTable;
+            index = 1;
+            upper = kSizeOfUextMopTable;
+            break;
+        case MOP_xuxtw64:
+            table = uextMopTable;
+            index = 2;
+            upper = kSizeOfUextMopTable;
+            break;
+        default:
+            CHECK_FATAL(false, "Unexpected mop");
+    }
+    MOperator prevMop = prevInsn->GetMachineOpcode();
+    for (uint32 i = index; i < upper; ++i) {
+        if (prevMop == table[i]) {
+            Operand &prevDestOpnd = prevInsn->GetOperand(kInsnFirstOpnd);
+            regno_t dest = static_cast<RegOperand&>(prevDestOpnd).GetRegisterNumber();
+            regno_t src = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber();
+            if (dest == src) {
+                insn.SetMOP(is32bits ? AArch64CG::kMd[MOP_wmovrr] : AArch64CG::kMd[MOP_xmovrr]);
+                if (upper == kSizeOfSextMopTable &&
+                    static_cast<RegOperand&>(prevDestOpnd).GetValidBitsNum() !=
+                        static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)).GetValidBitsNum()) {
+                    if (is32bits) {
+                        insn.GetOperand(kInsnFirstOpnd).SetSize(k64BitSize);
+                        insn.SetMOP(AArch64CG::kMd[MOP_xmovrr]);
+                    } else {
+                        prevDestOpnd.SetSize(k64BitSize);
+                        prevInsn->SetMOP(prevMop == MOP_xsxtb32 ? AArch64CG::kMd[MOP_xsxtb64]
+                                                                : AArch64CG::kMd[MOP_xsxth64]);
+                    }
+                }
+            }
+            break;
+        }
+    }
+}
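+
+/*
+ * Illustrative rewrite for ElimDuplicateExtensionAArch64 (hypothetical
+ * registers): re-extending an already-extended value is just a move.
+ *   sxth w0, w1                 sxth w0, w1
+ *   sxth w2, w0           ==>   mov  w2, w0
+ */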
{ + return false; + } + } else if (opnd.IsList()) { + for (auto operand : static_cast(opnd).GetOperands()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), *operand)) { + return false; + } + } + } else if (opnd.IsRegister()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), opnd)) { + return false; + } + } + } + } + return true; +} +void DeleteMovAfterCbzOrCbnzAArch64::ProcessBBHandle(BB *processBB, const BB &bb, const Insn &insn) const +{ + FOR_BB_INSNS_SAFE(processInsn, processBB, nextProcessInsn) { + nextProcessInsn = processInsn->GetNextMachineInsn(); + if (!processInsn->IsMachineInstruction()) { + continue; + } + /* register may be a caller save register */ + if (processInsn->IsCall()) { + break; + } + if (!OpndDefByMovZero(*processInsn) || !NoPreDefine(*processInsn) || + !RegOperand::IsSameRegNO(processInsn->GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnFirstOpnd))) { + continue; + } + bool toDoOpt = true; + MOperator condBrMop = insn.GetMachineOpcode(); + /* process elseBB, other preds must be cbz */ + if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) { + /* check out all preds of process_bb */ + for (auto *processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + if (!PredBBCheck(*processBBPred, true, processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } else { + /* process ifBB, other preds can be cbz or cbnz(one at most) */ + for (auto processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + /* for cbnz pred, there is one at most */ + if (!PredBBCheck(*processBBPred, processBBPred != processBB->GetPrev(), + processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } + if (!toDoOpt) { + continue; + } + processBB->RemoveInsn(*processInsn); + } +} + +/* ldr wn, [x1, wn, SXTW] + * add x2, wn, x2 + */ +bool ComplexMemOperandAddAArch64::IsExpandBaseOpnd(const Insn &insn, const Insn &prevInsn) const +{ + MOperator prevMop = prevInsn.GetMachineOpcode(); + if (prevMop >= MOP_wldrsb && prevMop <= MOP_xldr && + prevInsn.GetOperand(kInsnFirstOpnd).Equals(insn.GetOperand(kInsnSecondOpnd))) { + return true; + } + return false; +} + +void ComplexMemOperandAddAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + Insn *prevInsn = insn.GetPreviousMachineInsn(); + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrr && thisMop != MOP_waddrrr) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + if (!IsMemOperandOptPattern(insn, *nextInsn)) { + return; + } + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + auto newBaseOpnd = static_cast(&insn.GetOperand(kInsnSecondOpnd)); + auto newIndexOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + regno_t memBaseOpndRegNO = newBaseOpnd->GetRegisterNumber(); + if (newBaseOpnd->GetSize() <= k32BitSize && prevInsn != nullptr && IsExpandBaseOpnd(insn, *prevInsn)) { + newBaseOpnd = &aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(memBaseOpndRegNO), + k64BitSize, kRegTyInt); + } + if (newBaseOpnd->GetSize() != k64BitSize) { + return; + } + if (newIndexOpnd->GetSize() <= k32BitSize) { + MemOperand &newMemOpnd = aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), + 
+                                                                      newBaseOpnd, newIndexOpnd, 0, false);
+            nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd);
+        } else {
+            MemOperand &newMemOpnd = aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(),
+                                                                       newBaseOpnd, newIndexOpnd, nullptr, nullptr);
+            nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd);
+        }
+        bb.RemoveInsn(insn);
+    }
+}
+
+void DeleteMovAfterCbzOrCbnzAArch64::Run(BB &bb, Insn &insn)
+{
+    if (bb.GetKind() != BB::kBBIf) {
+        return;
+    }
+    if (&insn != cgcfg->FindLastCondBrInsn(bb)) {
+        return;
+    }
+    if (!cgcfg->IsCompareAndBranchInsn(insn)) {
+        return;
+    }
+    BB *processBB = nullptr;
+    if (bb.GetNext() == maplebe::CGCFG::GetTargetSuc(bb)) {
+        return;
+    }
+
+    MOperator condBrMop = insn.GetMachineOpcode();
+    if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) {
+        processBB = bb.GetNext();
+    } else {
+        processBB = maplebe::CGCFG::GetTargetSuc(bb);
+    }
+
+    DEBUG_ASSERT(processBB != nullptr, "process_bb is null in DeleteMovAfterCbzOrCbnzAArch64::Run");
+    ProcessBBHandle(processBB, bb, insn);
+}
+
+MOperator OneHoleBranchesPreAArch64::FindNewMop(const BB &bb, const Insn &insn) const
+{
+    MOperator newOp = MOP_undef;
+    if (&insn != bb.GetLastInsn()) {
+        return newOp;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    if (thisMop != MOP_wcbz && thisMop != MOP_wcbnz && thisMop != MOP_xcbz && thisMop != MOP_xcbnz) {
+        return newOp;
+    }
+    switch (thisMop) {
+        case MOP_wcbz:
+            newOp = MOP_wtbnz;
+            break;
+        case MOP_wcbnz:
+            newOp = MOP_wtbz;
+            break;
+        case MOP_xcbz:
+            newOp = MOP_xtbnz;
+            break;
+        case MOP_xcbnz:
+            newOp = MOP_xtbz;
+            break;
+        default:
+            CHECK_FATAL(false, "can not touch here");
+            break;
+    }
+    return newOp;
+}
+
+void OneHoleBranchesPreAArch64::Run(BB &bb, Insn &insn)
+{
+    AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc *>(&cgFunc);
+    MOperator newOp = FindNewMop(bb, insn);
+    if (newOp == MOP_undef) {
+        return;
+    }
+    Insn *prevInsn = insn.GetPreviousMachineInsn();
+    LabelOperand &label = static_cast<LabelOperand &>(insn.GetOperand(kInsnSecondOpnd));
+    if (prevInsn != nullptr && prevInsn->GetMachineOpcode() == MOP_xuxtb32 &&
+        (static_cast<RegOperand &>(prevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() <= k8BitSize ||
+         static_cast<RegOperand &>(prevInsn->GetOperand(kInsnFirstOpnd)).GetValidBitsNum() <= k8BitSize)) {
+        if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) {
+            return;
+        }
+        if (IfOperandIsLiveAfterInsn(static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd)), insn)) {
+            return;
+        }
+        insn.SetOperand(kInsnFirstOpnd, prevInsn->GetOperand(kInsnSecondOpnd));
+        if (CGOptions::DoCGSSA()) {
+            CHECK_FATAL(false, "check this case in ssa opt");
+        }
+        bb.RemoveInsn(*prevInsn);
+    }
+    if (prevInsn != nullptr &&
+        (prevInsn->GetMachineOpcode() == MOP_xeorrri13 || prevInsn->GetMachineOpcode() == MOP_weorrri12) &&
+        static_cast<ImmOperand &>(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue() == 1) {
+        if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) {
+            return;
+        }
+        Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn();
+        if (prevPrevInsn == nullptr) {
+            return;
+        }
+        if (prevPrevInsn->GetMachineOpcode() != MOP_xuxtb32 ||
+            static_cast<RegOperand &>(prevPrevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() != 1) {
+            return;
+        }
+        if (&(prevPrevInsn->GetOperand(kInsnFirstOpnd)) != &(prevInsn->GetOperand(kInsnSecondOpnd))) {
+            return;
+        }
+        ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false);
+        auto &regOperand = static_cast<RegOperand &>(prevPrevInsn->GetOperand(kInsnSecondOpnd));
+        if (CGOptions::DoCGSSA()) {
+            CHECK_FATAL(false, "check this case in ssa opt");
+        }
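+        /*
+         * Illustrative shape of the rewrite built below (register names arbitrary):
+         *     uxtb w1, w2          // w2 has only 1 valid bit
+         *     eor  w3, w1, #1
+         *     cbz  w3, .L.target
+         * =>
+         *     tbnz w2, #0, .L.target
+         * (a cbnz would become tbz, per FindNewMop above)
+         */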
bb.InsertInsnAfter(insn, cgFunc.GetInsnBuilder()->BuildInsn(newOp, regOperand, oneHoleOpnd, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } +} + +bool LoadFloatPointPattern::FindLoadFloatPoint(Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + optInsn.clear(); + if (mOp != MOP_xmovzri16) { + return false; + } + optInsn.emplace_back(&insn); + + Insn *insnMov2 = insn.GetNextMachineInsn(); + if (insnMov2 == nullptr) { + return false; + } + if (insnMov2->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov2); + + Insn *insnMov3 = insnMov2->GetNextMachineInsn(); + if (insnMov3 == nullptr) { + return false; + } + if (insnMov3->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov3); + + Insn *insnMov4 = insnMov3->GetNextMachineInsn(); + if (insnMov4 == nullptr) { + return false; + } + if (insnMov4->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov4); + return true; +} + +bool LoadFloatPointPattern::IsPatternMatch() +{ + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + if ((static_cast(insn1->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn4->GetOperand(kInsnFirstOpnd)).GetRegisterNumber())) { + return false; + } + if ((static_cast(insn1->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != 0) || + (static_cast(insn2->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k16BitSize) || + (static_cast(insn3->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k32BitSize) || + (static_cast(insn4->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != + (k16BitSize + k32BitSize))) { + return false; + } + return true; +} + +bool LoadFloatPointPattern::CheckCondition(Insn &insn) +{ + if (FindLoadFloatPoint(insn) && IsPatternMatch()) { + return true; + } + return false; +} + +void LoadFloatPointPattern::Run(BB &bb, Insn &insn) +{ + /* logical shift left values in three optimized pattern */ + if (CheckCondition(insn)) { + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + auto &movConst1 = static_cast(insn1->GetOperand(kInsnSecondOpnd)); + auto &movConst2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); + auto &movConst3 = static_cast(insn3->GetOperand(kInsnSecondOpnd)); + auto &movConst4 = static_cast(insn4->GetOperand(kInsnSecondOpnd)); + /* movk/movz's immOpnd is 16-bit unsigned immediate */ + uint64 value = static_cast(movConst1.GetValue()) + + (static_cast(movConst2.GetValue()) << k16BitSize) + + (static_cast(movConst3.GetValue()) << k32BitSize) + + (static_cast(movConst4.GetValue()) << (k16BitSize + k32BitSize)); + + LabelIdx lableIdx = cgFunc->CreateLabel(); + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + LabelOperand &target = aarch64CGFunc->GetOrCreateLabelOperand(lableIdx); + cgFunc->InsertLabelMap(lableIdx, value); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xldli, insn4->GetOperand(kInsnFirstOpnd), target); + bb.InsertInsnAfter(*insn4, newInsn); + bb.RemoveInsn(*insn1); + bb.RemoveInsn(*insn2); + 
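+        /*
+         * Worked example (illustrative values): the matched sequence
+         *     movz x0, #0x0000
+         *     movk x0, #0x0000, lsl #16
+         *     movk x0, #0xe2e1, lsl #32
+         *     movk x0, #0x4079, lsl #48
+         * assembles a 64-bit floating-point bit pattern piecewise; it is replaced
+         * by the single literal-pool load (ldr x0, .Lconst) inserted after insn4,
+         * and the four original insns are removed.
+         */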
bb.RemoveInsn(*insn3); + bb.RemoveInsn(*insn4); + } +} + +void ReplaceOrrToMovAArch64::Run(BB &bb, Insn &insn) +{ + Operand *opndOfOrr = nullptr; + ImmOperand *immOpnd = nullptr; + RegOperand *reg1 = nullptr; + RegOperand *reg2 = nullptr; + MOperator thisMop = insn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (thisMop) { + case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_wmovrr; + break; + } + case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. */ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_xmovrr; + break; + } + default: + break; + } + DEBUG_ASSERT(opndOfOrr->IsIntImmediate(), "expects immediate operand"); + immOpnd = static_cast(opndOfOrr); + if (immOpnd->GetValue() == 0) { + reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (CGOptions::DoCGSSA()) { + CHECK_FATAL(false, "check this case in ssa opt"); + } + bb.ReplaceInsn(insn, cgFunc.GetInsnBuilder()->BuildInsn(newMop, *reg1, *reg2)); + } +} + +void ReplaceCmpToCmnAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + MOperator thisMop = insn.GetMachineOpcode(); + MOperator nextMop = MOP_undef; + MOperator newMop = MOP_undef; + uint64 negOne = UINT64_MAX; + switch (thisMop) { + case MOP_wmovri32: { + nextMop = MOP_wcmprr; + newMop = MOP_wcmnri; + negOne = UINT32_MAX; + break; + } + case MOP_xmovri64: { + nextMop = MOP_xcmprr; + newMop = MOP_xcmnri; + break; + } + default: + break; + } + Operand *opnd1OfMov = &(insn.GetOperand(kInsnFirstOpnd)); + Operand *opnd2OfMov = &(insn.GetOperand(kInsnSecondOpnd)); + if (opnd2OfMov->IsIntImmediate()) { + ImmOperand *immOpnd = static_cast(opnd2OfMov); + int64 iVal = immOpnd->GetValue(); + if ((kNegativeImmLowerLimit <= iVal && iVal < 0) || iVal == negOne) { + Insn *nextInsn = insn.GetNextMachineInsn(); /* get the next insn to judge if it is a cmp instruction. */ + if (nextInsn != nullptr) { + if (nextInsn->GetMachineOpcode() == nextMop) { + Operand *opndCmp2 = &(nextInsn->GetOperand(kInsnSecondOpnd)); + Operand *opndCmp3 = &(nextInsn->GetOperand(kInsnThirdOpnd)); /* get the third operand of cmp */ + /* if the first operand of mov equals the third operand of cmp, match the pattern. 
+                     */
+                    if (opnd1OfMov == opndCmp3) {
+                        if (iVal == negOne) {
+                            iVal = -1;
+                        }
+                        ImmOperand &newOpnd = aarch64CGFunc->CreateImmOperand(iVal * (-1), immOpnd->GetSize(), false);
+                        Operand &regFlag = nextInsn->GetOperand(kInsnFirstOpnd);
+                        bb.ReplaceInsn(*nextInsn,
+                                       cgFunc.GetInsnBuilder()->BuildInsn(newMop, regFlag, *opndCmp2, newOpnd));
+                    }
+                }
+            }
+        }
+    }
+}
+
+bool RemoveIncRefPattern::CheckCondition(Insn &insn)
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    if (mOp != MOP_xbl) {
+        return false;
+    }
+    auto &target = static_cast<FuncNameOperand &>(insn.GetOperand(kInsnFirstOpnd));
+    if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") {
+        return false;
+    }
+    insnMov2 = insn.GetPreviousMachineInsn();
+    if (insnMov2 == nullptr) {
+        return false;
+    }
+    MOperator mopMov2 = insnMov2->GetMachineOpcode();
+    if (mopMov2 != MOP_xmovrr) {
+        return false;
+    }
+    insnMov1 = insnMov2->GetPreviousMachineInsn();
+    if (insnMov1 == nullptr) {
+        return false;
+    }
+    MOperator mopMov1 = insnMov1->GetMachineOpcode();
+    if (mopMov1 != MOP_xmovrr) {
+        return false;
+    }
+    if (static_cast<RegOperand &>(insnMov1->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() !=
+        static_cast<RegOperand &>(insnMov2->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) {
+        return false;
+    }
+    auto &mov2Dest = static_cast<RegOperand &>(insnMov2->GetOperand(kInsnFirstOpnd));
+    auto &mov1Dest = static_cast<RegOperand &>(insnMov1->GetOperand(kInsnFirstOpnd));
+    if (mov1Dest.IsVirtualRegister() || mov2Dest.IsVirtualRegister() || mov1Dest.GetRegisterNumber() != R0 ||
+        mov2Dest.GetRegisterNumber() != R1) {
+        return false;
+    }
+    return true;
+}
+
+void RemoveIncRefPattern::Run(BB &bb, Insn &insn)
+{
+    if (!CheckCondition(insn)) {
+        return;
+    }
+    bb.RemoveInsn(insn);
+    bb.RemoveInsn(*insnMov2);
+    bb.RemoveInsn(*insnMov1);
+}
+
+bool LongIntCompareWithZPattern::FindLondIntCmpWithZ(Insn &insn)
+{
+    MOperator thisMop = insn.GetMachineOpcode();
+    optInsn.clear();
+    /* fourth */
+    if (thisMop != MOP_wcmpri) {
+        return false;
+    }
+    (void)optInsn.emplace_back(&insn);
+
+    /* third */
+    Insn *preInsn1 = insn.GetPreviousMachineInsn();
+    if (preInsn1 == nullptr) {
+        return false;
+    }
+    MOperator preMop1 = preInsn1->GetMachineOpcode();
+    if (preMop1 != MOP_wcsincrrrc) {
+        return false;
+    }
+    (void)optInsn.emplace_back(preInsn1);
+
+    /* second */
+    Insn *preInsn2 = preInsn1->GetPreviousMachineInsn();
+    if (preInsn2 == nullptr) {
+        return false;
+    }
+    MOperator preMop2 = preInsn2->GetMachineOpcode();
+    if (preMop2 != MOP_wcsinvrrrc) {
+        return false;
+    }
+    (void)optInsn.emplace_back(preInsn2);
+
+    /* first */
+    Insn *preInsn3 = preInsn2->GetPreviousMachineInsn();
+    if (preInsn3 == nullptr) {
+        return false;
+    }
+    MOperator preMop3 = preInsn3->GetMachineOpcode();
+    if (preMop3 != MOP_xcmpri) {
+        return false;
+    }
+    (void)optInsn.emplace_back(preInsn3);
+    return true;
+}
+
+bool LongIntCompareWithZPattern::IsPatternMatch()
+{
+    constexpr int insnLen = 4;
+    if (optInsn.size() != insnLen) {
+        return false;
+    }
+    int insnNum = 0;
+    Insn *insn1 = optInsn[insnNum];
+    Insn *insn2 = optInsn[++insnNum];
+    Insn *insn3 = optInsn[++insnNum];
+    Insn *insn4 = optInsn[++insnNum];
+    DEBUG_ASSERT(insnNum == 3, "expect four insns in this pattern");
+    if (IsZeroRegister(insn3->GetOperand(kInsnSecondOpnd)) && IsZeroRegister(insn3->GetOperand(kInsnThirdOpnd)) &&
+        IsZeroRegister(insn2->GetOperand(kInsnThirdOpnd)) &&
+        &(insn2->GetOperand(kInsnFirstOpnd)) == &(insn2->GetOperand(kInsnSecondOpnd)) &&
+        static_cast<CondOperand &>(insn3->GetOperand(kInsnFourthOpnd)).GetCode() == CC_GE &&
+        static_cast<CondOperand &>(insn2->GetOperand(kInsnFourthOpnd)).GetCode() == CC_LE &&
+        static_cast<ImmOperand &>(insn1->GetOperand(kInsnThirdOpnd)).GetValue() == 0 &&
+        static_cast<ImmOperand &>(insn4->GetOperand(kInsnThirdOpnd)).GetValue() == 0) {
+        return true;
+    }
+    return false;
+}
+
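+/*
+ * Illustrative instance of the matched sequence (operands abstracted):
+ *     cmp   x2, #0
+ *     csinv w1, wzr, wzr, ge    // w1 = (x2 >= 0) ? 0 : -1
+ *     csinc w1, w1, wzr, le     // w1 = (x2 <= 0) ? w1 : 1
+ *     cmp   w1, #0
+ * The flags of the final cmp are equivalent to those of the first one, so Run()
+ * below rebuilds the original 64-bit compare in place of the final one.
+ */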
+bool LongIntCompareWithZPattern::CheckCondition(Insn &insn)
+{
+    if (FindLondIntCmpWithZ(insn) && IsPatternMatch()) {
+        return true;
+    }
+    return false;
+}
+
+void LongIntCompareWithZPattern::Run(BB &bb, Insn &insn)
+{
+    /* found pattern */
+    if (CheckCondition(insn)) {
+        Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(
+            optInsn[3]->GetMachineOpcode(), optInsn[3]->GetOperand(kInsnFirstOpnd),
+            optInsn[3]->GetOperand(kInsnSecondOpnd), optInsn[3]->GetOperand(kInsnThirdOpnd));
+        /* use newInsn (a copy of the original 64-bit cmp) to replace the final cmp, optInsn[0] */
+        bb.ReplaceInsn(*optInsn[0], newInsn);
+        optInsn.clear();
+    }
+}
+
+void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn)
+{
+    AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc *>(&cgFunc);
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    if (thisMop != MOP_xadrpl12) {
+        return;
+    }
+
+    MOperator nextMop = nextInsn->GetMachineOpcode();
+    if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldp) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstp))) {
+        /* Check if the base register of nextInsn and the dest operand of insn are identical. */
+        MemOperand *memOpnd = static_cast<MemOperand *>(nextInsn->GetMemOpnd());
+        DEBUG_ASSERT(memOpnd != nullptr, "memOpnd is null in AArch64Peep::ComplexMemOperandAArch64");
+
+        /* Only for the AddrMode_B_OI addressing mode. */
+        if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) {
+            return;
+        }
+
+        /* Only for intact memory addressing. */
+        if (!memOpnd->IsIntactIndexed()) {
+            return;
+        }
+
+        auto &regOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
+
+        /* Avoid linking issues when the object is not 16-byte aligned. */
+        if (memOpnd->GetSize() == k128BitSize) {
+            return;
+        }
+
+        /* Check if the dest operand of insn is identical with the base register of nextInsn. */
+        if (memOpnd->GetBaseRegister() != &regOpnd) {
+            return;
+        }
+
+        /* Check whether the dest register is still used after the ldr insn, i.e. whether it is live-out.
*/ + if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) { + return; + } + + /* load store pairs cannot have relocation */ + if (nextInsn->IsLoadStorePair() && insn.GetOperand(kInsnThirdOpnd).IsStImmediate()) { + return; + } + + auto &stImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + OfstOperand &offOpnd = aarch64CGFunc->GetOrCreateOfstOpnd( + stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue(), k32BitSize); + + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && stImmOpnd.GetSymbol()->IsReadOnly()) { + return; + } + + /* avoid relocation */ + if ((offOpnd.GetValue() % static_cast(kBitsPerByte)) != 0) { + return; + } + + if (cgFunc.GetMirModule().IsCModule()) { + Insn *prevInsn = insn.GetPrev(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xadrp) { + return; + } else { + auto &prevStImmOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + prevStImmOpnd.SetOffset(offOpnd.GetValue()); + } + } + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = aarch64CGFunc->GetOrCreateMemOpnd( + MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), &newBaseOpnd, nullptr, &offOpnd, stImmOpnd.GetSymbol()); + + nextInsn->SetMemOpnd(&newMemOpnd); + bb.RemoveInsn(insn); + CHECK_FATAL(!CGOptions::IsLazyBinding() || cgFunc.GetCG()->IsLibcore(), + "this pattern can't be found in this phase"); + } +} + +void ComplexMemOperandPreAddAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrr && thisMop != MOP_waddrrr) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + if (!IsMemOperandOptPattern(insn, *nextInsn)) { + return; + } + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + auto &newBaseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (newBaseOpnd.GetSize() != k64BitSize) { + return; + } + auto &newIndexOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (newIndexOpnd.GetSize() <= k32BitSize) { + MemOperand &newMemOpnd = aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), + &newBaseOpnd, &newIndexOpnd, 0, false); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } else { + auto *newOfstOpnd = &aarch64CGFunc->GetOrCreateOfstOpnd(0, k32BitSize); + MemOperand &newMemOpnd = aarch64CGFunc->GetOrCreateMemOpnd( + MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, &newIndexOpnd, newOfstOpnd, nullptr); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } + bb.RemoveInsn(insn); + } +} + +bool ComplexMemOperandLSLAArch64::CheckShiftValid(const Insn &insn, const BitShiftOperand &lsl) const +{ + /* check if shift amount is valid */ + uint32 lslAmount = lsl.GetShiftAmount(); + constexpr uint8 twoShiftBits = 2; + constexpr uint8 threeShiftBits = 3; + uint32 memSize = insn.GetMemoryByteSize(); + if ((memSize == k4ByteSize && (lsl.GetShiftAmount() != 0 && lslAmount != twoShiftBits)) || + (memSize == k8ByteSize && (lsl.GetShiftAmount() != 0 && lslAmount != threeShiftBits))) { + return false; + } + if (memSize != (k5BitSize << lslAmount)) { + return false; + } + return true; +} + +void ComplexMemOperandLSLAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + 
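+    /*
+     * Folds an add-with-shift into the addressing mode of the following
+     * load/store, e.g. (illustrative):
+     *     add x0, x1, x2, LSL #3
+     *     ldr x3, [x0]
+     * =>
+     *     ldr x3, [x1, x2, LSL #3]
+     */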
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    if (thisMop != MOP_xaddrrrs) {
+        return;
+    }
+    MOperator nextMop = nextInsn->GetMachineOpcode();
+    if (nextMop && ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) {
+        /* Check if the base register of nextInsn and the dest operand of insn are identical. */
+        MemOperand *memOpnd = static_cast<MemOperand *>(nextInsn->GetMemOpnd());
+        DEBUG_ASSERT(memOpnd != nullptr, "null ptr check");
+
+        /* Only for the AddrMode_B_OI addressing mode. */
+        if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) {
+            return;
+        }
+
+        /* Only when the immediate is 0. */
+        if (memOpnd->GetOffsetImmediate()->GetOffsetValue() != 0) {
+            return;
+        }
+
+        /* Only for intact memory addressing. */
+        if (!memOpnd->IsIntactIndexed()) {
+            return;
+        }
+
+        auto &regOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
+
+        /* Check if the dest operand of insn is identical with the base register of nextInsn. */
+        if (memOpnd->GetBaseRegister() != &regOpnd) {
+            return;
+        }
+
+#ifdef USE_32BIT_REF
+        if (nextInsn->IsAccessRefField() && nextInsn->GetOperand(kInsnFirstOpnd).GetSize() > k32BitSize) {
+            return;
+        }
+#endif
+
+        /* Check whether the dest register is still used after the ldr insn, i.e. whether it is live-out. */
+        if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) {
+            return;
+        }
+        auto &lsl = static_cast<BitShiftOperand &>(insn.GetOperand(kInsnFourthOpnd));
+        if (!CheckShiftValid(*nextInsn, lsl)) {
+            return;
+        }
+        auto &newBaseOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnSecondOpnd));
+        auto &newIndexOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnThirdOpnd));
+        MemOperand &newMemOpnd =
+            aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd,
+                                              &newIndexOpnd, static_cast<uint32>(lsl.GetShiftAmount()), false);
+        nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd);
+        bb.RemoveInsn(insn);
+    }
+}
+
+void ComplexMemOperandLabelAArch64::Run(BB &bb, Insn &insn)
+{
+    Insn *nextInsn = insn.GetNextMachineInsn();
+    if (nextInsn == nullptr) {
+        return;
+    }
+    MOperator thisMop = insn.GetMachineOpcode();
+    if (thisMop != MOP_xldli) {
+        return;
+    }
+    MOperator nextMop = nextInsn->GetMachineOpcode();
+    if (nextMop != MOP_xvmovdr) {
+        return;
+    }
+    auto &regOpnd = static_cast<RegOperand &>(insn.GetOperand(kInsnFirstOpnd));
+    if (regOpnd.GetRegisterNumber() !=
+        static_cast<RegOperand &>(nextInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) {
+        return;
+    }
+
+    /* Check whether the dest register is still used after the ldr insn, i.e. whether it is live-out. */
+    if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) {
+        return;
+    }
+    if (CGOptions::DoCGSSA()) {
+        /* same as CombineFmovLdrPattern in ssa */
+        CHECK_FATAL(false, "check this case in ssa");
+    }
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_dldli, nextInsn->GetOperand(kInsnFirstOpnd),
+                                                       insn.GetOperand(kInsnSecondOpnd));
+    bb.InsertInsnAfter(*nextInsn, newInsn);
+    bb.RemoveInsn(insn);
+    bb.RemoveInsn(*nextInsn);
+}
+
+static bool MayThrowBetweenInsn(const Insn &prevCallInsn, const Insn &currCallInsn)
+{
+    for (Insn *insn = prevCallInsn.GetNext(); insn != nullptr && insn != &currCallInsn; insn = insn->GetNext()) {
+        if (insn->MayThrow()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/*
+ * mov R0, vreg1 / R0        -> objDesignateInsn
+ * add vreg2, vreg1, #imm    -> fieldDesignateInsn
+ * mov R1, vreg2             -> fieldParamDefInsn
+ * mov R2, vreg3             -> fieldValueDefInsn
+ */
+bool WriteFieldCallPattern::WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam &param)
+{
+    Insn *fieldValueDefInsn = writeFieldCallInsn.GetPreviousMachineInsn();
+    if (fieldValueDefInsn == nullptr || fieldValueDefInsn->GetMachineOpcode() != MOP_xmovrr) {
+        return false;
+    }
+    Operand &fieldValueDefInsnDestOpnd = fieldValueDefInsn->GetOperand(kInsnFirstOpnd);
+    auto &fieldValueDefInsnDestReg = static_cast<RegOperand &>(fieldValueDefInsnDestOpnd);
+    if (fieldValueDefInsnDestReg.GetRegisterNumber() != R2) {
+        return false;
+    }
+    paramDefInsns.emplace_back(fieldValueDefInsn);
+    param.fieldValue = &(fieldValueDefInsn->GetOperand(kInsnSecondOpnd));
+    Insn *fieldParamDefInsn = fieldValueDefInsn->GetPreviousMachineInsn();
+    if (fieldParamDefInsn == nullptr || fieldParamDefInsn->GetMachineOpcode() != MOP_xmovrr) {
+        return false;
+    }
+    Operand &fieldParamDestOpnd = fieldParamDefInsn->GetOperand(kInsnFirstOpnd);
+    auto &fieldParamDestReg = static_cast<RegOperand &>(fieldParamDestOpnd);
+    if (fieldParamDestReg.GetRegisterNumber() != R1) {
+        return false;
+    }
+    paramDefInsns.emplace_back(fieldParamDefInsn);
+    Insn *fieldDesignateInsn = fieldParamDefInsn->GetPreviousMachineInsn();
+    if (fieldDesignateInsn == nullptr || fieldDesignateInsn->GetMachineOpcode() != MOP_xaddrri12) {
+        return false;
+    }
+    Operand &fieldParamDefSrcOpnd = fieldParamDefInsn->GetOperand(kInsnSecondOpnd);
+    Operand &fieldDesignateDestOpnd = fieldDesignateInsn->GetOperand(kInsnFirstOpnd);
+    if (!RegOperand::IsSameReg(fieldParamDefSrcOpnd, fieldDesignateDestOpnd)) {
+        return false;
+    }
+    Operand &fieldDesignateBaseOpnd = fieldDesignateInsn->GetOperand(kInsnSecondOpnd);
+    param.fieldBaseOpnd = &(static_cast<RegOperand &>(fieldDesignateBaseOpnd));
+    auto &immOpnd = static_cast<ImmOperand &>(fieldDesignateInsn->GetOperand(kInsnThirdOpnd));
+    param.fieldOffset = immOpnd.GetValue();
+    paramDefInsns.emplace_back(fieldDesignateInsn);
+    Insn *objDesignateInsn = fieldDesignateInsn->GetPreviousMachineInsn();
+    if (objDesignateInsn == nullptr || objDesignateInsn->GetMachineOpcode() != MOP_xmovrr) {
+        return false;
+    }
+    Operand &objDesignateDestOpnd = objDesignateInsn->GetOperand(kInsnFirstOpnd);
+    auto &objDesignateDestReg = static_cast<RegOperand &>(objDesignateDestOpnd);
+    if (objDesignateDestReg.GetRegisterNumber() != R0) {
+        return false;
+    }
+    Operand &objDesignateSrcOpnd = objDesignateInsn->GetOperand(kInsnSecondOpnd);
+    if (RegOperand::IsSameReg(objDesignateDestOpnd, objDesignateSrcOpnd) ||
+        !RegOperand::IsSameReg(objDesignateSrcOpnd, fieldDesignateBaseOpnd)) {
+        return false;
+    }
+    param.objOpnd = &(objDesignateInsn->GetOperand(kInsnSecondOpnd));
+    paramDefInsns.emplace_back(objDesignateInsn);
+    return true;
+} + +bool WriteFieldCallPattern::IsWriteRefFieldCallInsn(const Insn &insn) const +{ + if (!insn.IsCall() || insn.GetMachineOpcode() == MOP_xblr) { + return false; + } + Operand *targetOpnd = insn.GetCallTargetOperand(); + DEBUG_ASSERT(targetOpnd != nullptr, "targetOpnd must not be nullptr"); + if (!targetOpnd->IsFuncNameOpnd()) { + return false; + } + auto *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "the kind of funcSt is unreasonable"); + const std::string &funcName = funcSt->GetName(); + return funcName == "MCC_WriteRefField" || funcName == "MCC_WriteVolatileField"; +} + +bool WriteFieldCallPattern::CheckCondition(Insn &insn) +{ + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + if (!IsWriteRefFieldCallInsn(insn)) { + return false; + } + if (!hasWriteFieldCall) { + if (!WriteFieldCallOptPatternMatch(insn, firstCallParam)) { + return false; + } + prevCallInsn = &insn; + hasWriteFieldCall = true; + return false; + } + if (!WriteFieldCallOptPatternMatch(insn, currentCallParam)) { + return false; + } + if (prevCallInsn == nullptr || MayThrowBetweenInsn(*prevCallInsn, insn)) { + return false; + } + if (firstCallParam.objOpnd == nullptr || currentCallParam.objOpnd == nullptr || + currentCallParam.fieldBaseOpnd == nullptr) { + return false; + } + if (!RegOperand::IsSameReg(*firstCallParam.objOpnd, *currentCallParam.objOpnd)) { + return false; + } + return true; +} + +void WriteFieldCallPattern::Run(BB &bb, Insn &insn) +{ + paramDefInsns.clear(); + if (!CheckCondition(insn)) { + return; + } + auto *aarCGFunc = static_cast(cgFunc); + MemOperand &addr = + aarCGFunc->CreateMemOpnd(*currentCallParam.fieldBaseOpnd, currentCallParam.fieldOffset, k64BitSize); + Insn &strInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xstr, *currentCallParam.fieldValue, addr); + strInsn.AppendComment("store reference field"); + strInsn.MarkAsAccessRefField(true); + bb.InsertInsnAfter(insn, strInsn); + for (Insn *paramDefInsn : paramDefInsns) { + bb.RemoveInsn(*paramDefInsn); + } + bb.RemoveInsn(insn); + prevCallInsn = &strInsn; + nextInsn = strInsn.GetNextMachineInsn(); +} + +bool RemoveDecRefPattern::CheckCondition(Insn &insn) +{ + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_DecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if ((mopMov != MOP_xmovrr && mopMov != MOP_xmovri64) || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R0) { + return false; + } + Operand &srcOpndOfMov = prevInsn->GetOperand(kInsnSecondOpnd); + if (!IsZeroRegister(srcOpndOfMov) && + !(srcOpndOfMov.IsImmediate() && static_cast(srcOpndOfMov).GetValue() == 0)) { + return false; + } + return true; +} + +void RemoveDecRefPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); +} + +/* + * We optimize the following pattern in this function: + * and x1, x1, #imm (is n power of 2) + * cbz/cbnz x1, .label + * => + * and x1, x1, #imm (is n power of 2) + * tbnz/tbz x1, #n, .label + */ +void OneHoleBranchesAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + if (&insn != bb.GetLastInsn()) { + return; + } + /* check cbz/cbnz insn */ + 
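+    /*
+     * Concrete instance (illustrative): with #imm == 8, n == 3, so
+     *     and  x1, x1, #8
+     *     cbnz x1, .L.target
+     * becomes
+     *     tbnz x1, #3, .L.target
+     */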
MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_wcbz && thisMop != MOP_wcbnz && thisMop != MOP_xcbz && thisMop != MOP_xcbnz) { + return; + } + /* check and insn */ + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wandrri12 && prevMop != MOP_xandrri13) { + return; + } + /* check opearnd of two insns */ + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return; + } + auto &imm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int n = logValueAtBase2(imm.GetValue()); + if (n < 0) { + return; + } + + /* replace insn */ + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MOperator newOp = MOP_undef; + switch (thisMop) { + case MOP_wcbz: + newOp = MOP_wtbz; + break; + case MOP_wcbnz: + newOp = MOP_wtbnz; + break; + case MOP_xcbz: + newOp = MOP_xtbz; + break; + case MOP_xcbnz: + newOp = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "can not touch here"); + break; + } + ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter( + insn, cgFunc.GetInsnBuilder()->BuildInsn(newOp, prevInsn->GetOperand(kInsnSecondOpnd), oneHoleOpnd, label)); + bb.RemoveInsn(insn); +} + +bool ReplaceIncDecWithIncPattern::CheckCondition(Insn &insn) +{ + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + target = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target->GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if (mopMov != MOP_xmovrr) { + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + !IsZeroRegister(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + return true; +} + +void ReplaceIncDecWithIncPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + std::string funcName = "MCC_IncRef_NaiveRCFast"; + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + if (st == nullptr) { + LogInfo::MapleLogger() << "WARNING: Replace IncDec With Inc fail due to no MCC_IncRef_NaiveRCFast func\n"; + return; + } + bb.RemoveInsn(*prevInsn); + target->SetFunctionSymbol(*st); +} + +void AndCmpBranchesToTbzAArch64::Run(BB &bb, Insn &insn) +{ + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + if (&insn != bb.GetLastInsn()) { + return; + } + MOperator mopB = insn.GetMachineOpcode(); + if (mopB != MOP_beq && mopB != MOP_bne) { + return; + } + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* get the instruction before bne/beq, expects its type is cmp. */ + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcmpri && prevMop != MOP_xcmpri) { + return; + } + + /* get the instruction before "cmp", expect its type is "and". */ + Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn(); + if (prevPrevInsn == nullptr) { + return; + } + MOperator mopAnd = prevPrevInsn->GetMachineOpcode(); + if (mopAnd != MOP_wandrri12 && mopAnd != MOP_xandrri13) { + return; + } + + /* + * check operand + * + * the real register of "cmp" and "and" must be the same. 
+ */ + if (&(prevInsn->GetOperand(kInsnSecondOpnd)) != &(prevPrevInsn->GetOperand(kInsnFirstOpnd))) { + return; + } + + uint32 opndIdx = 2; + if (!prevPrevInsn->GetOperand(opndIdx).IsIntImmediate() || !prevInsn->GetOperand(opndIdx).IsIntImmediate()) { + return; + } + auto &immAnd = static_cast(prevPrevInsn->GetOperand(opndIdx)); + auto &immCmp = static_cast(prevInsn->GetOperand(opndIdx)); + if (immCmp.GetValue() == 0) { + int n = logValueAtBase2(immAnd.GetValue()); + if (n < 0) { + return; + } + /* judge whether the flag_reg and "w0" is live later. */ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &cmpReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + if (FindRegLiveOut(flagReg, *prevInsn->GetBB()) || FindRegLiveOut(cmpReg, *prevInsn->GetBB())) { + return; + } + MOperator mopNew = MOP_undef; + switch (mopB) { + case MOP_beq: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbz; + } + break; + case MOP_bne: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbnz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbnz; + } + break; + default: + CHECK_FATAL(false, "expects beq or bne insn"); + break; + } + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter( + insn, cgFunc.GetInsnBuilder()->BuildInsn(mopNew, prevPrevInsn->GetOperand(kInsnSecondOpnd), newImm, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } else { + int n = logValueAtBase2(immAnd.GetValue()); + int m = logValueAtBase2(immCmp.GetValue()); + if (n < 0 || m < 0 || n != m) { + return; + } + /* judge whether the flag_reg and "w0" is live later. */ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &cmpReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + if (FindRegLiveOut(flagReg, *prevInsn->GetBB()) || FindRegLiveOut(cmpReg, *prevInsn->GetBB())) { + return; + } + MOperator mopNew = MOP_undef; + switch (mopB) { + case MOP_beq: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbnz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbnz; + } + break; + case MOP_bne: + if (mopAnd == MOP_wandrri12) { + mopNew = MOP_wtbz; + } else if (mopAnd == MOP_xandrri13) { + mopNew = MOP_xtbz; + } + break; + default: + CHECK_FATAL(false, "expects beq or bne insn"); + break; + } + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + (void)bb.InsertInsnAfter( + insn, cgFunc.GetInsnBuilder()->BuildInsn(mopNew, prevPrevInsn->GetOperand(kInsnSecondOpnd), newImm, label)); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } +} + +void RemoveSxtBeforeStrAArch64::Run(BB &bb, Insn &insn) +{ + MOperator mop = insn.GetMachineOpcode(); + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (!(mop == MOP_wstrh && prevMop == MOP_xsxth32) && !(mop == MOP_wstrb && prevMop == MOP_xsxtb32)) { + return; + } + auto &prevOpnd0 = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (IfOperandIsLiveAfterInsn(prevOpnd0, insn)) { + return; + } + auto &prevOpnd1 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + regno_t prevRegNO0 = prevOpnd0.GetRegisterNumber(); + regno_t prevRegNO1 = prevOpnd1.GetRegisterNumber(); + regno_t regNO0 = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (prevRegNO0 != prevRegNO1) { + return; + } + if (prevRegNO0 == regNO0) { + 
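+        /*
+         * The extension result feeds a strh/strb that only writes the low
+         * 16/8 bits, e.g. (illustrative):
+         *     sxth w0, w0
+         *     strh w0, [x1]
+         * so the sign-extension is dead and can simply be dropped.
+         */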
bb.RemoveInsn(*prevInsn); + return; + } + insn.SetOperand(0, prevOpnd1); + bb.RemoveInsn(*prevInsn); +} + +void UbfxToUxtwPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(MOP_xuxtw64, insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnSecondOpnd)); + bb.ReplaceInsn(insn, *newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxToUxtwPattern::CheckCondition(Insn &insn) +{ + ImmOperand &imm0 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &imm1 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((imm0.GetValue() != 0) || (imm1.GetValue() != k32BitSize)) { + return false; + } + return true; +} + +void UbfxAndCbzToTbzPattern::Run(BB &bb, Insn &insn) +{ + Operand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &imm3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (!CheckCondition(insn)) { + return; + } + auto &label = static_cast(useInsn->GetOperand(kInsnSecondOpnd)); + MOperator nextMop = useInsn->GetMachineOpcode(); + switch (nextMop) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + return; + } + if (newMop == MOP_undef) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, opnd2, imm3, label); + BB *useInsnBB = useInsn->GetBB(); + useInsnBB->ReplaceInsn(*useInsn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(*useInsn, *newInsn); + optSuccess = true; + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(useInsn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxAndCbzToTbzPattern::CheckCondition(Insn &insn) +{ + ImmOperand &imm4 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + RegOperand &opnd1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + InsnSet useInsns = GetAllUseInsn(opnd1); + if (useInsns.size() != 1) { + return false; + } + useInsn = *useInsns.begin(); + if (useInsn == nullptr) { + return false; + } + if (imm4.GetValue() == 1) { + switch (useInsn->GetMachineOpcode()) { + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: + return true; + default: + break; + } + } + return false; +} + +bool ComplexExtendWordLslAArch64::IsExtendWordLslPattern(const Insn &insn) const +{ + Insn *nextInsn = insn.GetNext(); + if (nextInsn == nullptr) { + return false; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop != MOP_xlslrri6) { + return false; + } + return true; +} + +void ComplexExtendWordLslAArch64::Run(BB &bb, Insn &insn) +{ + if (!IsExtendWordLslPattern(insn)) { + return; + } + MOperator mop = insn.GetMachineOpcode(); + Insn *nextInsn = insn.GetNext(); + auto &nextOpnd2 = static_cast(nextInsn->GetOperand(kInsnThirdOpnd)); + if (nextOpnd2.GetValue() > k32BitSize) { + return; + } + auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &nextOpnd1 = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + regno_t regNO0 = opnd0.GetRegisterNumber(); + regno_t nextRegNO1 = nextOpnd1.GetRegisterNumber(); + if (regNO0 != nextRegNO1 || IfOperandIsLiveAfterInsn(opnd0, *nextInsn)) { + return; + } + auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &nextOpnd0 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t regNO1 = opnd1.GetRegisterNumber(); + cgFunc.InsertExtendSet(regNO1); + 
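+    /*
+     * Illustrative rewrite (register names arbitrary):
+     *     sxtw x1, w2
+     *     lsl  x0, x1, #shift     // shift <= 32
+     * =>
+     *     sbfiz x0, x2, #shift, #32
+     * (ubfiz for the zero-extending uxtw case)
+     */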
+    MOperator mopNew = mop == MOP_xsxtw64 ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6;
+    auto *aarch64CGFunc = static_cast<AArch64CGFunc *>(&cgFunc);
+    RegOperand &reg1 = aarch64CGFunc->GetOrCreateVirtualRegisterOperand(regNO1);
+    ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(k32BitSize, k6BitSize, false);
+    Insn &newInsnSbfiz = cgFunc.GetInsnBuilder()->BuildInsn(mopNew, nextOpnd0, reg1, nextOpnd2, newImm);
+    bb.RemoveInsn(*nextInsn);
+    bb.ReplaceInsn(insn, newInsnSbfiz);
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1e9c5e264e9b0433ddb8f274fc88fbdb956be8ee
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_phi_elimination.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+RegOperand &AArch64PhiEliminate::CreateTempRegForCSSA(RegOperand &oriOpnd)
+{
+    return *phiEliAlloc.New<RegOperand>(GetAndIncreaseTempRegNO(), oriOpnd.GetSize(), oriOpnd.GetRegisterType());
+}
+
+Insn &AArch64PhiEliminate::CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd)
+{
+    DEBUG_ASSERT(destOpnd.GetRegisterType() == fromOpnd.GetRegisterType(), "do not support this move in aarch64");
+    bool is64bit = destOpnd.GetSize() == k64BitSize;
+    bool isFloat = destOpnd.IsOfFloatOrSIMDClass();
+    Insn *insn = nullptr;
+    if (destOpnd.GetSize() == k128BitSize) {
+        DEBUG_ASSERT(isFloat, "unexpect 128bit int operand in aarch64");
+        insn = &cgFunc->GetInsnBuilder()->BuildVectorInsn(MOP_vmovvv, AArch64CG::kMd[MOP_vmovvv]);
+        insn->AddOpndChain(destOpnd).AddOpndChain(fromOpnd);
+        auto *vecSpecSrc = cgFunc->GetMemoryPool()->New<VectorRegSpec>(k128BitSize >> k3ByteSize, k8BitSize);
+        auto *vecSpecDest = cgFunc->GetMemoryPool()->New<VectorRegSpec>(k128BitSize >> k3ByteSize, k8BitSize);
+        static_cast<VectorInsn *>(insn)->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc);
+    } else {
+        insn = &cgFunc->GetInsnBuilder()->BuildInsn(
+            is64bit ? isFloat ? MOP_xvmovd : MOP_xmovrr : isFloat ?
MOP_xvmovs : MOP_wmovrr, destOpnd, fromOpnd); + } + /* restore validBitNum */ + if (destOpnd.GetValidBitsNum() != k64BitSize && destOpnd.GetValidBitsNum() != k32BitSize) { + destOpnd.SetValidBitsNum(destOpnd.GetSize()); + } + if (fromOpnd.GetValidBitsNum() != k64BitSize && fromOpnd.GetValidBitsNum() != k32BitSize) { + fromOpnd.SetValidBitsNum(fromOpnd.GetSize()); + } + /* copy remat info */ + MaintainRematInfo(destOpnd, fromOpnd, true); + DEBUG_ASSERT(insn != nullptr, "create move insn failed"); + insn->SetIsPhiMovInsn(true); + return *insn; +} + +RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn) +{ + VRegVersion *ssaVersion = GetSSAInfo()->FindSSAVersion(ssaOpnd.GetRegisterNumber()); + DEBUG_ASSERT(ssaVersion != nullptr, "find ssaVersion failed"); + DEBUG_ASSERT(!ssaVersion->IsDeleted(), "ssaVersion has been deleted"); + RegOperand *regForRecreate = &ssaOpnd; + if (curInsn.GetMachineOpcode() != MOP_asm && !curInsn.IsVectorOp() && !curInsn.IsSpecialIntrinsic() && + ssaVersion->GetAllUseInsns().empty() && !curInsn.IsAtomic()) { + CHECK_FATAL(false, "plz delete dead version"); + } + if (GetSSAInfo()->IsNoDefVReg(ssaOpnd.GetRegisterNumber())) { + regForRecreate = MakeRoomForNoDefVreg(ssaOpnd); + } else { + DEBUG_ASSERT(regForRecreate->IsSSAForm(), "Opnd is not in ssa form"); + } + RegOperand &newReg = cgFunc->GetOrCreateVirtualRegisterOperand(*regForRecreate); + + DUInsnInfo *defInfo = ssaVersion->GetDefInsnInfo(); + Insn *defInsn = defInfo != nullptr ? defInfo->GetInsn() : nullptr; + /* + * case1 : both def/use + * case2 : inline-asm (do not do aggressive optimization) "0" + * case3 : cc flag operand + */ + if (defInsn != nullptr) { + /* case 1 */ + uint32 defUseIdx = defInsn->GetBothDefUseOpnd(); + if (defUseIdx != kInsnMaxOpnd) { + if (defInfo->GetOperands().count(defUseIdx)) { + CHECK_FATAL(defInfo->GetOperands()[defUseIdx] == 1, "multiple definiation"); + Operand &preRegOpnd = defInsn->GetOperand(defUseIdx); + DEBUG_ASSERT(preRegOpnd.IsRegister(), "unexpect operand type"); + newReg.SetRegisterNumber(static_cast(preRegOpnd).GetRegisterNumber()); + } + } + /* case 2 */ + if (defInsn->GetMachineOpcode() == MOP_asm) { + auto &inputList = static_cast(defInsn->GetOperand(kAsmInputListOpnd)); + VRegVersion *LastVersion = nullptr; + for (auto inputReg : inputList.GetOperands()) { + LastVersion = GetSSAInfo()->FindSSAVersion(inputReg->GetRegisterNumber()); + if (LastVersion != nullptr && LastVersion->GetOriginalRegNO() == ssaVersion->GetOriginalRegNO()) { + break; + } + LastVersion = nullptr; + } + if (LastVersion != nullptr) { + newReg.SetRegisterNumber(LastVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } else { + const MapleMap &bindingMap = defInsn->GetRegBinding(); + auto pairIt = bindingMap.find(ssaVersion->GetOriginalRegNO()); + if (pairIt != bindingMap.end()) { + newReg.SetRegisterNumber(pairIt->second); + } + } + } + /* case 3 */ + if (ssaVersion->GetOriginalRegNO() == kRFLAG) { + newReg.SetRegisterNumber(kRFLAG); + } + } else { + newReg.SetRegisterNumber(ssaVersion->GetOriginalRegNO()); + } + MaintainRematInfo(newReg, ssaOpnd, true); + newReg.SetOpndOutOfSSAForm(); + return newReg; +} + +void AArch64PhiEliminate::AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const +{ + Insn *posInsn = nullptr; + bool isPosPhi = false; + FOR_BB_INSNS_REV(insn, &bb) { + if (insn->IsPhi()) { + posInsn = insn; + isPosPhi = true; + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch()) { + posInsn = insn; + continue; + } + 
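+        /*
+         * Any other machine insn ends the scan: the mov is inserted after the
+         * last phi if one was found, otherwise right before the first branch
+         * of the block's trailing branch sequence.
+         */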
break; + } + CHECK_FATAL(posInsn != nullptr, "insert mov for phi failed"); + if (isPosPhi) { + bb.InsertInsnAfter(*posInsn, movInsn); + } else { + bb.InsertInsnBefore(*posInsn, movInsn); + } +} + +/* copy remat info */ +void AArch64PhiEliminate::MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) +{ + if (CGOptions::GetRematLevel() > 0 && isCopy) { + if (fromOpnd.IsSSAForm()) { + VRegVersion *fromSSAVersion = GetSSAInfo()->FindSSAVersion(fromOpnd.GetRegisterNumber()); + regno_t rematRegNO = fromSSAVersion->GetOriginalRegNO(); + MIRPreg *fPreg = static_cast(cgFunc)->GetPseudoRegFromVirtualRegNO(rematRegNO); + if (fPreg != nullptr) { + PregIdx fPregIdx = + cgFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(static_cast(fPreg->GetPregNo())); + RecordRematInfo(destOpnd.GetRegisterNumber(), fPregIdx); + } + } else { + regno_t rematRegNO = fromOpnd.GetRegisterNumber(); + PregIdx fPreg = FindRematInfo(rematRegNO); + if (fPreg > 0) { + RecordRematInfo(destOpnd.GetRegisterNumber(), fPreg); + } + } + } +} + +void AArch64PhiEliminate::ReCreateRegOperand(Insn &insn) +{ + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + A64OperandPhiElmVisitor a64OpndPhiElmVisitor(this, insn, i); + opnd.Accept(a64OpndPhiElmVisitor); + } +} + +void A64OperandPhiElmVisitor::Visit(RegOperand *v) +{ + if (v->IsSSAForm()) { + DEBUG_ASSERT(v->GetRegisterNumber() != kRFLAG, "both condi and reg"); + insn->SetOperand(idx, a64PhiEliminator->GetCGVirtualOpearnd(*v, *insn)); + } +} + +void A64OperandPhiElmVisitor::Visit(ListOperand *v) +{ + std::list tempRegStore; + auto &opndList = v->GetOperands(); + + while (!opndList.empty()) { + auto *regOpnd = opndList.front(); + opndList.pop_front(); + + if (regOpnd->IsSSAForm()) { + tempRegStore.push_back(&a64PhiEliminator->GetCGVirtualOpearnd(*regOpnd, *insn)); + } else { + tempRegStore.push_back(regOpnd); + } + } + + DEBUG_ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempRegStore.begin(), tempRegStore.end()); +} + +void A64OperandPhiElmVisitor::Visit(MemOperand *a64MemOpnd) +{ + RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); + RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); + if ((baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) || + (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm())) { + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + a64MemOpnd->SetBaseRegister(a64PhiEliminator->GetCGVirtualOpearnd(*baseRegOpnd, *insn)); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + a64MemOpnd->SetIndexRegister(a64PhiEliminator->GetCGVirtualOpearnd(*indexRegOpnd, *insn)); + } + } +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3bcce958f87dc92c1cb2ba7b5fc239c94e4e378f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -0,0 +1,2134 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_proepilog.h" +#include "aarch64_cg.h" +#include "cg_option.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +namespace { +const std::set kFrameWhiteListFunc { +#include "framewhitelist.def" +}; + +bool IsFuncNeedFrame(const std::string &funcName) +{ + return kFrameWhiteListFunc.find(funcName) != kFrameWhiteListFunc.end(); +} +constexpr int32 kSoeChckOffset = 8192; + +enum RegsPushPop : uint8 { kRegsPushOp, kRegsPopOp }; + +enum PushPopType : uint8 { kPushPopSingle = 0, kPushPopPair = 1 }; + +MOperator pushPopOps[kRegsPopOp + 1][kRegTyFloat + 1][kPushPopPair + 1] = {{ + /* push */ + {0}, /* undef */ + { + /* kRegTyInt */ + MOP_xstr, /* single */ + MOP_xstp, /* pair */ + }, + { + /* kRegTyFloat */ + MOP_dstr, /* single */ + MOP_dstp, /* pair */ + }, + }, + { + /* pop */ + {0}, /* undef */ + { + /* kRegTyInt */ + MOP_xldr, /* single */ + MOP_xldp, /* pair */ + }, + { + /* kRegTyFloat */ + MOP_dldr, /* single */ + MOP_dldp, /* pair */ + }, + }}; + +inline void AppendInstructionTo(Insn &insn, CGFunc &func) +{ + func.GetCurBB()->AppendInsn(insn); +} +} // namespace + +bool AArch64GenProEpilog::HasLoop() +{ + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsBackEdgeDest()) { + return true; + } + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->HasLoop()) { + return true; + } + } + } + return false; +} + +/* + * Remove redundant mov and mark optimizable bl/blr insn in the BB. + * Return value: true to call this modified block again. 
+ */
+bool AArch64GenProEpilog::OptimizeTailBB(BB &bb, MapleSet<Insn *> &callInsns, const BB &exitBB) const
+{
+    if (bb.NumInsn() == 1 &&
+        (bb.GetLastInsn()->GetMachineOpcode() != MOP_xbr && bb.GetLastInsn()->GetMachineOpcode() != MOP_xblr &&
+         bb.GetLastInsn()->GetMachineOpcode() != MOP_xbl && bb.GetLastInsn()->GetMachineOpcode() != MOP_xuncond)) {
+        return false;
+    }
+    FOR_BB_INSNS_REV_SAFE(insn, &bb, prev_insn) {
+        if (!insn->IsMachineInstruction() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) {
+            continue;
+        }
+        MOperator insnMop = insn->GetMachineOpcode();
+        switch (insnMop) {
+            case MOP_xldr:
+            case MOP_xldp:
+            case MOP_dldr:
+            case MOP_dldp: {
+                if (bb.GetKind() == BB::kBBReturn) {
+                    RegOperand &reg = static_cast<RegOperand &>(insn->GetOperand(0));
+                    if (AArch64Abi::IsCalleeSavedReg(static_cast<AArch64reg>(reg.GetRegisterNumber()))) {
+                        break; /* restore inserted by callee-saved register placement; ignore */
+                    }
+                }
+                return false;
+            }
+            case MOP_wmovrr:
+            case MOP_xmovrr: {
+                CHECK_FATAL(insn->GetOperand(0).IsRegister(), "operand0 is not register");
+                CHECK_FATAL(insn->GetOperand(1).IsRegister(), "operand1 is not register");
+                auto &reg1 = static_cast<RegOperand &>(insn->GetOperand(0));
+                auto &reg2 = static_cast<RegOperand &>(insn->GetOperand(1));
+
+                if (reg1.GetRegisterNumber() != R0 || reg2.GetRegisterNumber() != R0) {
+                    return false;
+                }
+
+                bb.RemoveInsn(*insn);
+                break;
+            }
+            case MOP_xblr: {
+                if (insn->GetOperand(0).IsRegister()) {
+                    RegOperand &reg = static_cast<RegOperand &>(insn->GetOperand(0));
+                    if (AArch64Abi::IsCalleeSavedReg(static_cast<AArch64reg>(reg.GetRegisterNumber()))) {
+                        return false; /* can't tail call; the register would be overwritten by the restore */
+                    }
+                }
+                /* fall through to the MOP_xbl case */
+            }
+            [[clang::fallthrough]];
+            case MOP_xbl: {
+                callInsns.insert(insn);
+                return false;
+            }
+            case MOP_xuncond: {
+                LabelOperand &bLab = static_cast<LabelOperand &>(insn->GetOperand(0));
+                if (exitBB.GetLabIdx() == bLab.GetLabelIndex()) {
+                    break;
+                }
+                return false;
+            }
+            default:
+                return false;
+        }
+    }
+
+    return true;
+}
+
+/* Recursively invoke this function for all predecessors of exitBB */
+void AArch64GenProEpilog::TailCallBBOpt(BB &bb, MapleSet<Insn *> &callInsns, BB &exitBB)
+{
+    /* A callsite may also be in the return block, as in "if () return; else foo();"
+       with the call in the exit block. */
+    if (!bb.IsEmpty() && !OptimizeTailBB(bb, callInsns, exitBB)) {
+        return;
+    }
+
+    for (auto tmpBB : bb.GetPreds()) {
+        if (tmpBB->GetSuccs().size() != 1 || !tmpBB->GetEhSuccs().empty() ||
+            (tmpBB->GetKind() != BB::kBBFallthru && tmpBB->GetKind() != BB::kBBGoto)) {
+            continue;
+        }
+
+        if (OptimizeTailBB(*tmpBB, callInsns, exitBB)) {
+            TailCallBBOpt(*tmpBB, callInsns, exitBB);
+        }
+    }
+}
+
+/*
+ * If a function has no callee-saved registers to restore and ends with a function call,
+ * then transform the bl/blr into b/br.
+ * Return value: true if the function does not need a prologue/epilogue, false otherwise.
+ */
+bool AArch64GenProEpilog::TailCallOpt()
+{
+    /* Count how many call insns are in the whole function. */
*/ + uint32 nCount = 0; + bool hasGetStackClass = false; + + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_xbl) { + auto &target = static_cast(insn->GetOperand(0)); + if (IsFuncNeedFrame(target.GetName())) { + hasGetStackClass = true; + } + } + ++nCount; + } + } + } + if ((nCount > 0 && cgFunc.GetFunction().GetAttr(FUNCATTR_interface)) || hasGetStackClass) { + return false; + } + + if (nCount == 0) { + // no bl instr in any bb + return true; + } + + size_t exitBBSize = cgFunc.GetExitBBsVec().size(); + /* For now to reduce complexity */ + + BB *exitBB = nullptr; + if (exitBBSize == 0) { + if (cgFunc.GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc.GetCleanupLabel() && + cgFunc.GetLastBB()->GetPrev()->GetPrev() != nullptr) { + exitBB = cgFunc.GetLastBB()->GetPrev()->GetPrev(); + } else { + exitBB = cgFunc.GetLastBB()->GetPrev(); + } + } else { + exitBB = cgFunc.GetExitBBsVec().front(); + } + uint32 i = 1; + size_t optCount = 0; + do { + MapleSet callInsns(tmpAlloc.Adapter()); + TailCallBBOpt(*exitBB, callInsns, *exitBB); + if (callInsns.size() != 0) { + optCount += callInsns.size(); + (void)exitBB2CallSitesMap.emplace(exitBB, callInsns); + } + if (i < exitBBSize) { + exitBB = cgFunc.GetExitBBsVec()[i]; + ++i; + } else { + break; + } + } while (1); + + /* regular calls exist in function */ + if (nCount != optCount) { + return false; + } + return true; +} + +static bool IsAddOrSubOp(MOperator mOp) +{ + switch (mOp) { + case MOP_xaddrrr: + case MOP_xaddrrrs: + case MOP_xxwaddrrre: + case MOP_xaddrri24: + case MOP_xaddrri12: + case MOP_xsubrrr: + case MOP_xsubrrrs: + case MOP_xxwsubrrre: + case MOP_xsubrri12: + return true; + default: + return false; + } +} + +/* tailcallopt cannot be used if stack address of this function is taken and passed, + not checking the passing for now, just taken */ +static bool IsStackAddrTaken(CGFunc &cgFunc) +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (IsAddOrSubOp(insn->GetMachineOpcode())) { + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + if (insn->GetOperand(i).IsRegister()) { + RegOperand ® = static_cast(insn->GetOperand(i)); + if (reg.GetRegisterNumber() == R29 || reg.GetRegisterNumber() == R31 || + reg.GetRegisterNumber() == RSP) { + return true; + } + } + } + } + } + } + return false; +} + +bool AArch64GenProEpilog::NeedProEpilog() +{ + if (cgFunc.GetMirModule().GetSrcLang() != kSrcLangC) { + return true; + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) || cgFunc.HasVLAOrAlloca()) { + return true; + } + bool funcHasCalls = false; + if (cgFunc.GetCG()->DoTailCall() && !IsStackAddrTaken(cgFunc) && !stackProtect) { + funcHasCalls = !TailCallOpt(); // return value == "no call instr/only or 1 tailcall" + } else { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + funcHasCalls = true; + } + } + } + } + auto &aarchCGFunc = static_cast(cgFunc); + const MapleVector ®sToRestore = + (!CGOptions::DoRegSavesOpt()) ? 
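+            // Sketch (not part of the original patch): what a successful TailCallOpt()
+            // enables downstream. Once every counted call is a tail call recorded in
+            // exitBB2CallSitesMap, those call sites can be retargeted so the callee
+            // runs in this function's frame-less context, using the tail-call opcodes
+            // referenced later in this file:
+            //     bl  foo    ->    b  foo     // MOP_xbl  -> MOP_tail_call_opt_xbl
+            //     blr x8     ->    br x8      // MOP_xblr -> MOP_tail_call_opt_xblr
+            // This is only sound because there is no frame to tear down and no
+            // remaining callee-saved restore at the call site.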
aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + size_t calleeSavedRegSize = kTwoRegister; + CHECK_FATAL(regsToRestore.size() >= calleeSavedRegSize, "Forgot FP and LR ?"); + if (funcHasCalls || regsToRestore.size() > calleeSavedRegSize || aarchCGFunc.HasStackLoadStore() || + static_cast(cgFunc.GetMemlayout())->GetSizeOfLocals() > 0 || + cgFunc.GetFunction().GetAttr(FUNCATTR_callersensitive)) { + return true; + } + return false; +} + +void AArch64GenProEpilog::GenStackGuard(BB &bb) +{ + if (!stackProtect) { + return; + } + auto &aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + + MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); + StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); + RegOperand &stAddrOpnd = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); + + MemOperand *guardMemOp = + aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, stAddrOpnd, nullptr, + &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), stkGuardSym); + MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); + insn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(insn); + + uint64 vArea = 0; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (ml->GetSizeOfGRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (ml->GetSizeOfVRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (useFP) { + stkSize -= + (static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()) + + cgFunc.GetFunction().GetFrameReseverdSlot()); + } + int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); + } + mOp = aarchCGFunc.PickStInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &tmpInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *downStk); + tmpInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(tmpInsn); + + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) +{ + if (!stackProtect) { + return bb; + } + + BB *formerCurBB = cgFunc.GetCurBB(); + cgFunc.GetDummyBB()->ClearInsns(); + cgFunc.SetCurBB(*(cgFunc.GetDummyBB())); + auto &aarchCGFunc = static_cast(cgFunc); + + const MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); + StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); + RegOperand &stAddrOpnd = + 
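+    // A minimal sketch of the guard protocol shared by GenStackGuard (above) and this
+    // check; the assembly shape is illustrative and the exact relocation flavor
+    // depends on the code model:
+    //   prologue (GenStackGuard):        epilogue check (this function):
+    //     ldr x9, =__stack_chk_guard       ldr x9,  =__stack_chk_guard
+    //     ldr x9, [x9]                     ldr x9,  [x9]
+    //     str x9, [base, #slot]            ldr x10, [base, #slot]
+    //                                      eor x9,  x9, x10
+    //                                      cbnz x9, __stack_chk_fail path
+    // The slot sits just under the saved FP/LR pair, below any GR/VR vararg save area.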
aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); + + MemOperand *guardMemOp = + aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, stAddrOpnd, nullptr, + &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), stkGuardSym); + MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); + insn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(insn); + + uint64 vArea = 0; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (ml->GetSizeOfGRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (ml->GetSizeOfVRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + RegOperand &checkOp = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, GetPointerSize() * kBitsPerByte, kRegTyInt); + int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (useFP) { + stkSize -= + (static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()) + + cgFunc.GetFunction().GetFrameReseverdSlot()); + } + int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + if (downStk->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); + } + mOp = aarchCGFunc.PickLdInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, checkOp, *downStk); + newInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(newInsn); + + cgFunc.SelectBxor(stAddrOpnd, stAddrOpnd, checkOp, PTY_u64); + LabelIdx failLable = aarchCGFunc.CreateLabel(); + aarchCGFunc.SelectCondGoto(aarchCGFunc.GetOrCreateLabelOperand(failLable), OP_brtrue, OP_eq, stAddrOpnd, + aarchCGFunc.CreateImmOperand(0, k64BitSize, false), PTY_u64, false); + + bb.AppendBBInsns(*(cgFunc.GetCurBB())); + + LabelIdx nextBBLableIdx = aarchCGFunc.CreateLabel(); + BB *nextBB = aarchCGFunc.CreateNewBB(nextBBLableIdx, bb.IsUnreachable(), BB::kBBFallthru, bb.GetFrequency()); + bb.AppendBB(*nextBB); + bb.PushBackSuccs(*nextBB); + nextBB->PushBackPreds(bb); + cgFunc.SetCurBB(*nextBB); + MIRSymbol *failFunc = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_fail"))); + ListOperand *srcOpnds = aarchCGFunc.CreateListOpnd(*cgFunc.GetFuncScopeAllocator()); + Insn &callInsn = aarchCGFunc.AppendCall(*failFunc, *srcOpnds); + callInsn.SetDoNotRemove(true); + + BB *newBB = cgFunc.CreateNewBB(failLable, bb.IsUnreachable(), bb.GetKind(), bb.GetFrequency()); + nextBB->AppendBB(*newBB); + if (cgFunc.GetLastBB() == &bb) { + cgFunc.SetLastBB(*newBB); + } + bb.PushBackSuccs(*newBB); + nextBB->PushBackSuccs(*newBB); + newBB->PushBackPreds(*nextBB); + newBB->PushBackPreds(bb); + + bb.SetKind(BB::kBBIf); + cgFunc.SetCurBB(*formerCurBB); + return *newBB; +} + +bool AArch64GenProEpilog::InsertOpndRegs(Operand &op, std::set &vecRegs) const +{ + Operand *opnd = &op; + CHECK_FATAL(opnd != nullptr, "opnd is nullptr in InsertRegs"); + if (opnd->IsList()) { + MapleList pregList = 
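+        // The three operand shapes this collector walks, shown schematically
+        // (register numbers are illustrative, not from the patch):
+        //   list operand   {x0, x1}       -> vecRegs += {0, 1}
+        //   memory operand [x2, x3, #16]  -> vecRegs += {2, 3}   (base and index)
+        //   register       x4             -> vecRegs += {4}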
static_cast(opnd)->GetOperands(); + for (auto *preg : pregList) { + if (preg != nullptr) { + vecRegs.insert(preg->GetRegisterNumber()); + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + if (baseOpnd != nullptr) { + vecRegs.insert(baseOpnd->GetRegisterNumber()); + } + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if (indexOpnd != nullptr) { + vecRegs.insert(indexOpnd->GetRegisterNumber()); + } + } + if (opnd->IsRegister()) { + RegOperand *preg = static_cast(opnd); + if (preg != nullptr) { + vecRegs.insert(preg->GetRegisterNumber()); + } + } + return true; +} + +bool AArch64GenProEpilog::InsertInsnRegs(Insn &insn, bool insertSource, std::set &vecSourceRegs, + bool insertTarget, std::set &vecTargetRegs) +{ + Insn *curInsn = &insn; + for (uint32 o = 0; o < curInsn->GetOperandSize(); ++o) { + Operand &opnd = curInsn->GetOperand(o); + if (insertSource && curInsn->OpndIsUse(o)) { + InsertOpndRegs(opnd, vecSourceRegs); + } + if (insertTarget && curInsn->OpndIsDef(o)) { + InsertOpndRegs(opnd, vecTargetRegs); + } + } + return true; +} + +bool AArch64GenProEpilog::FindRegs(Operand &op, std::set &vecRegs) const +{ + Operand *opnd = &op; + if (opnd == nullptr || vecRegs.empty()) { + return false; + } + if (opnd->IsList()) { + MapleList pregList = static_cast(opnd)->GetOperands(); + for (auto *preg : pregList) { + if (preg->GetRegisterNumber() == R29 || vecRegs.find(preg->GetRegisterNumber()) != vecRegs.end()) { + return true; /* the opReg will overwrite or reread the vecRegs */ + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if ((baseOpnd != nullptr && baseOpnd->GetRegisterNumber() == R29) || + (indexOpnd != nullptr && indexOpnd->GetRegisterNumber() == R29)) { + return true; /* Avoid modifying data on the stack */ + } + if ((baseOpnd != nullptr && vecRegs.find(baseOpnd->GetRegisterNumber()) != vecRegs.end()) || + (indexOpnd != nullptr && vecRegs.find(indexOpnd->GetRegisterNumber()) != vecRegs.end())) { + return true; + } + } + if (opnd->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd); + if (regOpnd->GetRegisterNumber() == R29 || vecRegs.find(regOpnd->GetRegisterNumber()) != vecRegs.end()) { + return true; /* dst is a target register, result_dst is a target register */ + } + } + return false; +} + +bool AArch64GenProEpilog::BackwardFindDependency(BB &ifbb, std::set &vecReturnSourceRegs, + std::list &existingInsns, std::list &moveInsns) +{ + /* + * Pattern match,(*) instruction are moved down below branch. + * ******************** + * curInsn: + * in predBB + * in ifBB + * in returnBB + * ********************* + * list: the insns can be moved into the coldBB + * (1) the instruction is neither a branch nor a call, except for the ifbb.GetLastInsn() + * As long as a branch insn exists, + * the fast path finding fails and the return value is false, + * but the code sinking can be continued. + * (2) the predBB is not a ifBB, + * As long as a ifBB in preds exists, + * the code sinking fails, + * but fast path finding can be continued. 
+ * (3) the targetRegs of insns in existingInsns can neither be reread or overwrite + * (4) the sourceRegs of insns in existingInsns can not be overwrite + * (5) the sourceRegs of insns in returnBB can neither be reread or overwrite + * (6) the targetRegs and sourceRegs cannot be R29 R30, to protect the stack + * (7) modified the reg when: + * -------------- + * curInsn: move R2,R1 + * : s s s + * s s s + * -> s s s + * ------------ + * (a) all targets cannot be R1, all sources cannot be R1 + * all targets cannot be R2, all return sources cannot be R2 + * (b) the targetRegs and sourceRegs cannot be list or MemoryAccess + * (c) no ifBB in preds, no branch insns + * (d) the bits of source-R2 must be equal to the R2 + * (e) replace the R2 with R1 + */ + BB *pred = &ifbb; + std::set vecTargetRegs; /* the targrtRegs of existingInsns */ + std::set vecSourceRegs; /* the soureRegs of existingInsns */ + bool ifPred = false; /* Indicates whether a ifBB in pred exists */ + bool bl = false; /* Indicates whether a branch insn exists */ + do { + FOR_BB_INSNS_REV(insn, pred) { + /* code sinking */ + if (insn->IsImmaterialInsn()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking */ + if (!insn->IsMachineInstruction()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking fails, the insns must be retained in the ifBB */ + if (ifPred || insn == ifbb.GetLastInsn() || insn->IsBranch() || insn->IsCall() || insn->IsStore() || + insn->IsStorePair()) { + /* fast path finding fails */ + if (insn != ifbb.GetLastInsn() && + (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair())) { + bl = true; + } + InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + continue; + } + bool allow = true; /* whether allow this insn move into the codeBB */ + for (uint32 o = 0; allow && o < insn->GetOperandSize(); ++o) { + Operand &opnd = insn->GetOperand(o); + if (insn->OpndIsDef(o)) { + allow = allow & !FindRegs(opnd, vecTargetRegs); + allow = allow & !FindRegs(opnd, vecSourceRegs); + allow = allow & !FindRegs(opnd, vecReturnSourceRegs); + } + if (insn->OpndIsUse(o)) { + allow = allow & !FindRegs(opnd, vecTargetRegs); + } + } + /* if a result_dst not allowed, this insn can be allowed on the condition of mov Rx,R0/R1, + * and tje existing insns cannot be blr + * RLR 31, RFP 32, RSP 33, RZR 34 */ + if (!ifPred && !bl && !allow && + (insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr)) { + Operand *resultOpnd = &(insn->GetOperand(0)); + Operand *srcOpnd = &(insn->GetOperand(1)); + regno_t resultNO = static_cast(resultOpnd)->GetRegisterNumber(); + regno_t srcNO = static_cast(srcOpnd)->GetRegisterNumber(); + if (!FindRegs(*resultOpnd, vecTargetRegs) && !FindRegs(*srcOpnd, vecTargetRegs) && + !FindRegs(*srcOpnd, vecSourceRegs) && !FindRegs(*srcOpnd, vecReturnSourceRegs) && + (srcNO < RLR || srcNO > RZR)) { + allow = true; /* allow on the conditional mov Rx,Rxx */ + for (auto *exit : existingInsns) { + /* the registers of kOpdMem are complex to be detected */ + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsList() || opd->IsMemoryAccessOperand()) { + allow = false; + break; + } + /* Distinguish between 32-bit regs and 64-bit regs */ + if (opd->IsRegister() && static_cast(opd)->GetRegisterNumber() == resultNO && + opd != resultOpnd) { + allow = false; + break; + } + } + } + } + /* replace the R2 with R1 */ + if 
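+            // Copy-propagation step about to run, shown schematically (registers
+            // illustrative): for
+            //     mov x2, x1      <- curInsn, candidate for sinking into coldBB
+            //     cmp x2, #0      <- existing insn kept above the branch
+            // every use of x2 in existingInsns is rewritten to x1, after which the
+            // mov no longer feeds the branch and may be moved below it.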
(allow) { + for (auto *exit : existingInsns) { + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsRegister() && (opd == resultOpnd)) { + exit->SetOperand(o, *srcOpnd); + } + } + } + } + } + if (!allow) { /* all result_dsts are not target register */ + /* code sinking fails */ + InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + } else { + moveInsns.push_back(insn); + } + } + if (pred->GetPreds().empty()) { + break; + } + if (!ifPred) { + for (auto *tmPred : pred->GetPreds()) { + pred = tmPred; + /* try to find the BB without branch */ + if (tmPred->GetKind() == BB::kBBGoto || tmPred->GetKind() == BB::kBBFallthru) { + ifPred = false; + break; + } else { + ifPred = true; + } + } + } + } while (pred != nullptr); + for (std::set::iterator it = vecTargetRegs.begin(); it != vecTargetRegs.end(); ++it) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(*it))) { /* flag register */ + return false; + } + } + return !bl; +} + +BB *AArch64GenProEpilog::IsolateFastPath(BB &bb) +{ + /* + * Detect "if (cond) return" fast path, and move extra instructions + * to the slow path. + * Must match the following block structure. BB1 can be a series of + * single-pred/single-succ blocks. + * BB1 ops1 cmp-br to BB3 BB1 cmp-br to BB3 + * BB2 ops2 br to retBB ==> BB2 ret + * BB3 slow path BB3 ops1 ops2 + * if the detect is successful, BB3 will be used to generate prolog stuff. + */ + if (bb.GetPrev() != nullptr) { + return nullptr; + } + BB *ifBB = nullptr; + BB *returnBB = nullptr; + BB *coldBB = nullptr; + { + BB *curBB = &bb; + /* Look for straight line code */ + while (1) { + if (!curBB->GetEhSuccs().empty()) { + return nullptr; + } + if (curBB->GetSuccs().size() == 1) { + if (curBB->HasCall()) { + return nullptr; + } + BB *succ = curBB->GetSuccs().front(); + if (succ->GetPreds().size() != 1 || !succ->GetEhPreds().empty()) { + return nullptr; + } + curBB = succ; + } else if (curBB->GetKind() == BB::kBBIf) { + ifBB = curBB; + break; + } else { + return nullptr; + } + } + } + /* targets of if bb can only be reached by if bb */ + { + CHECK_FATAL(!ifBB->GetSuccs().empty(), "null succs check!"); + BB *first = ifBB->GetSuccs().front(); + BB *second = ifBB->GetSuccs().back(); + if (first->GetPreds().size() != 1 || !first->GetEhPreds().empty()) { + return nullptr; + } + if (second->GetPreds().size() != 1 || !second->GetEhPreds().empty()) { + return nullptr; + } + /* One target of the if bb jumps to a return bb */ + if (first->GetKind() != BB::kBBGoto && first->GetKind() != BB::kBBFallthru) { + return nullptr; + } + if (first->GetSuccs().size() != 1) { + return nullptr; + } + if (first->GetSuccs().front()->GetKind() != BB::kBBReturn) { + return nullptr; + } + if (first->GetSuccs().front()->GetPreds().size() != 1) { + return nullptr; + } + if (first->GetSuccs().front()->NumInsn() > 2) { /* avoid a insn is used to debug */ + return nullptr; + } + if (second->GetSuccs().empty()) { + return nullptr; + } + returnBB = first; + coldBB = second; + } + /* Search backward looking for dependencies for the cond branch */ + std::list existingInsns; /* the insns must be retained in the ifBB (and the return BB) */ + std::list moveInsns; /* instructions to be moved to coldbb */ + /* + * The control flow matches at this point. + * Make sure the SourceRegs of the insns in returnBB (vecReturnSourceReg) cannot be overwrite. 
+     * the regs in insns have three forms: list, MemoryAccess, or Register.
+     */
+    CHECK_FATAL(returnBB != nullptr, "null ptr check");
+    std::set<regno_t> vecReturnSourceRegs;
+    FOR_BB_INSNS_REV(insn, returnBB) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) {
+            return nullptr;
+        }
+        InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs);
+        existingInsns.push_back(insn);
+    }
+    FOR_BB_INSNS_REV(insn, returnBB->GetSuccs().front()) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) {
+            return nullptr;
+        }
+        InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs);
+        existingInsns.push_back(insn);
+    }
+    /*
+     * The mv is the 1st move using the parameter register leading to the branch
+     * The ld is the load using the parameter register indirectly for the branch
+     * The depMv is the move which preserves the result of the load but might
+     * destroy a parameter register which will be moved below the branch.
+     */
+    bool fast = BackwardFindDependency(*ifBB, vecReturnSourceRegs, existingInsns, moveInsns);
+    /* move extra instructions to the slow path */
+    if (!fast) {
+        return nullptr;
+    }
+    for (auto in : moveInsns) {
+        in->GetBB()->RemoveInsn(*in);
+        CHECK_FATAL(coldBB != nullptr, "null ptr check");
+        static_cast<void>(coldBB->InsertInsnBegin(*in));
+    }
+    /* All instructions are in the right place, replace branch to ret bb to just ret. */
+    /* Remove the lastInsn of gotoBB */
+    if (returnBB->GetKind() == BB::kBBGoto) {
+        returnBB->RemoveInsn(*returnBB->GetLastInsn());
+    }
+    BB *tgtBB = returnBB->GetSuccs().front();
+    CHECK_FATAL(tgtBB != nullptr, "null ptr check");
+    FOR_BB_INSNS(insn, tgtBB) {
+        returnBB->AppendInsn(*insn); /* add the insns such as MOP_xret */
+    }
+    returnBB->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret));
+    /* bb is now a retbb and has no succ.
*/ + returnBB->SetKind(BB::kBBReturn); + auto predIt = std::find(tgtBB->GetPredsBegin(), tgtBB->GetPredsEnd(), returnBB); + tgtBB->ErasePreds(predIt); + tgtBB->ClearInsns(); + returnBB->ClearSuccs(); + if (tgtBB->GetPrev() != nullptr && tgtBB->GetNext() != nullptr) { + tgtBB->GetPrev()->SetNext(tgtBB->GetNext()); + tgtBB->GetNext()->SetPrev(tgtBB->GetPrev()); + } + SetFastPathReturnBB(tgtBB); + return coldBB; +} + +MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, const MemOperand &mo, + uint32 bitLen, AArch64reg baseRegNum) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CHECK_FATAL(mo.GetAddrMode() == MemOperand::kAddrModeBOi, "mode should be kAddrModeBOi"); + OfstOperand *ofstOp = mo.GetOffsetImmediate(); + int32 offsetVal = static_cast(ofstOp->GetOffsetValue()); + CHECK_FATAL(offsetVal > 0, "offsetVal should be greater than 0"); + CHECK_FATAL((static_cast(offsetVal) & 0x7) == 0, "(offsetVal & 0x7) should be equal to 0"); + /* + * Offset adjustment due to FP/SP has already been done + * in AArch64GenProEpilog::GeneratePushRegs() and AArch64GenProEpilog::GeneratePopRegs() + */ + RegOperand &br = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(baseRegNum, bitLen, kRegTyInt); + if (aarchCGFunc.GetSplitBaseOffset() == 0) { + aarchCGFunc.SetSplitBaseOffset(offsetVal); /* remember the offset; don't forget to clear it */ + ImmOperand &immAddEnd = aarchCGFunc.CreateImmOperand(offsetVal, k64BitSize, true); + RegOperand *origBaseReg = mo.GetBaseRegister(); + aarchCGFunc.SelectAdd(br, *origBaseReg, immAddEnd, PTY_i64); + } + offsetVal = offsetVal - aarchCGFunc.GetSplitBaseOffset(); + return &aarchCGFunc.CreateReplacementMemOperand(bitLen, br, offsetVal); +} + +void AArch64GenProEpilog::AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, + int32 offset) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + uint32 dataSize = GetPointerSize() * kBitsPerByte; + CHECK_FATAL(offset >= 0, "offset must >= 0"); + if (offset > kStpLdpImm64UpperBound) { + o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); + } + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + std::string comment = "SAVE CALLEE REGISTER PAIR"; + pushInsn.SetComment(comment); + AppendInstructionTo(pushInsn, cgFunc); + + /* Append CFi code */ + if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) { + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + stackFrameSize -= (static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()) + + cgFunc.GetFunction().GetFrameReseverdSlot()); + int32 cfiOffset = stackFrameSize - offset; + BB *curBB = cgFunc.GetCurBB(); + Insn *newInsn = curBB->InsertInsnAfter(pushInsn, aarchCGFunc.CreateCfiOffsetInsn(reg0, -cfiOffset, k64BitSize)); + curBB->InsertInsnAfter(*newInsn, + aarchCGFunc.CreateCfiOffsetInsn(reg1, -cfiOffset + kOffset8MemPos, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = 
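+    // Offset-splitting sketch for the pair path above, used when the immediate
+    // exceeds the stp/ldp range of [-512, 504] (values illustrative):
+    //     add x16, sp, #1024                   // emitted once; 1024 becomes SplitBaseOffset
+    //     stp x19, x20, [x16, #(off1 - 1024)]
+    //     stp x21, x22, [x16, #(off2 - 1024)]
+    // Later pairs reuse x16, which is why the push/pop sequence must end with
+    // SetSplitBaseOffset(0).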
pushPopOps[kRegsPushOp][rty][kPushPopSingle]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + MemOperand *aarchMemO1 = static_cast(o1); + uint32 dataSize = GetPointerSize() * kBitsPerByte; + if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { + o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R9); + } + + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1); + std::string comment = "SAVE CALLEE REGISTER"; + pushInsn.SetComment(comment); + AppendInstructionTo(pushInsn, cgFunc); + + /* Append CFI code */ + if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) { + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + stackFrameSize -= (static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()) + + cgFunc.GetFunction().GetFrameReseverdSlot()); + int32 cfiOffset = stackFrameSize - offset; + cgFunc.GetCurBB()->InsertInsnAfter(pushInsn, aarchCGFunc.CreateCfiOffsetInsn(reg, -cfiOffset, k64BitSize)); + } +} + +Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int64 fpToSpDistance, AArch64reg reg0, + AArch64reg reg1, RegType rty, + bool isAllocate) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = isAllocate ? pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopPair]; + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + if (fpToSpDistance <= kStrLdrImm64UpperBound - kOffset8MemPos) { + mOp = isAllocate ? pushPopOps[kRegsPushOp][rty][kPushPopSingle] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(fpToSpDistance), size * kBitsPerByte); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o2); + AppendInstructionTo(insn1, cgFunc); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(fpToSpDistance + size), size * kBitsPerByte); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } else { + RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); + ImmOperand &io1 = aarchCGFunc.CreateImmOperand(fpToSpDistance, k64BitSize, true); + aarchCGFunc.SelectCopyImm(oo, io1, PTY_i64); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); + MemOperand *mo = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? MOP_xstr : MOP_xldr, o0, *mo); + AppendInstructionTo(insn1, cgFunc); + ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); + aarchCGFunc.SelectAdd(oo, oo, io2, PTY_i64); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + mo = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? 
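+        // Out-of-range fallback being built here, as a schematic (reg0/reg1 stand in
+        // for the actual operands):
+        //     mov x9, #fpToSpDistance
+        //     str reg0, [sp, x9]      // ldr on the deallocate path
+        //     add x9, x9, #size
+        //     str reg1, [sp, x9]
+        // Base-plus-register addressing sidesteps the str/ldr immediate limit.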
MOP_xstr : MOP_xldr, o1, *mo); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } +} + +Insn &AArch64GenProEpilog::CreateAndAppendInstructionForAllocateCallFrame(int64 fpToSpDistance, AArch64reg reg0, + AArch64reg reg1, RegType rty) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + Insn *allocInsn = nullptr; + if (fpToSpDistance > kStpLdpImm64UpperBound) { + allocInsn = &AppendInstructionForAllocateOrDeallocateCallFrame(fpToSpDistance, reg0, reg1, rty, true); + } else { + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = + aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(fpToSpDistance), GetPointerSize() * kBitsPerByte); + allocInsn = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*allocInsn, cgFunc); + } + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + return *allocInsn; +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame")); + } + + Insn *ipoint = nullptr; + /* + * stackFrameSize includes the size of args to stack-pass + * if a function has neither VLA nor alloca. + */ + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 fpToSpDistance = cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot(); + /* + * ldp/stp's imm should be within -512 and 504; + * if stp's imm > 512, we fall back to the stp-sub version + */ + bool useStpSub = false; + int64 offset = 0; + int32 cfiOffset = 0; + if (!cgFunc.HasVLAOrAlloca() && fpToSpDistance > 0) { + /* + * stack_frame_size == size of formal parameters + callee-saved (including FP/RL) + * + size of local vars + * + size of actuals + * (when passing more than 8 args, its caller's responsibility to + * allocate space for it. size of actuals represent largest such size in the function. 
+ */ + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = + aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + cfiOffset = offset; + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + } + + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + + if (!cgFunc.HasVLAOrAlloca() && fpToSpDistance > 0) { + CHECK_FATAL(!useStpSub, "Invalid assumption"); + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(fpToSpDistance, reg0, reg1, rty); + } + + if (useStpSub) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + aarchCGFunc.SetUsedStpSubPairForCallFrameAllocation(true); + } + + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + int32 cfiOffsetSecond = 0; + if (useStpSub) { + cfiOffsetSecond = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffsetSecond, *ipoint); + } + cfiOffsetSecond = GetOffsetFromCFA(); + if (!cgFunc.HasVLAOrAlloca()) { + cfiOffsetSecond -= fpToSpDistance; + } + if (cgFunc.GenCfi()) { + BB *curBB = cgFunc.GetCurBB(); + if (useFP) { + ipoint = curBB->InsertInsnAfter( + *ipoint, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffsetSecond, k64BitSize)); + } + curBB->InsertInsnAfter(*ipoint, + aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffsetSecond + kOffset8MemPos, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame for debugging")); + } + + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 fpToSpDistance = + (cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot()); + + Insn *ipoint = nullptr; + int32 cfiOffset = 0; + + if (fpToSpDistance > 0) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, 
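+    // The two frame-allocation shapes this function produces, side by side
+    // (frame size illustrative):
+    //   frame <= 504:                       frame > 504 (stp-sub):
+    //     stp x29, x30, [sp, #-frame]!        stp x29, x30, [sp, #-16]!
+    //                                         sub sp, sp, #(frame - 16)
+    // Every SP adjustment is mirrored by InsertCFIDefCfaOffset() so the unwinder's
+    // CFA keeps tracking the moving stack pointer.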
PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + (void)InsertCFIDefCfaOffset(cfiOffset, *ipoint); + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + fpToSpDistance -= (kDivide2 * k8ByteSize); + } + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(fpToSpDistance, reg0, reg1, rty); + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + cfiOffset = GetOffsetFromCFA(); + cfiOffset -= fpToSpDistance; + } else { + bool useStpSub = false; + + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + RegOperand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + cfiOffset = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + } else { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + cfiOffset = stackFrameSize; + ipoint = InsertCFIDefCfaOffset(cfiOffset, *ipoint); + } + + if (useStpSub) { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*ipoint, cgFunc); + } + + if (currCG->NeedInsertInstrumentationFunction()) { + aarchCGFunc.AppendCall(*currCG->GetInstrumentationFunction()); + } else if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } else if (currCG->InstrumentWithProfile()) { + aarchCGFunc.AppendCall(*currCG->GetProfileFunction()); + } + + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + cfiOffset = GetOffsetFromCFA(); + } + if (cgFunc.GenCfi()) { + BB *curBB = cgFunc.GetCurBB(); + if (useFP) { + ipoint = + curBB->InsertInsnAfter(*ipoint, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffset, k64BitSize)); + } + curBB->InsertInsnAfter(*ipoint, aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffset + kOffset8MemPos, k64BitSize)); + } +} + +/* + * From AArch64 Reference Manual + * C1.3.3 Load/Store Addressing Mode + * ... + * When stack alignment checking is enabled by system software and + * the base register is the SP, the current stack pointer must be + * initially quadword aligned, that is aligned to 16 bytes. Misalignment + * generates a Stack Alignment fault. The offset does not have to + * be a multiple of 16 bytes unless the specific Load/Store instruction + * requires this. SP cannot be used as a register offset. + */ +void AArch64GenProEpilog::GeneratePushRegs() +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + const MapleVector ®sToSave = + (!CGOptions::DoRegSavesOpt()) ? 
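+        // Consequence of the quadword-alignment rule quoted above: callee saves are
+        // laid out pairwise. A function saving x19..x21 still reserves an even number
+        // of 8-byte slots, e.g. (illustrative):
+        //     stp x19, x20, [sp, #16]
+        //     str x21, [sp, #32]     // single save; 8 bytes of padding keep SP 16-aligned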
aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + CHECK_FATAL(!regsToSave.empty(), "FP/LR not added to callee-saved list?"); + + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("save callee-saved registers")); + } + + /* + * Even if we don't use RFP, since we push a pair of registers in one instruction + * and the stack needs be aligned on a 16-byte boundary, push RFP as well if function has a call + * Make sure this is reflected when computing callee_saved_regs.size() + */ + if (!currCG->GenerateDebugFriendlyCode()) { + AppendInstructionAllocateCallFrame(R29, RLR, kRegTyInt); + } else { + AppendInstructionAllocateCallFrameDebug(R29, RLR, kRegTyInt); + } + + if (useFP) { + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("copy SP to FP")); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + int64 fpToSpDistance = + (cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot()); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((fpToSpDistance > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(fpToSpDistance, k32BitSize, true); + } + if (!isLmbc || cgFunc.SeenFP() || cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + } + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn( + stackBaseReg, + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - fpToSpDistance, + k64BitSize)); + } + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn( + cgFunc.GetInsnBuilder() + ->BuildCfiInsn(cfi::OP_CFI_def_cfa_register) + .AddOpndChain(aarchCGFunc.CreateCfiRegOperand(stackBaseReg, k64BitSize))); + } + } + } + + MapleVector::const_iterator it = regsToSave.begin(); + /* skip the first two registers */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + offset = static_cast((memLayout->RealStackFrameSize() - aarchCGFunc.SizeOfCalleeSaved()) - + memLayout->GetSizeOfLocals()); + } else { + offset = (static_cast(memLayout->RealStackFrameSize() - + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))) - /* for FP/LR */ + memLayout->SizeOfArgsToStackPass() - + cgFunc.GetFunction().GetFrameReseverdSlot()); + } + + if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { + offset -= kAarch64StackPtrAlignment; + } + + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = 
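+        // Worked example of the offset computation above, under assumed sizes (not
+        // taken from the patch): RealStackFrameSize = 112, SizeOfCalleeSaved = 32
+        // (FP/LR plus x19/x20), no stack-passed args, no reserved slot:
+        //     offset = 112 - (32 - 2 * 8) - 0 - 0 = 96
+        // so the x19/x20 pair would land at [sp, #96], near the top of the 112-byte
+        // frame, while FP/LR occupy the bottom via the pre-indexed stp.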
static_cast(cgFunc.GetMemlayout()); + auto saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + + std::vector> calleeRegAndOffsetVec; + for (; it != regsToSave.end(); ++it) { + AArch64reg reg = *it; + CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + if (firstHalf == kRinvalid) { + /* remember it */ + firstHalf = reg; + /* for int callee-saved register: x19->19,x20->20 ... + for float callee-saved register: d8->72, d9->73 ..., d15->79 + */ + uint16 regNO = (regType == kRegTyInt) ? static_cast(reg - 1) : static_cast(reg - V8 + 72); + calleeRegAndOffsetVec.push_back(std::pair(regNO, offset)); + } else { + uint16 regNO = (regType == kRegTyInt) ? static_cast(reg - 1) : static_cast(reg - V8 + 72); + calleeRegAndOffsetVec.push_back(std::pair(regNO, offset + k8ByteSize)); + AppendInstructionPushPair(cgFunc, firstHalf, reg, regType, offset); + GetNextOffsetCalleeSaved(offset); + firstHalf = kRinvalid; + } + } + + if (intRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (fpRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset); + GetNextOffsetCalleeSaved(offset); + } + + /* + * in case we split stp/ldp instructions, + * so that we generate a load-into-base-register instruction + * for pop pairs as well. + */ + aarchCGFunc.SetSplitBaseOffset(0); + + const auto &emitMemoryManager = CGOptions::GetInstance().GetEmitMemoryManager(); + if (emitMemoryManager.codeSpace != nullptr) { + emitMemoryManager.funcCalleeOffsetSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), calleeRegAndOffsetVec); + int64 fpToCurSpDistance = + (cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot()); + int32 fp2PrevFrameSPDelta = + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - fpToCurSpDistance; + emitMemoryManager.funcFpSPDeltaSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), fp2PrevFrameSPDelta); + } +} + +void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() +{ + auto &aarchCGFunc = static_cast(cgFunc); + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + uint32 dataSizeBits = size * kBitsPerByte; + uint32 offset; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()); /* SP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */ + } + } else { + offset = (UINT32_MAX - memlayout->GetSizeOfGRSaveArea()) + 1; /* FP reference */ + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset -= size; + } + } + uint32 grSize = (UINT32_MAX - offset) + 1; + uint32 start_regno = k8BitSize - (memlayout->GetSizeOfGRSaveArea() / size); + DEBUG_ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for GR Save Area"); + for (uint32 i = start_regno + static_cast(R0); i < static_cast(R8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 8) { + tmpOffset += 8U - (dataSizeBits >> 3); + } + } + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + Insn &inst = + cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_i64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += size; + } + if (!CGOptions::UseGeneralRegOnly()) { + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()); + } else { + offset = (UINT32_MAX - (memlayout->GetSizeOfVRSaveArea() + grSize)) + 1; + } + start_regno = k8BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); + DEBUG_ASSERT(start_regno <= k8BitSize, "Incorrect starting GR regno for VR Save Area"); + for (uint32 i = start_regno + static_cast(V0); i < static_cast(V8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 16) { + tmpOffset += 16U - (dataSizeBits >> 3); + } + } + Operand *stackLoc; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + } else { + stackLoc = aarchCGFunc.GenLmbcFpMemOperand(offset, size); + } + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + Insn &inst = + cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_f64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += (size * k2BitSize); + } + } + } +} + +void AArch64GenProEpilog::AppendInstructionStackCheck(AArch64reg reg, RegType rty, int32 offset) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* sub x16, sp, #0x2000 */ + auto &x16Opnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, rty); + auto &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, rty); + auto &imm1 = aarchCGFunc.CreateImmOperand(offset, k64BitSize, true); + aarchCGFunc.SelectSub(x16Opnd, spOpnd, imm1, PTY_u64); + + /* ldr wzr, [x16] */ + auto &wzr = cgFunc.GetZeroOpnd(k32BitSize); + auto &refX16 = aarchCGFunc.CreateMemOpnd(reg, 0, k64BitSize); + auto &soeInstr = cgFunc.GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, refX16); + if (currCG->GenerateVerboseCG()) { + soeInstr.SetComment("soerror"); + } + soeInstr.SetDoNotRemove(true); + AppendInstructionTo(soeInstr, cgFunc); +} + +void AArch64GenProEpilog::GenerateProlog(BB &bb) +{ + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + 
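+    // Dummy-BB idiom used here and in GenStackGuard: prologue insns are accumulated
+    // in a scratch block and spliced in at the end, so curBB state survives intact.
+    // Schematically:
+    //     formerCurBB = curBB
+    //     curBB = dummyBB               // emit prologue insns here
+    //     bb.InsertAtBeginning(dummyBB)
+    //     curBB = formerCurBB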
cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + if (!cgFunc.GetHasProEpilogue()) { + return; + } + + // insert .loc for function + if (currCG->GetCGOptions().WithLoc() && + (!currCG->GetMIRModule()->IsCModule() || currCG->GetMIRModule()->IsWithDbgInfo())) { + MIRFunction *func = &cgFunc.GetFunction(); + MIRSymbol *fSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + if (currCG->GetCGOptions().WithSrc()) { + uint32 tempmaxsize = static_cast(currCG->GetMIRModule()->GetSrcFileInfo().size()); + uint32 endfilenum = currCG->GetMIRModule()->GetSrcFileInfo()[tempmaxsize - 1].second; + if (fSym->GetSrcPosition().FileNum() != 0 && fSym->GetSrcPosition().FileNum() <= endfilenum) { + Operand *o0 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().FileNum()); + int64_t lineNum = fSym->GetSrcPosition().LineNum(); + if (lineNum == 0) { + if (cgFunc.GetFunction().GetAttr(FUNCATTR_native)) { + lineNum = 0xffffe; + } else { + lineNum = 0xffffd; + } + } + Operand *o1 = cgFunc.CreateDbgImmOperand(lineNum); + Insn &loc = + cgFunc.GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } else { + Operand *o0 = cgFunc.CreateDbgImmOperand(1); + Operand *o1 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().MplLineNum()); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } + + const MapleVector ®sToSave = + (!CGOptions::DoRegSavesOpt()) ? aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + if (!regsToSave.empty()) { + /* + * Among other things, push the FP & LR pair. + * FP/LR are added to the callee-saved list in AllocateRegisters() + * We add them to the callee-saved list regardless of UseFP() being true/false. 
+ * Activation Frame is allocated as part of pushing FP/LR pair + */ + GeneratePushRegs(); + } else { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (stackFrameSize > 0) { + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("allocate activation frame")); + } + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + + int32 offset = stackFrameSize; + (void)InsertCFIDefCfaOffset(offset, *(cgFunc.GetCurBB()->GetLastInsn())); + } + if (currCG->GenerateVerboseCG()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("copy SP to FP")); + } + if (useFP) { + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + int64 fpToSpDistance = + cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot(); + if ((fpToSpDistance > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(fpToSpDistance, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn( + stackBaseReg, + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - fpToSpDistance, + k64BitSize)); + } + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + if (cgFunc.GenCfi()) { + cgFunc.GetCurBB()->AppendInsn( + cgFunc.GetInsnBuilder() + ->BuildCfiInsn(cfi::OP_CFI_def_cfa_register) + .AddOpndChain(aarchCGFunc.CreateCfiRegOperand(stackBaseReg, k64BitSize))); + } + } + } + } + GeneratePushUnnamedVarargRegs(); + if (currCG->DoCheckSOE()) { + AppendInstructionStackCheck(R16, kRegTyInt, kSoeChckOffset); + } + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + cgFunc.SetCurBB(*formerCurBB); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); +} + +void AArch64GenProEpilog::GenerateRet(BB &bb) +{ + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret)); +} + +/* + * If all the preds of exitBB made the TailcallOpt(replace blr/bl with br/b), return true, we don't create ret insn. + * Otherwise, return false, create the ret insn. 
+ */ +bool AArch64GenProEpilog::TestPredsOfRetBB(const BB &exitBB) +{ + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (cgFunc.GetMirModule().IsCModule() && + (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) || ml->GetSizeOfLocals() > 0 || cgFunc.HasVLAOrAlloca())) { + return false; + } + for (auto tmpBB : exitBB.GetPreds()) { + Insn *firstInsn = tmpBB->GetFirstInsn(); + if ((firstInsn == nullptr || tmpBB->IsCommentBB()) && (!tmpBB->GetPreds().empty())) { + if (!TestPredsOfRetBB(*tmpBB)) { + return false; + } + } else { + Insn *lastInsn = tmpBB->GetLastInsn(); + if (lastInsn == nullptr) { + return false; + } + MOperator insnMop = lastInsn->GetMachineOpcode(); + if (insnMop != MOP_tail_call_opt_xbl && insnMop != MOP_tail_call_opt_xblr) { + return false; + } + } + } + return true; +} + +void AArch64GenProEpilog::AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + MemOperand *aarchMemO1 = static_cast(o1); + uint32 dataSize = GetPointerSize() * kBitsPerByte; + if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { + o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R9); + } + + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1); + popInsn.SetComment("RESTORE"); + cgFunc.GetCurBB()->AppendInsn(popInsn); + + /* Append CFI code. */ + if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, + int32 offset) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + uint32 dataSize = GetPointerSize() * kBitsPerByte; + CHECK_FATAL(offset >= 0, "offset must >= 0"); + if (offset > kStpLdpImm64UpperBound) { + o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, static_cast(*o2), dataSize, R16); + } + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + popInsn.SetComment("RESTORE RESTORE"); + cgFunc.GetCurBB()->AppendInsn(popInsn); + + /* Append CFI code */ + if (cgFunc.GenCfi() && !CGOptions::IsNoCalleeCFI()) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg0, k64BitSize)); + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(reg1, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + int32 stackFrameSize = + 
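+    // Restore-side sketch matching the push side (operands illustrative):
+    //     ldp x19, x20, [sp, #offset]    // AppendInstructionPopPair
+    //     .cfi_restore w19
+    //     .cfi_restore w20
+    // A per-register .cfi_restore tells the unwinder to stop looking in the stack
+    // slot once the epilogue has reloaded the value.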
static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 fpToSpDistance = cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot(); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool useLdpAdd = false; + int32 offset = 0; + + Operand *o2 = nullptr; + if (!cgFunc.HasVLAOrAlloca() && fpToSpDistance > 0) { + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(fpToSpDistance), GetPointerSize() * kBitsPerByte); + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useLdpAdd = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + o2 = &aarchCGFunc.CreateCallFrameOperand(offset, GetPointerSize() * kBitsPerByte); + } + + if (useLdpAdd) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + if (cgFunc.GenCfi()) { + int64 cfiOffset = GetOffsetFromCFA(); + BB *curBB = cgFunc.GetCurBB(); + curBB->InsertInsnAfter(*(curBB->GetLastInsn()), + aarchCGFunc.CreateCfiDefCfaInsn(RSP, cfiOffset - stackFrameSize, k64BitSize)); + } + } + + if (!cgFunc.HasVLAOrAlloca() && fpToSpDistance > 0) { + CHECK_FATAL(!useLdpAdd, "Invalid assumption"); + if (fpToSpDistance > kStpLdpImm64UpperBound) { + (void)AppendInstructionForAllocateOrDeallocateCallFrame(fpToSpDistance, reg0, reg1, rty, false); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + + if (cgFunc.GenCfi()) { + /* Append CFI restore */ + if (useFP) { + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } +} + +void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) +{ + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + int32 stackFrameSize = + static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 fpToSpDistance = cgFunc.GetMemlayout()->SizeOfArgsToStackPass() + cgFunc.GetFunction().GetFrameReseverdSlot(); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool isLmbc = (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); + if (cgFunc.HasVLAOrAlloca() || fpToSpDistance == 0 || isLmbc) { + int lmbcOffset = 0; + if (!isLmbc) { + stackFrameSize -= fpToSpDistance; + } else { + lmbcOffset = fpToSpDistance - (kDivide2 * k8ByteSize); + } + if (stackFrameSize > kStpLdpImm64UpperBound || isLmbc) { + Operand *o2; + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? 
lmbcOffset : 0), GetPointerSize() * kBitsPerByte);
+            Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2);
+            cgFunc.GetCurBB()->AppendInsn(deallocInsn);
+            if (cgFunc.GenCfi()) {
+                /* Append CFI restore */
+                if (useFP) {
+                    cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize));
+                }
+                cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize));
+            }
+            Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
+            Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true);
+            aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64);
+        } else {
+            MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, GetPointerSize() * kBitsPerByte);
+            Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2);
+            cgFunc.GetCurBB()->AppendInsn(deallocInsn);
+            if (cgFunc.GenCfi()) {
+                if (useFP) {
+                    cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize));
+                }
+                cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize));
+            }
+        }
+    } else {
+        Operand *o2;
+        o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast<int32>(fpToSpDistance), GetPointerSize() * kBitsPerByte);
+        if (fpToSpDistance > kStpLdpImm64UpperBound) {
+            (void)AppendInstructionForAllocateOrDeallocateCallFrame(fpToSpDistance, reg0, reg1, rty, false);
+        } else {
+            Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2);
+            cgFunc.GetCurBB()->AppendInsn(deallocInsn);
+        }
+
+        if (cgFunc.GenCfi()) {
+            if (useFP) {
+                cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize));
+            }
+            cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize));
+        }
+        Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
+        Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true);
+        aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64);
+    }
+}
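Both deallocation paths above turn on whether the restore offset fits the ldp/stp immediate field. Here is a minimal standalone sketch of that rule; the 504 upper bound mirrors kStpLdpImm64UpperBound in the source, while FitsLdpStp64 and the printed instruction sequences are invented for illustration:

    #include <cstdio>

    // ldp/stp of 64-bit registers takes a signed 7-bit scaled immediate:
    // multiples of 8 in [-512, 504].
    constexpr long long kStpLdpImm64UpperBound = 504;

    bool FitsLdpStp64(long long offset)
    {
        return offset >= -512 && offset <= kStpLdpImm64UpperBound && offset % 8 == 0;
    }

    int main()
    {
        for (long long off : {488LL, 504LL, 512LL, 4096LL}) {
            if (FitsLdpStp64(off)) {
                std::printf("ldp x29, x30, [sp, #%lld]\n", off);  // one instruction suffices
            } else {
                // ldp-add fallback: restore the pair at a reachable offset,
                // then release the rest of the frame with a separate add.
                std::printf("ldp x29, x30, [sp]\nadd sp, sp, #%lld\n", off);
            }
        }
        return 0;
    }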
+
+void AArch64GenProEpilog::GeneratePopRegs()
+{
+    auto &aarchCGFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    CG *currCG = cgFunc.GetCG();
+
+    const MapleVector<AArch64reg> &regsToRestore =
+        (!CGOptions::DoRegSavesOpt()) ? aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs();
+
+    CHECK_FATAL(!regsToRestore.empty(), "FP/LR not added to callee-saved list?");
+
+    AArch64reg intRegFirstHalf = kRinvalid;
+    AArch64reg fpRegFirstHalf = kRinvalid;
+
+    if (currCG->GenerateVerboseCG()) {
+        cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("restore callee-saved registers"));
+    }
+
+    MapleVector<AArch64reg>::const_iterator it = regsToRestore.begin();
+    /*
+     * Even if we don't use FP, since we push a pair of registers
+     * in a single instruction (i.e., stp) and the stack needs to be aligned
+     * on a 16-byte boundary, push FP as well if the function has a call.
+     * Make sure this is reflected when computing calleeSavedRegs.size();
+     * the first two registers are skipped below.
+     */
+    CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP");
+    ++it;
+    CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR");
+    ++it;
+
+    AArch64MemLayout *memLayout = static_cast<AArch64MemLayout *>(cgFunc.GetMemlayout());
+    int32 offset;
+    if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) {
+        offset = static_cast<int32>((memLayout->RealStackFrameSize() - aarchCGFunc.SizeOfCalleeSaved()) -
+                                    memLayout->GetSizeOfLocals());
+    } else {
+        offset = (static_cast<AArch64MemLayout *>(cgFunc.GetMemlayout())->RealStackFrameSize() -
+                  (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))) - /* for FP/LR */
+                 memLayout->SizeOfArgsToStackPass() -
+                 cgFunc.GetFunction().GetFrameReseverdSlot();
+    }
+
+    if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) {
+        offset -= kAarch64StackPtrAlignment;
+    }
+
+    if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) {
+        /* GR/VR save areas are above the callee save area */
+        AArch64MemLayout *ml = static_cast<AArch64MemLayout *>(cgFunc.GetMemlayout());
+        auto saveareasize = static_cast<int32>(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) +
+                                               RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize));
+        offset -= saveareasize;
+    }
+
+    /*
+     * We are using a cleared dummy block, so the insert point cannot be a ret;
+     * see GenerateEpilog()
+     */
+    for (; it != regsToRestore.end(); ++it) {
+        AArch64reg reg = *it;
+        CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?");
+        CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?");
+
+        RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat;
+        AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf;
+        if (firstHalf == kRinvalid) {
+            /* remember it */
+            firstHalf = reg;
+        } else {
+            /* flush the pair */
+            AppendInstructionPopPair(cgFunc, firstHalf, reg, regType, offset);
+            GetNextOffsetCalleeSaved(offset);
+            firstHalf = kRinvalid;
+        }
+    }
+
+    if (intRegFirstHalf != kRinvalid) {
+        AppendInstructionPopSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset);
+        GetNextOffsetCalleeSaved(offset);
+    }
+
+    if (fpRegFirstHalf != kRinvalid) {
+        AppendInstructionPopSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset);
+        GetNextOffsetCalleeSaved(offset);
+    }
+
+    if (!currCG->GenerateDebugFriendlyCode()) {
+        AppendInstructionDeallocateCallFrame(R29, RLR, kRegTyInt);
+    } else {
+        AppendInstructionDeallocateCallFrameDebug(R29, RLR, kRegTyInt);
+    }
+
+    if (cgFunc.GenCfi()) {
+        cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(RSP, 0, k64BitSize));
+    }
+    /*
+     * In case we split stp/ldp instructions, reset the split base offset
+     * so a stale base is not carried into the next function.
+     */
+    aarchCGFunc.SetSplitBaseOffset(0);
+}
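For a concrete feel of the non-lmbc restore offset computed above, here is a small self-checking sketch. All numbers are hypothetical, chosen only to make the arithmetic visible:

    #include <cstdint>
    #include <cassert>

    int main()
    {
        // Hypothetical layout: RealStackFrameSize() = 112,
        // SizeOfCalleeSaved() = 80 (FP/LR plus x19..x26),
        // SizeOfArgsToStackPass() = 16, no reserved slot.
        const int32_t realStackFrameSize = 112;
        const int32_t sizeOfCalleeSaved = 80;
        const int32_t argsToStackPass = 16;

        // FP/LR (2 * 8 bytes) sit at the frame base and are popped
        // separately, so they are excluded from the restore offset.
        int32_t offset = (realStackFrameSize - (sizeOfCalleeSaved - 2 * 8)) - argsToStackPass;
        assert(offset == 32);  // the first ldp of the remaining pairs reads [sp, #32]
        return 0;
    }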
+
+void AArch64GenProEpilog::AppendJump(const MIRSymbol &funcSymbol)
+{
+    auto &aarchCGFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    Operand &targetOpnd = aarchCGFunc.GetOrCreateFuncNameOpnd(funcSymbol);
+    cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
+}
+
+void AArch64GenProEpilog::GenerateEpilog(BB &bb)
+{
+    if (!cgFunc.GetHasProEpilogue()) {
+        if (bb.GetPreds().empty() || !TestPredsOfRetBB(bb)) {
+            GenerateRet(bb);
+        }
+        return;
+    }
+
+    /* generate the stack-protector check instructions */
+    BB &epilogBB = GenStackGuardCheckInsn(bb);
+
+    auto &aarchCGFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    CG *currCG = cgFunc.GetCG();
+    BB *formerCurBB = cgFunc.GetCurBB();
+    aarchCGFunc.GetDummyBB()->ClearInsns();
+    aarchCGFunc.GetDummyBB()->SetIsProEpilog(true);
+    cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB());
+
+    Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt);
+    Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt);
+
+    if (cgFunc.HasVLAOrAlloca() && cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
+        aarchCGFunc.SelectCopy(spOpnd, PTY_u64, fpOpnd, PTY_u64);
+    }
+
+    /* Hack: the exit bb should always be reachable, since we need its existence for ".cfi_remember_state" */
+    if (&epilogBB != cgFunc.GetLastBB() && epilogBB.GetNext() != nullptr) {
+        BB *nextBB = epilogBB.GetNext();
+        do {
+            if (nextBB == cgFunc.GetLastBB() || !nextBB->IsEmpty()) {
+                break;
+            }
+            nextBB = nextBB->GetNext();
+        } while (nextBB != nullptr);
+        if (nextBB != nullptr && !nextBB->IsEmpty() && cgFunc.GenCfi()) {
+            cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_remember_state));
+            cgFunc.GetCurBB()->SetHasCfi();
+            nextBB->InsertInsnBefore(*nextBB->GetFirstInsn(),
+                                     cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_restore_state));
+            nextBB->SetHasCfi();
+        }
+    }
+
+    const MapleVector<AArch64reg> &regsToSave =
+        (!CGOptions::DoRegSavesOpt()) ? aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs();
+    if (!regsToSave.empty()) {
+        GeneratePopRegs();
+    } else {
+        auto stackFrameSize = static_cast<AArch64MemLayout *>(cgFunc.GetMemlayout())->RealStackFrameSize();
+        if (stackFrameSize > 0) {
+            if (currCG->GenerateVerboseCG()) {
+                cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCommentInsn("pop up activation frame"));
+            }
+
+            if (cgFunc.HasVLAOrAlloca()) {
+                auto size = static_cast<AArch64MemLayout *>(cgFunc.GetMemlayout())->GetSegArgsToStkPass().GetSize();
+                stackFrameSize = stackFrameSize < size ? 0 : stackFrameSize - size;
+            }
+
+            if (stackFrameSize > 0) {
+                Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true);
+                aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64);
+                if (cgFunc.GenCfi()) {
+                    cgFunc.GetCurBB()->AppendInsn(aarchCGFunc.CreateCfiDefCfaInsn(RSP, 0, k64BitSize));
+                }
+            }
+        }
+    }
+
+    if (currCG->InstrumentWithDebugTraceCall()) {
+        AppendJump(*(currCG->GetDebugTraceExitFunction()));
+    }
+
+    GenerateRet(*(cgFunc.GetCurBB()));
+    epilogBB.AppendBBInsns(*cgFunc.GetCurBB());
+    if (cgFunc.GetCurBB()->GetHasCfi()) {
+        epilogBB.SetHasCfi();
+    }
+
+    cgFunc.SetCurBB(*formerCurBB);
+    aarchCGFunc.GetDummyBB()->SetIsProEpilog(false);
+}
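The remember/restore pairing in GenerateEpilog keeps unwinding correct for code that follows an epilogue in layout order but still runs under the frame's CFI rules. A toy sketch of the emitted shape; EmitCfi and the printed instructions are invented for illustration, the real backend goes through its insn builder:

    #include <cstdio>

    // Toy emitter: prints the directive stream around a mid-function epilogue.
    void EmitCfi(const char *directive) { std::printf("\t%s\n", directive); }

    int main()
    {
        EmitCfi(".cfi_remember_state");   // snapshot the CFA/register rules
        std::printf("\tldp x29, x30, [sp], #16\n");
        std::printf("\tret\n");
        EmitCfi(".cfi_restore_state");    // later blocks unwind with the saved rules
        return 0;
    }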
+
+void AArch64GenProEpilog::GenerateEpilogForCleanup(BB &bb)
+{
+    auto &aarchCGFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    CHECK_FATAL(!cgFunc.GetExitBBsVec().empty(), "exit bb size is zero!");
+    if (cgFunc.GetExitBB(0)->IsUnreachable()) {
+        /* if the exit bb is unreachable, no epilogue can be generated for it */
+        GenerateEpilog(bb);
+    } else if (aarchCGFunc.NeedCleanup()) { /* bl to the exit epilogue */
+        LabelOperand &targetOpnd = aarchCGFunc.GetOrCreateLabelOperand(cgFunc.GetExitBB(0)->GetLabIdx());
+        bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd));
+    }
+}
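ConvertToTailCalls, next, rewrites qualifying bl/blr call sites into their tail-call forms and then clones the (call-free) epilogue in front of them. A standalone sketch of the opcode rewrite; the MOp enum values are invented stand-ins for Maple's machine opcodes:

    #include <cassert>

    enum MOp { kBl, kBlr, kTailB, kTailBr };  // invented stand-ins

    // A bl becomes b and a blr becomes br; anything else is a caller bug.
    MOp ToTailCall(MOp call)
    {
        switch (call) {
            case kBl:
                return kTailB;
            case kBlr:
                return kTailBr;
            default:
                assert(false && "not a convertible call");
                return call;
        }
    }

    int main()
    {
        assert(ToTailCall(kBl) == kTailB);
        assert(ToTailCall(kBlr) == kTailBr);
        return 0;
    }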
+
+void AArch64GenProEpilog::ConvertToTailCalls(MapleSet<Insn *> &callInsnsMap)
+{
+    BB *exitBB = GetCurTailcallExitBB();
+
+    /*
+     * The exit BB is only fully populated by this point. If it restores SP,
+     * extra stack space was allocated (e.g. a call with more than 8 arguments,
+     * or a large aggregate argument), so the calls cannot be converted.
+     */
+    FOR_BB_INSNS(insn, exitBB) {
+        if (insn->GetMachineOpcode() == MOP_xaddrri12 || insn->GetMachineOpcode() == MOP_xaddrri24) {
+            RegOperand &reg = static_cast<RegOperand &>(insn->GetOperand(0));
+            if (reg.GetRegisterNumber() == RSP) {
+                return;
+            }
+        }
+    }
+
+    /* Replace all of the call insns. */
+    for (Insn *callInsn : callInsnsMap) {
+        MOperator insnMop = callInsn->GetMachineOpcode();
+        switch (insnMop) {
+            case MOP_xbl: {
+                callInsn->SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xbl]);
+                break;
+            }
+            case MOP_xblr: {
+                callInsn->SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xblr]);
+                break;
+            }
+            default:
+                CHECK_FATAL(false, "Internal error.");
+                break;
+        }
+        BB *bb = callInsn->GetBB();
+        if (bb->GetKind() == BB::kBBGoto) {
+            bb->SetKind(BB::kBBFallthru);
+            if (bb->GetLastInsn()->GetMachineOpcode() == MOP_xuncond) {
+                bb->RemoveInsn(*bb->GetLastInsn());
+            }
+        }
+        for (auto sBB : bb->GetSuccs()) {
+            bb->RemoveSuccs(*sBB);
+            sBB->RemovePreds(*bb);
+            break;
+        }
+    }
+
+    /* copy instrs from exit block */
+    for (Insn *callInsn : callInsnsMap) {
+        BB *toBB = callInsn->GetBB();
+        BB *fromBB = exitBB;
+        if (toBB == fromBB) {
+            /* the callsite is also in the return exit block; just change the return to a branch */
+            Insn *lastInsn = toBB->GetLastInsn();
+            if (lastInsn->GetMachineOpcode() == MOP_xret) {
+                Insn *newInsn = cgFunc.GetTheCFG()->CloneInsn(*callInsn);
+                toBB->ReplaceInsn(*lastInsn, *newInsn);
+                for (Insn *insn = callInsn->GetNextMachineInsn(); insn != newInsn; insn = insn->GetNextMachineInsn()) {
+                    insn->SetDoNotRemove(true);
+                }
+                toBB->RemoveInsn(*callInsn);
+                return;
+            }
+            CHECK_FATAL(0, "Tailcall in incorrect block");
+        }
+        FOR_BB_INSNS_SAFE(insn, fromBB, next) {
+            if (insn->IsCfiInsn() || (insn->IsMachineInstruction() && insn->GetMachineOpcode() != MOP_xret)) {
+                Insn *newInsn = cgFunc.GetTheCFG()->CloneInsn(*insn);
+                newInsn->SetDoNotRemove(true);
+                toBB->InsertInsnBefore(*callInsn, *newInsn);
+            }
+        }
+    }
+
+    /* remove instrs in exit block */
+    BB *bb = exitBB;
+    if (bb->GetPreds().size() > 0) {
+        return; /* exit block still needed by other non-tailcall blocks */
+    }
+    Insn &junk = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none);
+    bb->AppendInsn(junk);
+    FOR_BB_INSNS_SAFE(insn, bb, next) {
+        if (insn->GetMachineOpcode() != MOP_pseudo_none) {
+            bb->RemoveInsn(*insn);
+        }
+    }
+}
+
+void AArch64GenProEpilog::Run()
+{
+    CHECK_FATAL(cgFunc.GetFunction().GetBody()->GetFirst()->GetOpCode() == OP_label,
+                "The first statement should be a label");
+    NeedStackProtect();
+    cgFunc.SetHasProEpilogue(NeedProEpilog());
+    if (cgFunc.GetHasProEpilogue()) {
+        GenStackGuard(*(cgFunc.GetFirstBB()));
+    }
+    BB *proLog = nullptr;
+    if (cgFunc.GetCG()->DoPrologueEpilogue() && Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2) {
+        /* There are some O2-dependent assumptions made */
+        proLog = IsolateFastPath(*(cgFunc.GetFirstBB()));
+    }
+
+    if (cgFunc.IsExitBBsVecEmpty()) {
+        if (cgFunc.GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc.GetCleanupLabel() &&
+            cgFunc.GetLastBB()->GetPrev()->GetPrev()) {
+            cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev()->GetPrev());
+        } else {
+            cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev());
+        }
+    }
+
+    if (proLog != nullptr) {
+        GenerateProlog(*proLog);
+        proLog->SetFastPath(true);
+        cgFunc.GetFirstBB()->SetFastPath(true);
+    } else {
+        GenerateProlog(*(cgFunc.GetFirstBB()));
+    }
+
+    for (auto *exitBB : cgFunc.GetExitBBsVec()) {
+        if (GetFastPathReturnBB() != exitBB) {
+            GenerateEpilog(*exitBB);
+        }
+    }
+
+    if (cgFunc.GetFunction().IsJava()) {
+        GenerateEpilogForCleanup(*(cgFunc.GetCleanupBB()));
+    }
+
+    if (cgFunc.GetMirModule().IsCModule() && !exitBB2CallSitesMap.empty()) {
+        cgFunc.GetTheCFG()->InitInsnVisitor(cgFunc);
+        for (auto pair : exitBB2CallSitesMap) {
+            BB *curExitBB = pair.first;
+            MapleSet<Insn *> &callInsnsMap = pair.second;
+            SetCurTailcallExitBB(curExitBB);
+            ConvertToTailCalls(callInsnsMap);
+        }
+    }
+}
+} /* namespace maplebe */
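The new aarch64_prop.cpp below opens with MayOverflow, which guards constant folding by detecting two's-complement wrap-around through sign-bit comparison. A standalone sketch of the same argument for 64-bit addition, using standard fixed-width types:

    #include <cstdint>
    #include <cassert>

    // Signed a + b overflows iff a and b share a sign and the result's sign
    // differs, i.e. the result's sign bit matches neither input's.
    bool AddMayOverflow64(int64_t a, int64_t b)
    {
        int64_t res = static_cast<int64_t>(static_cast<uint64_t>(a) + static_cast<uint64_t>(b));
        const int sign = 63;  // (width - 1), as in the source's rightShiftNumToGetSignFlag
        return ((res >> sign) != (a >> sign)) && ((res >> sign) != (b >> sign));
    }

    int main()
    {
        assert(!AddMayOverflow64(1, 2));
        assert(AddMayOverflow64(INT64_MAX, 1));   // wraps negative
        assert(AddMayOverflow64(INT64_MIN, -1));  // wraps positive
        return 0;
    }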
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_prop.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_prop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..59777a7986480ba6cf5af40fcd69ffaf17b1502d
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_prop.cpp
@@ -0,0 +1,2506 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "aarch64_prop.h"
+#include "aarch64_isa.h"
+#include "aarch64_cg.h"
+#include "aarch64_reg_coalesce.h"
+#include <climits>
+
+namespace maplebe {
+
+#define PROP_DUMP CG_DEBUG_FUNC(cgFunc)
+
+bool MayOverflow(const ImmOperand &value1, const ImmOperand &value2, bool is64Bit, bool isAdd, bool isSigned)
+{
+    if (value1.GetVary() || value2.GetVary()) {
+        return false;
+    }
+    int64 cstA = value1.GetValue();
+    int64 cstB = value2.GetValue();
+    if (isAdd) {
+        int64 res = static_cast<int64>(static_cast<uint64>(cstA) + static_cast<uint64>(cstB));
+        if (!isSigned) {
+            return static_cast<uint64>(res) < static_cast<uint64>(cstA);
+        }
+        auto rightShiftNumToGetSignFlag = (is64Bit ? 64 : 32) - 1;
+        return (static_cast<uint64>(res) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstA) >> rightShiftNumToGetSignFlag) &&
+               (static_cast<uint64>(res) >> rightShiftNumToGetSignFlag !=
+                static_cast<uint64>(cstB) >> rightShiftNumToGetSignFlag);
+    } else {
+        /* sub */
+        if (!isSigned) {
+            return cstA < cstB;
+        }
+        int64 res = static_cast<int64>(static_cast<uint64>(cstA) - static_cast<uint64>(cstB));
+        auto rightShiftNumToGetSignFlag = (is64Bit ?
64 : 32) - 1; + return (static_cast(cstA) >> rightShiftNumToGetSignFlag != + static_cast(cstB) >> rightShiftNumToGetSignFlag) && + (static_cast(res) >> rightShiftNumToGetSignFlag != + static_cast(cstA) >> rightShiftNumToGetSignFlag); + } +} + +bool AArch64Prop::IsInLimitCopyRange(VRegVersion *toBeReplaced) +{ + uint32 baseID = toBeReplaced->GetDefInsnInfo()->GetInsn()->GetId(); + MapleUnorderedMap &useList = toBeReplaced->GetAllUseInsns(); + for (auto it : useList) { + if (it.second->GetInsn()->GetId() - baseID > k16BitSize) { + return false; + } + } + return true; +} + +void AArch64Prop::CopyProp() +{ + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo(), GetRegll()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); +} + +void AArch64Prop::TargetProp(Insn &insn) +{ + A64ConstProp a64ConstProp(*memPool, *cgFunc, *GetSSAInfo(), insn); + a64ConstProp.DoOpt(); + A64StrLdrProp a64StrLdrProp(*memPool, *cgFunc, *GetSSAInfo(), insn, *GetDce()); + a64StrLdrProp.DoOpt(); +} + +void A64ConstProp::DoOpt() +{ + if (curInsn->GetMachineOpcode() == MOP_wmovri32 || curInsn->GetMachineOpcode() == MOP_xmovri64) { + Operand &destOpnd = curInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(destOpnd.IsRegister(), "must be reg operand"); + auto &destReg = static_cast(destOpnd); + if (destReg.IsSSAForm()) { + VRegVersion *destVersion = ssaInfo->FindSSAVersion(destReg.GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + Operand &constOpnd = curInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(constOpnd.IsImmediate(), "must be imm operand"); + auto &immOperand = static_cast(constOpnd); + bool isZero = immOperand.IsZero(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (isZero) { + ZeroRegProp(*useDUInfoIt.second, *destVersion->GetSSAvRegOpnd()); + destVersion->CheckDeadUse(*useDUInfoIt.second->GetInsn()); + } + (void)ConstProp(*useDUInfoIt.second, immOperand); + } + } + } +} + +void A64ConstProp::ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) +{ + auto *useInsn = useDUInfo.GetInsn(); + const InsnDesc *md = &AArch64CG::kMd[(useInsn->GetMachineOpcode())]; + /* special case */ + bool isSpecficCase = useInsn->GetMachineOpcode() == MOP_wbfirri5i5 || useInsn->GetMachineOpcode() == MOP_xbfirri6i6; + isSpecficCase &= + (useDUInfo.GetOperands().size() == 1) && (useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd); + if (useInsn->IsStore() || md->IsCondDef() || isSpecficCase) { + RegOperand &zeroOpnd = cgFunc->GetZeroOpnd(toReplaceReg.GetSize()); + for (auto &opndIt : useDUInfo.GetOperands()) { + if (useInsn->IsStore() && opndIt.first != 0) { + return; + } + Operand &opnd = useInsn->GetOperand(opndIt.first); + A64ReplaceRegOpndVisitor replaceRegOpndVisitor(*cgFunc, *useInsn, opndIt.first, toReplaceReg, zeroOpnd); + opnd.Accept(replaceRegOpndVisitor); + useDUInfo.ClearDU(opndIt.first); + } + } +} + +MOperator A64ConstProp::GetReversalMOP(MOperator arithMop) +{ + switch (arithMop) { + case MOP_waddrri12: + return MOP_wsubrri12; + case MOP_xaddrri12: + return MOP_xsubrri12; + case MOP_xsubrri12: + return MOP_xaddrri12; + case MOP_wsubrri12: + return MOP_waddrri12; + default: + CHECK_FATAL(false, "NYI"); + break; + } + return MOP_undef; +} + +MOperator A64ConstProp::GetRegImmMOP(MOperator regregMop, bool withLeftShift) +{ + switch (regregMop) { + case MOP_xaddrrrs: + case MOP_xaddrrr: { + return withLeftShift ? 
MOP_xaddrri24 : MOP_xaddrri12; + } + case MOP_waddrrrs: + case MOP_waddrrr: { + return withLeftShift ? MOP_waddrri24 : MOP_waddrri12; + } + case MOP_xsubrrrs: + case MOP_xsubrrr: { + return withLeftShift ? MOP_xsubrri24 : MOP_xsubrri12; + } + case MOP_wsubrrrs: + case MOP_wsubrrr: { + return withLeftShift ? MOP_wsubrri24 : MOP_wsubrri12; + } + case MOP_xandrrrs: + return MOP_xandrri13; + case MOP_wandrrrs: + return MOP_wandrri12; + case MOP_xeorrrrs: + return MOP_xeorrri13; + case MOP_weorrrrs: + return MOP_weorrri12; + case MOP_xiorrrrs: + case MOP_xbfirri6i6: + return MOP_xiorrri13; + case MOP_wiorrrrs: + case MOP_wbfirri5i5: + return MOP_wiorrri12; + case MOP_xmovrr: { + return MOP_xmovri64; + } + case MOP_wmovrr: { + return MOP_wmovri32; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + return MOP_undef; +} + +MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn) +{ + MOperator arithMop = arithInsn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (arithMop) { + case MOP_waddrrr: + case MOP_xaddrrr: { + newVal = constVal + constVal; + newMop = (arithMop == MOP_waddrrr) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_waddrrrs: + case MOP_xaddrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch (sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal + static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal + (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal + (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_waddrrrs) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + newVal = 0; + newMop = (arithMop == MOP_wsubrrr) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch (sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal - static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal - (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal - (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_wsubrrrs) ? 
MOP_wmovri32 : MOP_xmovri64; + break; + } + default: + DEBUG_ASSERT(false, "this case is not supported currently"); + break; + } + return newMop; +} + +void A64ConstProp::ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const +{ + ssaInfo->ReplaceInsn(oriInsn, newInsn); + oriInsn.GetBB()->ReplaceInsn(oriInsn, newInsn); + /* dump insn replacement here */ +} + +bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) +{ + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + if (constOpnd.IsSingleInstructionMovable(destOpnd.GetSize())) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + DEBUG_ASSERT(useOpndIdx == kInsnSecondOpnd, "invalid instruction in ssa form"); + if (useOpndIdx == kInsnSecondOpnd) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, constOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } else { + DEBUG_ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +/* support add now */ +bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) +{ + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + MOperator newMop = GetRegImmMOP(curMop, false); + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + CHECK_FATAL(useOpndIdx == kInsnSecondOpnd || useOpndIdx == kInsnThirdOpnd, "check this insn"); + Insn *newInsn = nullptr; + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &constOpnd, kInsnThirdOpnd)) { + if (useOpndIdx == kInsnThirdOpnd) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), + useInsn->GetOperand(kInsnSecondOpnd), constOpnd); + } else if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Add) { /* swap operand due to legality in aarch */ + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), + useInsn->GetOperand(kInsnThirdOpnd), constOpnd); + } + } + /* try aggressive opt in aarch64 add and sub */ + if (newInsn == nullptr && (aT == kAArch64Add || aT == kAArch64Sub)) { + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + /* try aarch64 imm shift mode */ + tempImm->SetValue(tempImm->GetValue() >> 12); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd) && + CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) { + DEBUG_ASSERT(false, "NIY"); + } + auto *zeroImm = &(static_cast(cgFunc)->CreateImmOperand(0, constOpnd.GetSize(), true)); + /* value in immOpnd is signed */ + if (MayOverflow(*zeroImm, constOpnd, constOpnd.GetSize() == 64, false, true)) { + return false; + } + /* (constA - var) can not reversal to (var + (-constA)) */ + if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Sub) { + return false; + } + /* Addition and subtraction reversal */ + tempImm->SetValue(-constOpnd.GetValue()); + newMop = GetReversalMOP(newMop); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd)) { + auto *cgImm = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), + useInsn->GetOperand(kInsnSecondOpnd), *cgImm); + if (useOpndIdx 
== kInsnSecondOpnd) { /* swap operand due to legality in aarch */ + newInsn->SetOperand(kInsnSecondOpnd, useInsn->GetOperand(kInsnThirdOpnd)); + } + } + } + if (newInsn != nullptr) { + ReplaceInsnAndUpdateSSA(*useInsn, *newInsn); + return true; + } + } else if (useDUInfo.GetOperands().size() == 2) { + /* only support add & sub now */ + int64 newValue = 0; + MOperator newMop = GetFoldMopAndVal(newValue, constOpnd.GetValue(), *useInsn); + bool isSigned = (newValue < 0); + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + tempImm->SetValue(newValue); + tempImm->SetSigned(isSigned); + if (tempImm->IsSingleInstructionMovable()) { + auto *newImmOpnd = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + auto &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), *newImmOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } else { + CHECK_FATAL(false, "invalid immediate"); + } + } else { + DEBUG_ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT) +{ + Insn *useInsn = useDUInfo.GetInsn(); + if (useDUInfo.GetOperands().size() == 1) { + Operand &existedImm = useInsn->GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(existedImm.IsImmediate(), "must be"); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + bool is64Bit = destOpnd.GetSize() == k64BitSize; + ImmOperand *foldConst = CanDoConstFold(constOpnd, static_cast(existedImm), aT, is64Bit); + if (foldConst != nullptr) { + MOperator newMop = is64Bit ? MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, *foldConst); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + return false; +} + +bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) +{ + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnThirdOpnd) { + auto &shiftBit = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + int64 val = constOpnd.GetValue(); + if (shiftBit.GetShiftOp() == BitShiftOperand::kLSL) { + val = val << shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kLSR) { + val = val >> shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kASR) { + val = static_cast((static_cast(val)) >> shiftBit.GetShiftAmount()); + } else { + CHECK_FATAL(false, "shift type is not defined"); + } + auto *newImm = static_cast(constOpnd.Clone(*constPropMp)); + newImm->SetValue(val); + MOperator newMop = GetRegImmMOP(curMop, false); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, newImm, kInsnThirdOpnd)) { + auto *cgNewImm = static_cast(constOpnd.Clone(*cgFunc->GetMemoryPool())); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), + useInsn->GetOperand(kInsnSecondOpnd), *cgNewImm); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + return false; +} + +bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) +{ + MOperator curMop = useDUInfo.GetInsn()->GetMachineOpcode(); + switch (curMop) { + case MOP_xmovrr: + case MOP_wmovrr: { + return MovConstReplace(useDUInfo, constOpnd); + } + case MOP_xsubrrr: + case MOP_wsubrrr: { + return 
ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xaddrrr: + case MOP_waddrrr: { + return ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_waddrri12: + case MOP_xaddrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_xsubrri12: + case MOP_wsubrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xiorrrrs: + case MOP_wiorrrrs: + case MOP_xeorrrrs: + case MOP_weorrrrs: + case MOP_xandrrrs: + case MOP_wandrrrs: + case MOP_xaddrrrs: + case MOP_waddrrrs: + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + return ShiftConstReplace(useDUInfo, constOpnd); + } + case MOP_wbfirri5i5: + case MOP_xbfirri6i6: { + return BitInsertReplace(useDUInfo, constOpnd); + } + default: + break; + } + return false; +} + +bool A64ConstProp::BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) +{ + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().begin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnSecondOpnd) { + auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + auto val = static_cast(constOpnd.GetValue()); + /* bfi width in the range [1 -64] */ + auto width = static_cast(widthOpnd.GetValue()); + /* bit number of the lsb of the destination bitfield */ + auto lsb = static_cast(lsbOpnd.GetValue()); + val = val & ((1U << width) - 1U); + if (__builtin_popcountl(val) == width) { + val = val << lsb; + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &newOpnd = cgFunc->CreateImmOperand(PTY_i64, static_cast(val)); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &newOpnd, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, useInsn->GetOperand(kInsnFirstOpnd), + useInsn->GetOperand(kInsnFirstOpnd), newOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + } + return false; +} + +ImmOperand *A64ConstProp::CanDoConstFold(const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, + bool is64Bit) +{ + auto *tempImm = static_cast(value1.Clone(*constPropMp)); + int64 newVal = 0; + bool isSigned = value1.IsSignedValue(); + if (value1.IsSignedValue() != value2.IsSignedValue()) { + isSigned = false; + } + if (MayOverflow(value1, value2, is64Bit, aT == kAArch64Add, isSigned)) { + return nullptr; + } + switch (aT) { + case kAArch64Add: { + newVal = value1.GetValue() + value2.GetValue(); + break; + } + case kAArch64Sub: { + newVal = value1.GetValue() - value2.GetValue(); + break; + } + default: + return nullptr; + } + if (!is64Bit && isSigned && (newVal > INT_MAX || newVal < INT_MIN)) { + return nullptr; + } + if (!is64Bit && !isSigned && (newVal > UINT_MAX || newVal < 0)) { + return nullptr; + } + if (newVal < 0) { + tempImm->SetSigned(); + } + tempImm->SetValue(newVal); + if (value2.GetVary() == kUnAdjustVary) { + tempImm->SetVary(kUnAdjustVary); + } + bool canBeMove = tempImm->IsSingleInstructionMovable(k64BitSize); + return canBeMove ? 
static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())) : nullptr; +} + +void A64StrLdrProp::DoOpt() +{ + DEBUG_ASSERT(curInsn != nullptr, "not input insn"); + bool tryOptAgain = false; + do { + tryOptAgain = false; + MemOperand *currMemOpnd = StrLdrPropPreCheck(*curInsn); + if (currMemOpnd != nullptr && memPropMode != kUndef) { + /* can be changed to recursive propagation */ + if (ReplaceMemOpnd(*currMemOpnd, nullptr)) { + tryOptAgain = true; + } + replaceVersions.clear(); + } + } while (tryOptAgain); +} + +bool A64StrLdrProp::ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn) +{ + auto GetDefInsn = [&defInsn, this](const RegOperand ®Opnd, std::vector &allUseInsns) -> void { + if (regOpnd.IsSSAForm() && defInsn == nullptr) { + VRegVersion *replacedV = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + if (replacedV->GetDefInsnInfo() != nullptr) { + for (auto it : replacedV->GetAllUseInsns()) { + allUseInsns.emplace_back(it.second->GetInsn()); + } + defInsn = replacedV->GetDefInsnInfo()->GetInsn(); + } + } + }; + RegOperand *replacedReg = nullptr; + std::vector allUseInsns; + std::vector newMemOpnds; + bool doFullReplaceProp = true; /* due to register pressure, do not do partial prop */ + if (memPropMode == kPropBase) { + replacedReg = currMemOpnd.GetBaseRegister(); + } else { + Operand *offset = currMemOpnd.GetOffset(); + DEBUG_ASSERT(offset->IsRegister(), "must be"); + replacedReg = static_cast(offset); + } + CHECK_FATAL(replacedReg != nullptr, "check this insn"); + GetDefInsn(*replacedReg, allUseInsns); + if (defInsn != nullptr) { + for (auto useInsn : allUseInsns) { + MemOperand *oldMemOpnd = StrLdrPropPreCheck(*useInsn, memPropMode); + if (CheckSameReplace(*replacedReg, oldMemOpnd)) { + MemOperand *newMemOpnd = SelectReplaceMem(*defInsn, *oldMemOpnd); + if (newMemOpnd != nullptr) { + uint32 opndIdx = GetMemOpndIdx(oldMemOpnd, *useInsn); + if (CheckNewMemOffset(*useInsn, newMemOpnd, opndIdx)) { + newMemOpnds.emplace_back(newMemOpnd); + continue; + } + } + } + doFullReplaceProp = false; + break; + } + } else { + doFullReplaceProp = false; + } + if (doFullReplaceProp) { + for (size_t i = 0; i < newMemOpnds.size(); ++i) { + DoMemReplace(*replacedReg, *newMemOpnds[i], *allUseInsns[i]); + } + return true; + } + return false; +} + +bool A64StrLdrProp::CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const +{ + if (memOpnd != nullptr && memPropMode != kUndef) { + if (memPropMode == kPropBase) { + return replacedReg.GetRegisterNumber() == memOpnd->GetBaseRegister()->GetRegisterNumber(); + } else { + Operand *offset = memOpnd->GetOffset(); + DEBUG_ASSERT(offset->IsRegister(), "must be"); + return replacedReg.GetRegisterNumber() == static_cast(offset)->GetRegisterNumber(); + } + } + return false; +} + +uint32 A64StrLdrProp::GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const +{ + uint32 opndIdx = kInsnMaxOpnd; + if (insn.IsLoadPair() || insn.IsStorePair()) { + DEBUG_ASSERT(newMemOpnd->GetOffsetImmediate() != nullptr, "unexpect insn"); + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + return opndIdx; +} + +void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn) +{ + VRegVersion *replacedV = ssaInfo->FindSSAVersion(replacedReg.GetRegisterNumber()); + DEBUG_ASSERT(replacedV != nullptr, "must in ssa form"); + uint32 opndIdx = GetMemOpndIdx(&newMem, useInsn); + replacedV->RemoveUseInsn(useInsn, opndIdx); + if (replacedV->GetAllUseInsns().empty()) { + 
(void)cgDce->RemoveUnuseDef(*replacedV); + } + for (auto &replaceit : replaceVersions) { + replaceit.second->AddUseInsn(*ssaInfo, useInsn, opndIdx); + } + useInsn.SetOperand(opndIdx, newMem); +} + +MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod) +{ + memPropMode = kUndef; + if (insn.IsLoad() || insn.IsStore()) { + if (insn.IsAtomic()) { + return nullptr; + } + auto *currMemOpnd = static_cast(insn.GetMemOpnd()); + if (currMemOpnd != nullptr) { + memPropMode = SelectStrLdrPropMode(*currMemOpnd); + if (prevMod != kUndef) { + if (prevMod != memPropMode) { + memPropMode = prevMod; + return nullptr; + } + } + return currMemOpnd; + } + } + return nullptr; +} + +MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const MemOperand &currMemOpnd) +{ + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + MemPropMode innerMemPropMode = kUndef; + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + innerMemPropMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + innerMemPropMode = kPropOffset; + auto amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + innerMemPropMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + innerMemPropMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + innerMemPropMode = kPropUnsignedExtend; + } + break; + } + default: + innerMemPropMode = kUndef; + } + return innerMemPropMode; +} + +MemOperand *A64StrLdrProp::SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd) +{ + MemOperand *newMemOpnd = nullptr; + Operand *offset = currMemOpnd.GetOffset(); + RegOperand *base = currMemOpnd.GetBaseRegister(); + MOperator opCode = defInsn.GetMachineOpcode(); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri24: + case MOP_waddrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + int64 defVal = (immOpnd.GetValue() << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xsubrri24: + case MOP_wsubrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + int64 defVal = 
-(immOpnd.GetValue() << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + + if (replace != nullptr && newOfst != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *replace, newOfst, nullptr, nullptr); + } + } + break; + } + case MOP_xaddrrrs: + case MOP_waddrrrs: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newBaseOpnd = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + RegOperand *newIndexOpnd = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnThirdOpnd))); + auto &shift = static_cast(defInsn.GetOperand(kInsnFourthOpnd)); + if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + break; + } + if (newBaseOpnd != nullptr && newIndexOpnd != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *newBaseOpnd, *newIndexOpnd, + shift.GetShiftAmount(), false); + } + } + break; + } + case MOP_xadrpl12: { + if (memPropMode == kPropBase) { + if (currMemOpnd.GetSize() >= 128) { + // We can not be sure that the page offset is 16-byte aligned + break; + } + auto *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + auto *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = + &static_cast(cgFunc)->CreateOfstOpnd(static_cast(val), k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + RegOperand *replace = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeLo12Li, currMemOpnd.GetSize(), *replace, nullptr, newOfsetOpnd, addr); + } + } + break; + } + /* do this in const prop ? 
*/ + case MOP_wmovri32: + case MOP_xmovri64: { + if (memPropMode == kPropOffset) { + auto *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOi, currMemOpnd.GetSize(), *base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + auto *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (newOfst != nullptr) { + uint32 shift = static_cast(static_cast(imm->GetValue())); + if (memPropMode == kPropOffset) { + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } else if (memPropMode == kPropShift) { + shift += currMemOpnd.ShiftAmount(); + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, *base, static_cast(currMemOpnd.ShiftAmount()), true, + currMemOpnd.GetSize()); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, *base, static_cast(currMemOpnd.ShiftAmount()), false, + currMemOpnd.GetSize()); + break; + } + default: + break; + } + return newMemOpnd; +} + +RegOperand *A64StrLdrProp::GetReplaceReg(RegOperand &a64Reg) +{ + if (a64Reg.IsSSAForm()) { + regno_t ssaIndex = a64Reg.GetRegisterNumber(); + replaceVersions[ssaIndex] = ssaInfo->FindSSAVersion(ssaIndex); + DEBUG_ASSERT(replaceVersions.size() <= 2, "CHECK THIS CASE IN A64PROP"); + return &a64Reg; + } + return nullptr; +} + +MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, + uint32 memSize) const +{ + if (memPropMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOi, memSize, replace, nullptr, + newOfstImm, nullptr); +} + +MemOperand *A64StrLdrProp::SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, bool isSigned, + uint32 memSize) +{ + MemOperand *newMemOpnd = nullptr; + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn.GetOperand(kInsnSecondOpnd))); + if (newOfst == nullptr) { + return nullptr; + } + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (memPropMode == kPropShift) || ((memPropMode == kPropSignedExtend) && isSigned) || + ((memPropMode == kPropUnsignedExtend) && !isSigned); + if (memPropMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOrX, memSize, base, + *newOfst, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOrX, memSize, base, + *newOfst, amount, isSigned); + } else { + return nullptr; + } + return 
newMemOpnd; +} + +bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const +{ + auto *a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc->IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = static_cast(newMemOpnd->ShiftAmount()); + if (!AArch64StoreLoadOpt::CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +void AArch64Prop::PropPatternOpt() +{ + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); +} + +bool ExtendShiftPattern::IsSwapInsn(const Insn &insn) const +{ + MOperator op = insn.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xiorrrr: + case MOP_wiorrrr: + return true; + default: + return false; + } +} + +void ExtendShiftPattern::SetExMOpType(const Insn &use) +{ + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: { + exMOpType = kExAdd; + break; + } + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: { + exMOpType = kEwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: { + exMOpType = kExSub; + break; + } + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: { + exMOpType = kEwSub; + break; + } + case MOP_xcmnrr: + case MOP_xwcmnrre: + case MOP_xcmnrrs: { + exMOpType = kExCmn; + break; + } + case MOP_wcmnrr: + case MOP_wwcmnrre: + case MOP_wcmnrrs: { + exMOpType = kEwCmn; + break; + } + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: { + exMOpType = kExCmp; + break; + } + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: { + exMOpType = kEwCmp; + break; + } + default: { + exMOpType = kExUndef; + } + } +} + +void ExtendShiftPattern::SetLsMOpType(const Insn &use) +{ + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xaddrrrs: { + lsMOpType = kLxAdd; + break; + } + case MOP_waddrrr: + case MOP_waddrrrs: { + lsMOpType = kLwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xsubrrrs: { + lsMOpType = kLxSub; + break; + } + case MOP_wsubrrr: + case MOP_wsubrrrs: { + lsMOpType = kLwSub; + break; + } + case MOP_xcmnrr: + case MOP_xcmnrrs: { + lsMOpType = kLxCmn; + break; + } + case MOP_wcmnrr: + case MOP_wcmnrrs: { + lsMOpType = kLwCmn; + break; + } + case MOP_xcmprr: + case MOP_xcmprrs: { + lsMOpType = kLxCmp; + break; + } + case MOP_wcmprr: + case MOP_wcmprrs: { + lsMOpType = kLwCmp; + break; + } + case MOP_xeorrrr: + case MOP_xeorrrrs: { + lsMOpType = kLxEor; + break; + } + case MOP_weorrrr: + case MOP_weorrrrs: { + lsMOpType = kLwEor; + break; + } + case MOP_xinegrr: + case MOP_xinegrrs: { + lsMOpType = kLxNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_winegrr: + case MOP_winegrrs: { + lsMOpType = kLwNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_xiorrrr: + case MOP_xiorrrrs: { + lsMOpType = kLxIor; + break; + } + case MOP_wiorrrr: + case MOP_wiorrrrs: { + lsMOpType = kLwIor; + break; + } + default: { + lsMOpType = kLsUndef; + } + } +} + +void ExtendShiftPattern::SelectExtendOrShift(const Insn &def) +{ + MOperator op = def.GetMachineOpcode(); + switch (op) { + 
case MOP_xsxtb32: + case MOP_xsxtb64: + extendOp = ExtendShiftOperand::kSXTB; + break; + case MOP_xsxth32: + case MOP_xsxth64: + extendOp = ExtendShiftOperand::kSXTH; + break; + case MOP_xsxtw64: + extendOp = ExtendShiftOperand::kSXTW; + break; + case MOP_xuxtb32: + extendOp = ExtendShiftOperand::kUXTB; + break; + case MOP_xuxth32: + extendOp = ExtendShiftOperand::kUXTH; + break; + case MOP_xuxtw64: + extendOp = ExtendShiftOperand::kUXTW; + break; + case MOP_wlslrri5: + case MOP_xlslrri6: + shiftOp = BitShiftOperand::kLSL; + break; + case MOP_xlsrrri6: + case MOP_wlsrrri5: + shiftOp = BitShiftOperand::kLSR; + break; + case MOP_xasrrri6: + case MOP_wasrrri5: + shiftOp = BitShiftOperand::kASR; + break; + default: { + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + } + } +} + +/* Optimize ExtendShiftPattern: + * ========================================================== + * nosuffix LSL LSR ASR extrn (def) + * nosuffix | F | LSL | LSR | ASR | extrn | + * LSL | F | LSL | F | F | extrn | + * LSR | F | F | LSR | F | F | + * ASR | F | F | F | ASR | F | + * exten | F | F | F | F |exten(self)| + * (use) + * =========================================================== + */ +constexpr uint32 kExtenAddShiftNum = 5; +ExtendShiftPattern::SuffixType optTable[kExtenAddShiftNum][kExtenAddShiftNum] = { + {ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSL, ExtendShiftPattern::kLSR, ExtendShiftPattern::kASR, + ExtendShiftPattern::kExten}, + {ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSL, ExtendShiftPattern::kNoSuffix, + ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kExten}, + {ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kLSR, + ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix}, + {ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, + ExtendShiftPattern::kASR, ExtendShiftPattern::kNoSuffix}, + {ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kNoSuffix, + ExtendShiftPattern::kNoSuffix, ExtendShiftPattern::kExten}}; + +/* Check whether ExtendShiftPattern optimization can be performed. */ +ExtendShiftPattern::SuffixType ExtendShiftPattern::CheckOpType(const Operand &lastOpnd) const +{ + /* Assign values to useType and defType. 
+    uint32 useType = ExtendShiftPattern::kNoSuffix;
+    uint32 defType = shiftOp;
+    if (extendOp != ExtendShiftOperand::kUndef) {
+        defType = ExtendShiftPattern::kExten;
+    }
+    if (lastOpnd.IsOpdShift()) {
+        BitShiftOperand lastShiftOpnd = static_cast<const BitShiftOperand&>(lastOpnd);
+        useType = lastShiftOpnd.GetShiftOp();
+    } else if (lastOpnd.IsOpdExtend()) {
+        ExtendShiftOperand lastExtendOpnd = static_cast<const ExtendShiftOperand&>(lastOpnd);
+        useType = ExtendShiftPattern::kExten;
+        /* when def and use are both extends, they must use the same extend op */
+        if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) {
+            return ExtendShiftPattern::kNoSuffix;
+        }
+    }
+    return optTable[useType][defType];
+}
+
+constexpr uint32 kExMopTypeSize = 9;
+constexpr uint32 kLsMopTypeSize = 15;
+
+MOperator exMopTable[kExMopTypeSize] = {MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre,
+                                        MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre};
+MOperator lsMopTable[kLsMopTypeSize] = {MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs,
+                                        MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs,
+                                        MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs};
+/* suffix type of the new insn:
+ * ==============================
+ * (useMop)  | (defMop) | (newMop)
+ * nosuffix  |   all    |   all
+ * exten     |   ex     |   ex
+ * ls        |   ex     |   ls
+ * asr       |   !asr   |   F
+ * !asr      |   asr    |   F
+ * ==============================
+ */
+void ExtendShiftPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount)
+{
+    AArch64CGFunc &a64CGFunc = static_cast<AArch64CGFunc&>(cgFunc);
+    uint32 lastIdx = use.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = use.GetOperand(lastIdx);
+    ExtendShiftPattern::SuffixType optType = CheckOpType(lastOpnd);
+    Operand *shiftOpnd = nullptr;
+    if (optType == ExtendShiftPattern::kNoSuffix) {
+        return;
+    } else if (optType == ExtendShiftPattern::kExten) {
+        replaceOp = exMopTable[exMOpType];
+        if (amount > k4BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast<int32>(k64BitSize));
+    } else {
+        replaceOp = lsMopTable[lsMOpType];
+        if (amount >= k32BitSize) {
+            return;
+        }
+        shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast<int32>(k64BitSize));
+    }
+    if (replaceOp == MOP_undef) {
+        return;
+    }
+
+    Insn *replaceUseInsn = nullptr;
+    Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd);
+    Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd);
+    if (replaceIdx == kInsnSecondOpnd) { /* replace neg insn */
+        secondOpnd = &def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd);
+    } else {
+        Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd);
+        replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd);
+    }
+    use.GetBB()->ReplaceInsn(use, *replaceUseInsn);
+    if (PROP_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "=======ReplaceInsn :\n";
+        use.Dump();
+        LogInfo::MapleLogger() << "=======NewInsn :\n";
+        replaceUseInsn->Dump();
+    }
+    /* update ssa info */
+    optSsaInfo->ReplaceInsn(use, *replaceUseInsn);
+    newInsn = replaceUseInsn;
+    optSuccess = true;
+}
+
+/*
+ * pattern1:
+ * UXTB/UXTW X0, W1           <---- def x0
+ * ....                       <---- (X0 not used)
+ * AND/SUB/EOR X0, X1, X0     <---- use x0
+ * ======>
+ * AND/SUB/EOR X0, X1, W1 UXTB/UXTW
+ *
+ * pattern2:
+ * LSL/LSR X0, X1, #8
+ * ....(X0 not used)
+ * AND/SUB/EOR X0, X1, X0
+ * ======>
+ * AND/SUB/EOR X0, X1, X1 LSL/LSR #8
+ */
+void ExtendShiftPattern::Optimize(Insn &insn)
+{
+    uint32 amount = 0;
+    uint32 offset = 0;
+    uint32 lastIdx = insn.GetOperandSize() - k1BitSize;
+    Operand &lastOpnd = insn.GetOperand(lastIdx);
+    if (lastOpnd.IsOpdShift()) {
+        auto &lastShiftOpnd = static_cast<BitShiftOperand&>(lastOpnd);
+        amount = lastShiftOpnd.GetShiftAmount();
+    } else if (lastOpnd.IsOpdExtend()) {
+        auto &lastExtendOpnd = static_cast<ExtendShiftOperand&>(lastOpnd);
+        amount = lastExtendOpnd.GetShiftAmount();
+    }
+    if (shiftOp != BitShiftOperand::kUndef) {
+        auto &immOpnd = static_cast<ImmOperand&>(defInsn->GetOperand(kInsnThirdOpnd));
+        offset = static_cast<uint32>(immOpnd.GetValue());
+    }
+    amount += offset;
+
+    ReplaceUseInsn(insn, *defInsn, amount);
+}
+
+void ExtendShiftPattern::DoExtendShiftOpt(Insn &insn)
+{
+    if (!CheckAllOpndCondition(insn)) {
+        return;
+    }
+    Optimize(*curInsn);
+    if (optSuccess) {
+        DoExtendShiftOpt(*newInsn);
+    }
+}
+
+void ExtendShiftPattern::SwapOpnd(Insn &insn)
+{
+    Insn *swapInsn =
+        &cgFunc.GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), insn.GetOperand(kInsnFirstOpnd),
+                                            insn.GetOperand(kInsnThirdOpnd), insn.GetOperand(kInsnSecondOpnd));
+    insn.GetBB()->ReplaceInsn(insn, *swapInsn);
+    optSsaInfo->ReplaceInsn(insn, *swapInsn);
+    curInsn = swapInsn;
+    replaceIdx = kInsnThirdOpnd;
+}
+
+bool ExtendShiftPattern::CheckAllOpndCondition(Insn &insn)
+{
+    Init();
+    SetLsMOpType(insn);
+    SetExMOpType(insn);
+    curInsn = &insn;
+    if (IsSwapInsn(insn)) {
+        if (CheckCondition(insn)) {
+            return true;
+        }
+        Init();
+        SetLsMOpType(insn);
+        SetExMOpType(insn);
+        replaceIdx = kInsnSecondOpnd;
+        if (CheckCondition(insn)) {
+            SwapOpnd(insn);
+            return true;
+        }
+    } else {
+        return CheckCondition(insn);
+    }
+    return false;
+}
+
+/* check and set:
+ * exMOpType, lsMOpType, extendOp, shiftOp, defInsn
+ */
+bool ExtendShiftPattern::CheckCondition(Insn &insn)
+{
+    if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) {
+        return false;
+    }
+    auto &regOperand = static_cast<RegOperand&>(insn.GetOperand(replaceIdx));
+    regno_t regNo = regOperand.GetRegisterNumber();
+    VRegVersion *useVersion = optSsaInfo->FindSSAVersion(regNo);
+    defInsn = FindDefInsn(useVersion);
+    if (!defInsn || (useVersion->GetAllUseInsns().size() > 1)) {
+        return false;
+    }
+    SelectExtendOrShift(*defInsn);
+    /* defInsn must be shift or extend */
+    if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) {
+        return false;
+    }
+    Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd);
+    CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!");
+    auto &regDefSrc = static_cast<RegOperand&>(defSrcOpnd);
+    if (regDefSrc.IsPhysicalRegister()) {
+        return false;
+    }
+    /*
+     * has implicit cvt
+     *
+     * avoid cases like the following:
+     *   lsr x2, x2, #8
+     *   ubfx w2, x2, #0, #32            lsr x2, x2, #8
+     *   eor w0, w0, w2        ===>      eor w0, w0, x2    ==\=>   eor w0, w0, w2, LSR #8
+     *
+     * a right shift applied after the truncation would compute the wrong
+     * value; a left shift is unaffected.
+     */
+    if (useVersion->HasImplicitCvt() && shiftOp != BitShiftOperand::kUndef) {
+        return false;
+    }
+    if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) &&
+        (defSrcOpnd.GetSize() > regOperand.GetSize())) {
+        return false;
+    }
+    regno_t defSrcRegNo = regDefSrc.GetRegisterNumber();
+    /* check regDefSrc */
+    VRegVersion *replaceUseV = optSsaInfo->FindSSAVersion(defSrcRegNo);
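+    /*
+     * Illustrative example (registers assumed) for the single-use check below:
+     * folding
+     *   lsl w1, w2, #2
+     *   add w0, w3, w1    ===>    add w0, w3, w2, LSL #2
+     * stretches the live range of w2 up to the use, so the fold is only
+     * attempted when w2 has no other consumers.
+     */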
CHECK_FATAL(replaceUseV != nullptr, "useVRegVersion must not be null based on ssa"); + if (replaceUseV->GetAllUseInsns().size() > 1) { + return false; + } + return true; +} + +void ExtendShiftPattern::Init() +{ + replaceOp = MOP_undef; + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + defInsn = nullptr; + newInsn = nullptr; + replaceIdx = kInsnThirdOpnd; + optSuccess = false; + exMOpType = kExUndef; + lsMOpType = kLsUndef; +} + +void ExtendShiftPattern::Run() +{ + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoExtendShiftOpt(*insn); + } + } +} + +void ExtendMovPattern::Run() +{ + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool ExtendMovPattern::CheckSrcReg(regno_t srcRegNo, uint32 validNum) +{ + InsnSet srcDefSet; + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(srcRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + Insn *insn = defInfo->GetInsn(); + srcDefSet.insert(insn); + /* reserve insn set for non ssa version. */ + for (auto defInsn : srcDefSet) { + CHECK_FATAL((defInsn != nullptr), "defInsn is null!"); + MOperator mOp = defInsn->GetMachineOpcode(); + switch (mOp) { + case MOP_wiorrri12: + case MOP_weorrri12: { + /* check immVal if mop is OR */ + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + uint32 bitNum = static_cast(imm.GetValue()); + if ((bitNum >> validNum) != 0) { + return false; + } + } + case MOP_wandrri12: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo, validNum)) { + return false; + } + break; + } + case MOP_wandrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) && !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wiorrrr: + case MOP_weorrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) || !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wldrb: { + if (validNum != k8BitSize) { + return false; + } + break; + } + case MOP_wldrh: { + if (validNum != k16BitSize) { + return false; + } + break; + } + default: + return false; + } + } + return true; +} + +bool ExtendMovPattern::BitNotAffected(const Insn &insn, uint32 validNum) +{ + RegOperand &firstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + RegOperand &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + regno_t desRegNo = firstOpnd.GetRegisterNumber(); + 
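+    /*
+     * Illustrative example (assumed widths) of the check performed below:
+     *   ldrb w1, [x9]     <- every reaching def leaves at most 8 valid bits
+     *   uxtb w2, w1       <- so the zero-extension is a no-op
+     * CheckSrcReg proves this property recursively; the uxtb is then
+     * rewritten into mov w2, w1 by ExtendMovPattern::Optimize.
+     */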
regno_t srcRegNo = secondOpnd.GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(desRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + if (!CheckSrcReg(srcRegNo, validNum)) { + return false; + } + replaceMop = MOP_wmovrr; + return true; +} + +bool ExtendMovPattern::CheckCondition(Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + return BitNotAffected(insn, k8BitSize); + case MOP_xuxth32: + return BitNotAffected(insn, k16BitSize); + default: + return false; + } +} + +/* No initialization required */ +void ExtendMovPattern::Init() +{ + replaceMop = MOP_undef; +} + +void ExtendMovPattern::Optimize(Insn &insn) +{ + insn.SetMOP(AArch64CG::kMd[replaceMop]); +} + +void CopyRegProp::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool CopyRegProp::IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const +{ + DEBUG_ASSERT(destVersion != nullptr, "find destVersion failed"); + DEBUG_ASSERT(srcVersion != nullptr, "find srcVersion failed"); + LiveInterval *dstll = nullptr; + LiveInterval *srcll = nullptr; + if (destVersion->GetOriginalRegNO() == srcVersion->GetOriginalRegNO()) { + return true; + } + regno_t dstRegNO = dstReg.GetRegisterNumber(); + regno_t srcRegNO = srcReg.GetRegisterNumber(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + + dstll = regll->GetLiveInterval(dstRegNO); + srcll = regll->GetLiveInterval(srcRegNO); + static_cast(regll)->CheckInterference(*dstll, *srcll); + BB *useBB = useInsn->GetBB(); + if (dstll->IsConflictWith(srcRegNO) && + /* support override value when the version is not transphi */ + (((useBB->IsInPhiDef(srcRegNO) || useBB->IsInPhiList(srcRegNO)) && useBB->HasCriticalEdge()) || + useBB->IsInPhiList(dstRegNO))) { + return false; + } + } + if (dstll && srcll) { + regll->CoalesceLiveIntervals(*dstll, *srcll); + } + return true; +} + +bool CopyRegProp::CheckCondition(Insn &insn) +{ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(destOpnd.IsRegister() && srcOpnd.IsRegister(), "must be"); + auto &destReg = static_cast(destOpnd); + auto &srcReg = static_cast(srcOpnd); + if (srcReg.GetRegisterNumber() == RZR) { + insn.SetMOP(AArch64CG::kMd[mOp == MOP_xmovrr ? 
MOP_xmovri64 : MOP_wmovri32]); + insn.SetOperand(kInsnSecondOpnd, cgFunc.CreateImmOperand(PTY_u64, 0)); + } + if (destReg.IsSSAForm() && srcReg.IsSSAForm()) { + /* case for ExplicitExtendProp */ + if (destReg.GetSize() != srcReg.GetSize()) { + VaildateImplicitCvt(destReg, srcReg, insn); + return false; + } + if (destReg.GetValidBitsNum() >= srcReg.GetValidBitsNum()) { + destReg.SetValidBitsNum(srcReg.GetValidBitsNum()); + } else { + MapleVector &propInsns = optSsaInfo->GetSafePropInsns(); + if (std::find(propInsns.begin(), propInsns.end(), insn.GetId()) == propInsns.end()) { + CHECK_FATAL(false, "do not support explicit extract bit in mov"); + return false; + } + } + destVersion = optSsaInfo->FindSSAVersion(destReg.GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcReg.GetRegisterNumber()); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + if (!IsValidCopyProp(destReg, srcReg)) { + return false; + } + return true; + } else { + /* should be eliminated by ssa peep */ + } + } + } + return false; +} + +void CopyRegProp::Optimize(Insn &insn) +{ + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + if (cgFunc.IsExtendReg(destVersion->GetSSAvRegOpnd()->GetRegisterNumber())) { + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } +} + +void CopyRegProp::VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn) +{ + DEBUG_ASSERT(movInsn.GetMachineOpcode() == MOP_xmovrr || movInsn.GetMachineOpcode() == MOP_wmovrr, + "NIY explicit CVT"); + if (destReg.GetSize() == k64BitSize && srcReg.GetSize() == k32BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } else if (destReg.GetSize() == k32BitSize && srcReg.GetSize() == k64BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xubfxrri6i6]); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, 0)); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, k32BitSize)); + } else { + CHECK_FATAL(false, " unknown explicit integer cvt, need implement in ssa prop "); + } + destReg.SetValidBitsNum(k32BitSize); +} + +void RedundantPhiProp::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + for (auto phiIt : bb->GetPhiInsns()) { + Init(); + if (!CheckCondition(*phiIt.second)) { + continue; + } + Optimize(*phiIt.second); + } + } +} + +void RedundantPhiProp::Optimize(Insn &insn) +{ + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); +} + +bool RedundantPhiProp::CheckCondition(Insn &insn) +{ + DEBUG_ASSERT(insn.IsPhi(), "must be phi insn here"); + auto &phiOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (phiOpnd.IsRedundancy()) { + auto &phiDestReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + destVersion = optSsaInfo->FindSSAVersion(phiDestReg.GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + uint32 srcRegNO = phiOpnd.GetOperands().begin()->second->GetRegisterNumber(); + srcVersion = optSsaInfo->FindSSAVersion(srcRegNO); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + return true; + } + return false; +} + +bool ValidBitNumberProp::CheckCondition(Insn &insn) +{ + /* extend to all shift pattern in future */ + RegOperand *destOpnd = nullptr; + RegOperand *srcOpnd = nullptr; + if (insn.GetMachineOpcode() == MOP_xuxtw64) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + } + if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = 
&static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); + auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { + return false; + } + } + if (destOpnd != nullptr && destOpnd->IsSSAForm() && srcOpnd != nullptr && srcOpnd->IsSSAForm()) { + destVersion = optSsaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); + DEBUG_ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); + DEBUG_ASSERT(srcVersion != nullptr, "find Version failed"); + if (destVersion->HasImplicitCvt()) { + return false; + } + for (auto destUseIt : destVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { + return false; + } + /* if srcOpnd upper 32 bits are valid, it can not prop to mop_x */ + if (srcOpnd->GetSize() == k64BitSize && destOpnd->GetSize() == k64BitSize) { + const auto *useMD = useInsn->GetDesc(); + for (auto opndUseIt : destUseIt.second->GetOperands()) { + const OpndDesc *useProp = useMD->opndMD[opndUseIt.first]; + if (useProp->GetSize() == k64BitSize) { + return false; + } + } + } + } + srcVersion->SetImplicitCvt(); + return true; + } + return false; +} + +void ValidBitNumberProp::Optimize(Insn &insn) +{ + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); +} + +void ValidBitNumberProp::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +void FpSpConstProp::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool FpSpConstProp::CheckCondition(Insn &insn) +{ + std::set defRegs = insn.GetDefRegs(); + auto &a64CGFunc = static_cast(cgFunc); + if (defRegs.size() <= 1) { + if (insn.ScanReg(RSP)) { + fpSpBase = &a64CGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + /* not safe due to varied sp in alloca */ + if (cgFunc.HasVLAOrAlloca()) { + return false; + } + } + if (insn.ScanReg(RFP)) { + DEBUG_ASSERT(fpSpBase == nullptr, " unexpect for both sp fp using "); + fpSpBase = &a64CGFunc.GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + } + if (fpSpBase == nullptr) { + return false; + } + if (insn.GetMachineOpcode() == MOP_xaddrri12) { + aT = kAArch64Add; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } else if (insn.GetMachineOpcode() == MOP_xsubrri12) { + aT = kAArch64Sub; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } + } + return false; +} + +bool FpSpConstProp::GetValidSSAInfo(Operand &opnd) +{ + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsSSAForm()) { + replaced = optSsaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + DEBUG_ASSERT(replaced != nullptr, "find ssa version failed in FpSpConstProp"); + return true; + } + } + return false; +} + +int64 FpSpConstProp::ArithmeticFold(int64 valInUse, ArithmeticType useAT) const +{ + int64 valInDef = shiftOpnd->GetValue(); + int64 returnVal = 0; + CHECK_FATAL(aT == 
kAArch64Add || aT == kAArch64Sub, "unsupport sp/fp arthimetic in aarch64"); + if (useAT == aT) { + returnVal = valInUse + valInDef; + } else { + returnVal = valInUse - valInDef; + } + return returnVal; +} + +void FpSpConstProp::PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn) +{ + MOperator useMop = useInsn.GetMachineOpcode(); + if (useInsn.IsAtomic()) { + return; + } + if (useInsn.IsStore() || useInsn.IsLoad()) { + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndIt = useDUInfo.GetOperands().begin(); + if (useOpndIt->first == kInsnSecondOpnd || useOpndIt->first == kInsnThirdOpnd) { + DEBUG_ASSERT(useOpndIt->second == 1, "multiple use in memory opnd"); + auto *a64memOpnd = static_cast(useInsn.GetMemOpnd()); + if (a64memOpnd->IsIntactIndexed() && a64memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + auto *ofstOpnd = static_cast(a64memOpnd->GetOffsetImmediate()); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + int64 newVal = ArithmeticFold(ofstOpnd->GetValue(), kAArch64Add); + auto *newOfstImm = + &static_cast(cgFunc).CreateOfstOpnd(static_cast(newVal), k64BitSize); + if (ofstOpnd->GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newOfstImm->SetVary(kUnAdjustVary); + } + auto *newMem = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, a64memOpnd->GetSize(), *fpSpBase, nullptr, newOfstImm, nullptr); + if (static_cast(cgFunc).IsOperandImmValid(useMop, newMem, useOpndIt->first)) { + useInsn.SetMemOpnd(newMem); + useDUInfo.DecreaseDU(useOpndIt->first); + replaced->CheckDeadUse(useInsn); + } + } + } + } else { + /* + * case : store stack location on stack + * add x1, sp, #8 + * ... + * store x1 [x1, #16] + * not prop , not benefit to live range yet + */ + return; + } + } +} + +void FpSpConstProp::PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT) +{ + if (useDUInfo.GetOperands().size() == 1) { + auto &a64cgFunc = static_cast(cgFunc); + MOperator useMop = useInsn.GetMachineOpcode(); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &curVal = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newVal = + a64cgFunc.CreateImmOperand(ArithmeticFold(curVal.GetValue(), curAT), curVal.GetSize(), false); + if (newVal.GetValue() < 0) { + newVal.Negate(); + useMop = A64ConstProp::GetReversalMOP(useMop); + } + if (curVal.GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newVal.SetVary(kUnAdjustVary); + } + if (static_cast(cgFunc).IsOperandImmValid(useMop, &newVal, kInsnThirdOpnd)) { + Insn &newInsn = + cgFunc.GetInsnBuilder()->BuildInsn(useMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +void FpSpConstProp::PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop) +{ + if (useDUInfo.GetOperands().size() == 1) { + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + DEBUG_ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &newVal = *static_cast(shiftOpnd->Clone(*cgFunc.GetMemoryPool())); + Insn &newInsn = + cgFunc.GetInsnBuilder()->BuildInsn(oriMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } else { + CHECK_FATAL(false, 
"NYI"); + } +} + +void FpSpConstProp::Optimize(Insn &insn) +{ + for (auto &useInsnInfo : replaced->GetAllUseInsns()) { + Insn *useInsn = useInsnInfo.second->GetInsn(); + MOperator useMop = useInsn->GetMachineOpcode(); + PropInMem(*useInsnInfo.second, *useInsn); + switch (useMop) { + case MOP_xmovrr: + case MOP_wmovrr: + PropInCopy(*useInsnInfo.second, *useInsn, insn.GetMachineOpcode()); + break; + case MOP_xaddrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Add); + break; + case MOP_xsubrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Sub); + break; + default: + break; + } + } +} + +bool A64PregCopyPattern::DFSFindValidDefInsns(Insn *curDefInsn, RegOperand *lastPhiDef, + std::unordered_map &visited) +{ + if (curDefInsn == nullptr) { + return false; + } + /* + * avoid the case as following: + * R113 and R117 define each other. + * [BB5] ---------------------------- + * phi: R113, (R111<4>, R117<9>) | + * / \ | + * / \ | + * [BB6] ---- [BB7] | + * add R116, R113, #4 phi: R117, (R113<5>, R116<6>) | + * / \ | + * / \ | + * [BB8] [BB28] | + * / | + * / | + * [BB9] ------ [BB5] | + * mov R1, R117 -------------------------- + * + * but the cases as following is right: + * (1) + * [BB124] + * add R339, R336, #345 -------- is found twice + * / \ + * / \ + * / [BB125] + * \ / + * \ / + * [BB56] + * phi: R370, (R339<124>, R339<125>) + * | + * | + * [BB61] + * mov R0, R370 + * (2) + * [BB17] + * phi: R242, (R241<14>, R218<53>) ------- is found twice + * / \ + * / \ + * / [BB26] [BB32] + * \ \ / + * \ [BB27] + * \ phi: R273, (R242<26>, R320<32>) + * [BB25] / + * \ [BB42] + * \ / + * [BB43] + * phi: R321, (R242<25>, R273<42>) + * | + * [BB47] + * mov R0, R321 + */ + if (visited[curDefInsn->GetId()] && curDefInsn->IsPhi() && lastPhiDef != nullptr) { + auto &curPhiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &curPhiListIt : curPhiOpnd.GetOperands()) { + auto &curUseOpnd = static_cast(*curPhiListIt.second); + if (&curUseOpnd == lastPhiDef) { + return false; + } + } + } + if (visited[curDefInsn->GetId()]) { + return true; + } + visited[curDefInsn->GetId()] = true; + if (!curDefInsn->IsPhi()) { + CHECK_FATAL(curDefInsn->IsMachineInstruction(), "expect valid insn"); + (void)validDefInsns.emplace_back(curDefInsn); + return true; + } + auto &phiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + lastPhiDef = &static_cast(curDefInsn->GetOperand(kInsnFirstOpnd)); + if (!DFSFindValidDefInsns(defInsn, lastPhiDef, visited)) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckMultiUsePoints(const Insn *defInsn) const +{ + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + /* use: (phi) or (mov preg) */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (!useInsn->IsPhi() && useInsn->GetMachineOpcode() != MOP_wmovrr && + useInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + if 
((useInsn->GetMachineOpcode() == MOP_wmovrr || useInsn->GetMachineOpcode() == MOP_xmovrr) && + !static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsPhysicalRegister()) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn) +{ + std::unordered_map visited; + RegOperand *lastPhiDef = + (defInsn.IsPhi() ? &static_cast(defInsn.GetOperand(kInsnFirstOpnd)) : nullptr); + if (!DFSFindValidDefInsns(&defInsn, lastPhiDef, visited)) { + return false; + } + if (!CheckValidDefInsn(validDefInsns[0])) { + return false; + } + MOperator defMop = validDefInsns[0]->GetMachineOpcode(); + uint32 defOpndNum = validDefInsns[0]->GetOperandSize(); + for (size_t i = 1; i < validDefInsns.size(); ++i) { + if (defMop != validDefInsns[i]->GetMachineOpcode()) { + return false; + } + if (!CheckMultiUsePoints(validDefInsns[i])) { + return false; + } + for (uint32 idx = 0; idx < defOpndNum; ++idx) { + if (validDefInsns[0]->OpndIsDef(idx) && validDefInsns[i]->OpndIsDef(idx)) { + continue; + } + Operand &opnd1 = validDefInsns[0]->GetOperand(idx); + Operand &opnd2 = validDefInsns[i]->GetOperand(idx); + if (!opnd1.Equals(opnd2) && differIdx == -1) { + differIdx = static_cast(idx); + if (!validDefInsns[0]->GetOperand(static_cast(differIdx)).IsRegister() || + !validDefInsns[i]->GetOperand(static_cast(differIdx)).IsRegister()) { + return false; + } + auto &differOpnd1 = + static_cast(validDefInsns[0]->GetOperand(static_cast(differIdx))); + auto &differOpnd2 = + static_cast(validDefInsns[1]->GetOperand(static_cast(differIdx))); + /* avoid cc reg */ + if (!differOpnd1.IsOfIntClass() || !differOpnd2.IsOfIntClass() || differOpnd1.IsPhysicalRegister() || + differOpnd2.IsPhysicalRegister()) { + return false; + } + VRegVersion *differVersion1 = optSsaInfo->FindSSAVersion(differOpnd1.GetRegisterNumber()); + VRegVersion *differVersion2 = optSsaInfo->FindSSAVersion(differOpnd2.GetRegisterNumber()); + if (!differVersion1 || !differVersion2) { + return false; + } + if (differVersion1->GetOriginalRegNO() != differVersion2->GetOriginalRegNO()) { + return false; + } + differOrigNO = differVersion1->GetOriginalRegNO(); + } else if (!opnd1.Equals(opnd2) && idx != differIdx) { + return false; + } + } + if (differIdx <= 0) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckUselessDefInsn(const Insn *defInsn) const +{ + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + if (defVersion->GetAllUseInsns().size() == 1) { + return true; + } + /* + * avoid the case as following + * In a loop: + * [BB43] + * phi: R356, (R345<42>, R377<63>) + * / \ + * / \ + * [BB44] \ + * add R377, R356, #1 / + * mov R1, R377 / + * bl / + * \ / + * \ / + * [BB63] + */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (useInsn->IsPhi()) { + auto &phiDefOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + continue; + } + Operand &opnd = defInsn->GetOperand(i); + if (opnd.IsRegister() && + static_cast(opnd).GetRegisterNumber() == phiDefOpnd.GetRegisterNumber()) { + return false; + } + } + } + } + return true; +} + 
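+/*
+ * A minimal, self-contained sketch (guarded out with "#if 0"; names are
+ * hypothetical, not part of this patch) of the invariant enforced by
+ * CheckPhiCaseCondition above: all candidate def insns must share one opcode
+ * and may differ in at most one source operand position, the "differIdx"
+ * that later becomes a phi. Compile it separately to experiment.
+ */
+#if 0
+#include <cstdio>
+#include <string>
+#include <vector>
+
+/* Returns the single differing source-operand index, or -1 when the defs
+ * differ in no position or in more than one (either blocks the pattern). */
+static int FindSingleDifferIdx(const std::vector<std::vector<std::string>> &defs)
+{
+    if (defs.empty()) {
+        return -1;
+    }
+    int differIdx = -1;
+    for (size_t i = 1; i < defs.size(); ++i) {
+        if (defs[i].size() != defs[0].size()) {
+            return -1;
+        }
+        for (size_t idx = 1; idx < defs[0].size(); ++idx) { /* index 0 is the def operand */
+            if (defs[0][idx] == defs[i][idx]) {
+                continue;
+            }
+            if (differIdx != -1 && differIdx != static_cast<int>(idx)) {
+                return -1; /* a second differing position: reject */
+            }
+            differIdx = static_cast<int>(idx);
+        }
+    }
+    return differIdx;
+}
+
+int main()
+{
+    /* add R110, R106, #1  vs  add R122, R118, #1 : only operand 1 differs */
+    std::vector<std::vector<std::string>> defs = {{"R110", "R106", "#1"}, {"R122", "R118", "#1"}};
+    std::printf("differIdx = %d\n", FindSingleDifferIdx(defs)); /* prints 1 */
+    return 0;
+}
+#endif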
+bool A64PregCopyPattern::CheckValidDefInsn(const Insn *defInsn) +{ + const auto *md = defInsn->GetDesc(); + CHECK_FATAL(md != nullptr, "expect valid AArch64MD"); + /* this pattern applies to all basicOps */ + if (md->IsMove() || md->IsStore() || md->IsLoad() || md->IsLoadStorePair() || md->IsCall() || md->IsDMB() || + md->IsVectorOp() || md->IsCondDef() || md->IsCondBranch() || md->IsUnCondBranch()) { + return false; + } + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsn->GetOperand(i); + if (!opnd.IsRegister() && !opnd.IsImmediate() && !opnd.IsOpdShift() && !opnd.IsOpdExtend()) { + return false; + } + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (cgFunc.IsSPOrFP(regOpnd) || regOpnd.IsPhysicalRegister() || + (!regOpnd.IsOfIntClass() && !regOpnd.IsOfFloatOrSIMDClass())) { + return false; + } + } + } + return true; +} + +bool A64PregCopyPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_xmovrr && curMop != MOP_wmovrr) { + return false; + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!dstOpnd.IsPhysicalRegister()) { + return false; + } + regno_t useRegNO = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useRegNO); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + Operand &defDstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + /* avoid inline-asm */ + if (!defDstOpnd.IsRegister()) { + return false; + } + if (!CheckMultiUsePoints(defInsn)) { + return false; + } + if (defInsn->IsPhi()) { + isCrossPhi = true; + firstPhiInsn = defInsn; + return CheckPhiCaseCondition(insn, *defInsn); + } else { + if (!CheckValidDefInsn(defInsn)) { + return false; + } + if (!CheckUselessDefInsn(defInsn)) { + return false; + } + (void)validDefInsns.emplace_back(defInsn); + } + return true; +} + +Insn &A64PregCopyPattern::CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn) +{ + CHECK_FATAL(!newPhiList.empty(), "empty newPhiList"); + RegOperand *differOrigOpnd = cgFunc.GetVirtualRegisterOperand(differOrigNO); + CHECK_FATAL(differOrigOpnd != nullptr, "get original opnd default"); + PhiOperand &phiList = optSsaInfo->CreatePhiOperand(); + for (auto &it : newPhiList) { + phiList.InsertOpnd(it.first, *it.second); + } + Insn &phiInsn = cgFunc.GetCG()->BuildPhiInsn(*differOrigOpnd, phiList); + optSsaInfo->CreateNewInsnSSAInfo(phiInsn); + BB *bb = curInsn->GetBB(); + (void)bb->InsertInsnBefore(*curInsn, phiInsn); + /* */ + bb->AddPhiInsn(static_cast(phiInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(), phiInsn); + return phiInsn; +} + +/* + * Check whether the required phi is available, do not insert phi repeatedly. + */ +RegOperand *A64PregCopyPattern::CheckAndGetExistPhiDef(Insn &phiInsn, std::vector &validDifferRegNOs) const +{ + MapleMap &phiInsns = phiInsn.GetBB()->GetPhiInsns(); + for (auto &phiIt : phiInsns) { + auto &def = static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(def.GetRegisterNumber()); + /* + * if the phi of the change point has been created (according to original regNO), return the phiDefOpnd. + * But, there is a problem: the phiDefOpnd of the same original regNO is not the required phi. 
+     * For example (in parentheses is the original regNO):
+     *   add R110(R80), R106(R80), #1        add R122(R80), R118(R80), #1
+     *                     \                       /
+     *                      \                     /
+     *                 (1) phi: R123(R80), [R110, R122]
+     *                      mov R0, R123
+     * It will return R123 of phi(1) because the differOrigNO is 80, but that is not what we want;
+     * we need to create a new phi(2): R140(R80), [R106, R118].
+     * So we need to check whether all phiOpnds have the correct ssaRegNO.
+     */
+    if (defVersion->GetOriginalRegNO() == differOrigNO) {
+        auto &phiOpnd = static_cast<PhiOperand&>(phiIt.second->GetOperand(kInsnSecondOpnd));
+        if (phiOpnd.GetOperands().size() == validDifferRegNOs.size()) {
+            bool exist = true;
+            for (auto &phiListIt : phiOpnd.GetOperands()) {
+                if (std::find(validDifferRegNOs.begin(), validDifferRegNOs.end(),
+                              static_cast<RegOperand*>(phiListIt.second)->GetRegisterNumber()) ==
+                    validDifferRegNOs.end()) {
+                    exist = false;
+                    break;
+                }
+            }
+            if (exist) {
+                return &static_cast<RegOperand&>(phiIt.second->GetOperand(kInsnFirstOpnd));
+            }
+        }
+    }
+    }
+    return nullptr;
+}
+
+RegOperand &A64PregCopyPattern::DFSBuildPhiInsn(Insn *curInsn, std::unordered_map<uint32, RegOperand*> &visited)
+{
+    CHECK_FATAL(curInsn, "curInsn must not be null");
+    if (visited[curInsn->GetId()] != nullptr) {
+        return *visited[curInsn->GetId()];
+    }
+    if (!curInsn->IsPhi()) {
+        return static_cast<RegOperand&>(curInsn->GetOperand(static_cast<uint32>(differIdx)));
+    }
+    std::unordered_map<uint32, RegOperand*> differPhiList;
+    std::vector<regno_t> validDifferRegNOs;
+    auto &phiOpnd = static_cast<PhiOperand&>(curInsn->GetOperand(kInsnSecondOpnd));
+    for (auto &phiListIt : phiOpnd.GetOperands()) {
+        auto &useOpnd = static_cast<RegOperand&>(*phiListIt.second);
+        VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber());
+        Insn *defInsn = FindDefInsn(useVersion);
+        CHECK_FATAL(defInsn != nullptr, "get defInsn failed");
+        RegOperand &phiDefOpnd = DFSBuildPhiInsn(defInsn, visited);
+        (void)differPhiList.emplace(phiListIt.first, &phiDefOpnd);
+        (void)validDifferRegNOs.emplace_back(phiDefOpnd.GetRegisterNumber());
+    }
+    /*
+     * The phi we need may already exist in the control flow.
+     * For example:
+     *   [BB26]                        [BB45]
+     *   add R191, R103, R187          add R166, R103, R164
+     *        \                         /
+     *         \                       /
+     *              [BB27]
+     *   phi: R192, (R191<26>, R166<45>)  ------ curInsn
+     *   phi: R194, (R187<26>, R164<45>)  ------ the phi which we need already exists
+     *      /                                    validDifferRegNOs : [187, 164]
+     *     /
+     *   [BB28]                        [BB46]
+     *   add R215, R103, R211            /
+     *        \                         /
+     *         \                       /
+     *              [BB29]
+     *   phi: R216, (R215<28>, R192<46>)
+     *   phi: R218, (R211<28>, R194<46>)  ------ the phi which we need already exists
+     *   mov R0, R216                            validDifferRegNOs : [211, 194]
+     */
+    RegOperand *existPhiDef = CheckAndGetExistPhiDef(*curInsn, validDifferRegNOs);
+    if (existPhiDef == nullptr) {
+        Insn &phiInsn = CreateNewPhiInsn(differPhiList, curInsn);
+        visited[curInsn->GetId()] = &static_cast<RegOperand&>(phiInsn.GetOperand(kInsnFirstOpnd));
+        existPhiDef = &static_cast<RegOperand&>(phiInsn.GetOperand(kInsnFirstOpnd));
+    }
+    return *existPhiDef;
+}
+
+void A64PregCopyPattern::Optimize(Insn &insn)
+{
+    Insn *defInsn = *validDefInsns.begin();
+    MOperator newMop = defInsn->GetMachineOpcode();
+    Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd);
+    Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, AArch64CG::kMd[newMop]);
+    uint32 opndNum = defInsn->GetOperandSize();
+    newInsn.ResizeOpnds(opndNum);
+    if (!isCrossPhi) {
+        for (uint32 i = 0; i < opndNum; ++i) {
+            if (defInsn->OpndIsDef(i)) {
+                newInsn.SetOperand(i, dstOpnd);
+            } else {
+                newInsn.SetOperand(i, defInsn->GetOperand(i));
+            }
+        }
+    } else {
+        std::vector<regno_t> validDifferRegNOs;
+        for (Insn *vdInsn : validDefInsns) {
+            auto &vdOpnd = static_cast<RegOperand&>(vdInsn->GetOperand(static_cast<uint32>(differIdx)));
+            (void)validDifferRegNOs.emplace_back(vdOpnd.GetRegisterNumber());
+        }
+        RegOperand *differPhiDefOpnd = CheckAndGetExistPhiDef(*firstPhiInsn, validDifferRegNOs);
+        if (differPhiDefOpnd == nullptr) {
+            std::unordered_map<uint32, RegOperand*> visited;
+            differPhiDefOpnd = &DFSBuildPhiInsn(firstPhiInsn, visited);
+        }
+        CHECK_FATAL(differPhiDefOpnd, "get differPhiDefOpnd failed");
+        for (uint32 i = 0; i < opndNum; ++i) {
+            if (defInsn->OpndIsDef(i)) {
+                newInsn.SetOperand(i, dstOpnd);
+            } else if (i == static_cast<uint32>(differIdx)) {
+                newInsn.SetOperand(i, *differPhiDefOpnd);
+            } else {
+                newInsn.SetOperand(i, defInsn->GetOperand(i));
+            }
+        }
+    }
+    insn.GetBB()->ReplaceInsn(insn, newInsn);
+    /* update ssa info */
+    optSsaInfo->ReplaceInsn(insn, newInsn);
+
+    if (PROP_DUMP) {
+        LogInfo::MapleLogger() << ">>>>>>> In A64PregCopyPattern : <<<<<<<\n";
+        LogInfo::MapleLogger() << "======= ReplaceInsn :\n";
+        insn.Dump();
+        LogInfo::MapleLogger() << "======= NewInsn :\n";
+        newInsn.Dump();
+    }
+}
+
+void A64PregCopyPattern::Run()
+{
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            Init();
+            if (!CheckCondition(*insn)) {
+                continue;
+            }
+            Optimize(*insn);
+        }
+    }
+    validDefInsns.clear();
+    validDefInsns.shrink_to_fit();
+}
+
+void A64ReplaceRegOpndVisitor::Visit(RegOperand *v)
+{
+    (void)v;
+    insn->SetOperand(idx, *newReg);
+}
+void A64ReplaceRegOpndVisitor::Visit(MemOperand *a64memOpnd)
+{
+    bool changed = false;
+    CHECK_FATAL(a64memOpnd->IsIntactIndexed(), "NYI post/pre index model");
+    StackMemPool tempMemPool(memPoolCtrler, "temp mempool for A64ReplaceRegOpndVisitor");
+    auto *cpyMem = a64memOpnd->Clone(tempMemPool);
+    if (cpyMem->GetBaseRegister() != nullptr &&
+        cpyMem->GetBaseRegister()->GetRegisterNumber() == oldReg->GetRegisterNumber()) {
+        cpyMem->SetBaseRegister(*static_cast<RegOperand*>(newReg));
+        changed = true;
+    }
+    if (cpyMem->GetIndexRegister() != nullptr &&
cpyMem->GetIndexRegister()->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + CHECK_FATAL(!changed, "base reg is equal to index reg"); + cpyMem->SetIndexRegister(*newReg); + changed = true; + } + if (changed) { + insn->SetMemOpnd(&static_cast(cgFunc)->GetOrCreateMemOpnd(*cpyMem)); + } +} +void A64ReplaceRegOpndVisitor::Visit(ListOperand *v) +{ + for (auto &it : v->GetOperands()) { + if (it->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it = newReg; + } + } +} +void A64ReplaceRegOpndVisitor::Visit(PhiOperand *v) +{ + for (auto &it : v->GetOperands()) { + if (it.second->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it.second = newReg; + } + } + auto &phiDest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + if (phiDest.GetValidBitsNum() > v->GetLeastCommonValidBit()) { + phiDest.SetValidBitsNum(v->GetLeastCommonValidBit()); + } +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..84bf0baf77ca4e7461ef67a1ccb6d4029a95515e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "loop.h" +#include "aarch64_ra_opt.h" + +namespace maplebe { +using namespace std; +bool RaX0Opt::PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const +{ + if (opnd != nullptr) { + RegOperand *regopnd = static_cast(opnd); + regno_t regCandidate = regopnd->GetRegisterNumber(); + if (regCandidate == replaceReg) { + return true; + } + } + return false; +} + +/* + * Replace replace_reg with rename_reg. + * return true if there is a redefinition that needs to terminate the propagation. 
+ */ +bool RaX0Opt::PropagateRenameReg(Insn *nInsn, const X0OptInfo &optVal) const +{ + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + const InsnDesc *md = nInsn->GetDesc(); + int32 lastOpndId = static_cast(nInsn->GetOperandSize() - 1); + for (int32_t i = lastOpndId; i >= 0; i--) { + Operand &opnd = nInsn->GetOperand(static_cast(i)); + + if (opnd.IsList()) { + /* call parameters */ + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memopnd = static_cast(opnd); + if (PropagateX0CanReplace(memopnd.GetBaseRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetBaseRegister(*renameOpnd); + } + if (PropagateX0CanReplace(memopnd.GetIndexRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetIndexRegister(*renameOpnd); + } + } else if (opnd.IsRegister()) { + bool isdef = (md->GetOpndDes(i))->IsRegDef(); + RegOperand ®opnd = static_cast(opnd); + regno_t regCandidate = regopnd.GetRegisterNumber(); + if (isdef) { + /* Continue if both replace_reg & rename_reg are not redefined. */ + if (regCandidate == optVal.GetReplaceReg() || regCandidate == renameReg) { + return true; + } + } else { + if (regCandidate == optVal.GetReplaceReg()) { + nInsn->SetOperand(static_cast(i), *optVal.GetRenameOpnd()); + } + } + } + } + return false; /* false == no redefinition */ +} + +/* Propagate x0 from a call return value to a def of x0. + * This eliminates some local reloads under high register pressure, since + * the use has been replaced by x0. + */ +bool RaX0Opt::PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const +{ + if (insn->GetMachineOpcode() != MOP_xmovrr && insn->GetMachineOpcode() != MOP_wmovrr) { + return false; + } + RegOperand &movSrc = static_cast(insn->GetOperand(1)); + if (movSrc.GetRegisterNumber() != R0) { + return false; + } + + optVal.SetMovSrc(&movSrc); + return true; +} + +bool RaX0Opt::PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal, + uint32 index) const +{ + bool isdef = (md->GetOpndDes(static_cast(index)))->IsRegDef(); + if (isdef) { + RegOperand &opnd = static_cast(ninsn->GetOperand(index)); + if (opnd.GetRegisterNumber() == optVal.GetReplaceReg()) { + return true; + } + } + return false; +} + +bool RaX0Opt::PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal) +{ + bool redefined = false; + for (Insn *ninsn = insn->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext(); + ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + + if (ninsn->IsCall()) { + break; + } + + /* Will continue as long as the reg being replaced is not redefined. + * Does not need to check for x0 redefinition. The mov instruction src + * being replaced already defines x0 and will terminate this loop. + */ + const InsnDesc *md = ninsn->GetDesc(); + for (uint32 i = 0; i < ninsn->GetDefRegs().size(); i++) { + redefined = PropagateX0DetectRedefine(md, ninsn, optVal, i); + if (redefined) { + break; + } + } + if (redefined) { + break; + } + + /* Look for move where src is the register equivalent to x0. */ + if (ninsn->GetMachineOpcode() != MOP_xmovrr && ninsn->GetMachineOpcode() != MOP_wmovrr) { + continue; + } + + Operand *src = &ninsn->GetOperand(1); + RegOperand *srcreg = static_cast(src); + if (srcreg->GetRegisterNumber() != optVal.GetReplaceReg()) { + continue; + } + + /* Setup for the next optmization pattern. 
*/ + Operand *dst = &ninsn->GetOperand(0); + RegOperand *dstreg = static_cast(dst); + if (dstreg->GetRegisterNumber() != R0) { + /* This is to set up for further propagation later. */ + if (srcreg->GetRegisterNumber() == optVal.GetReplaceReg()) { + if (optVal.GetRenameInsn() != nullptr) { + redefined = true; + break; + } else { + optVal.SetRenameInsn(ninsn); + optVal.SetRenameOpnd(dst); + optVal.SetRenameReg(dstreg->GetRegisterNumber()); + } + } + continue; + } + + if (redefined) { + break; + } + + /* x0 = x0 */ + ninsn->SetOperand(1, *optVal.GetMovSrc()); + break; + } + + return redefined; +} + +bool RaX0Opt::PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal) +{ + bool redefined = false; + for (Insn *ninsn = optVal.GetRenameInsn()->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext(); + ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != bb->GetLiveOutRegNO().end()) { + bb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + bb->InsertLiveOutRegNO(renameReg); + } + return redefined; +} + +void RaX0Opt::PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal) +{ + bool redefined = false; + for (Insn *ninsn = nextBb->GetFirstInsn(); ninsn != nextBb->GetLastInsn()->GetNext(); ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != nextBb->GetLiveOutRegNO().end()) { + nextBb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + nextBb->InsertLiveOutRegNO(renameReg); + } +} + +/* + * Perform optimization. + * First propagate x0 in a bb. + * Second propagation see comment in function. + */ +void RaX0Opt::PropagateX0() +{ + FOR_ALL_BB(bb, cgFunc) { + X0OptInfo optVal; + + Insn *insn = bb->GetFirstInsn(); + while ((insn != nullptr) && !insn->IsMachineInstruction()) { + insn = insn->GetNext(); + continue; + } + if (insn == nullptr) { + continue; + } + if (!PropagateX0DetectX0(insn, optVal)) { + continue; + } + + /* At this point the 1st insn is a mov from x0. */ + RegOperand &movDst = static_cast(insn->GetOperand(0)); + optVal.SetReplaceReg(movDst.GetRegisterNumber()); + optVal.ResetRenameInsn(); + bool redefined = PropagateX0Optimize(bb, insn, optVal); + if (redefined || (optVal.GetRenameInsn() == nullptr)) { + continue; + } + + /* Next pattern to help LSRA. Short cross bb live interval. + * Straight line code. Convert reg2 into bb local. + * bb1 + * mov reg2 <- x0 => mov reg2 <- x0 + * mov reg1 <- reg2 mov reg1 <- reg2 + * call call + * bb2 : livein< reg1 reg2 > + * use reg2 use reg1 + * .... + * reg2 not liveout + * + * Can allocate caller register for reg2. 
+ * + * Further propagation of very short live interval cross bb reg + */ + if (optVal.GetRenameReg() < kMaxRegNum) { /* dont propagate physical reg */ + continue; + } + BB *nextBb = bb->GetNext(); + if (nextBb == nullptr) { + break; + } + if (bb->GetSuccs().size() != 1 || nextBb->GetPreds().size() != 1) { + continue; + } + if (bb->GetSuccs().front() != nextBb || nextBb->GetPreds().front() != bb) { + continue; + } + if (bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) == bb->GetLiveOutRegNO().end() || + bb->GetLiveOutRegNO().find(optVal.GetRenameReg()) == bb->GetLiveOutRegNO().end() || + nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) != nextBb->GetLiveOutRegNO().end()) { + continue; + } + /* Replace replace_reg by rename_reg. */ + redefined = PropagateX0ForCurrBb(bb, optVal); + if (redefined) { + continue; + } + PropagateX0ForNextBb(nextBb, optVal); + } +} + +void VregRename::PrintRenameInfo(regno_t regno) const +{ + VregRenameInfo *info = (regno <= maxRegnoSeen) ? renameInfo[regno] : nullptr; + if (info == nullptr || (info->numDefs == 0 && info->numUses == 0)) { + return; + } + LogInfo::MapleLogger() << "reg: " << regno; + if (info->firstBBLevelSeen) { + LogInfo::MapleLogger() << " fromLevel " << info->firstBBLevelSeen->GetInternalFlag2(); + } + if (info->lastBBLevelSeen) { + LogInfo::MapleLogger() << " toLevel " << info->lastBBLevelSeen->GetInternalFlag2(); + } + if (info->numDefs) { + LogInfo::MapleLogger() << " defs " << info->numDefs; + } + if (info->numUses) { + LogInfo::MapleLogger() << " uses " << info->numUses; + } + if (info->numDefs) { + LogInfo::MapleLogger() << " innerDefs " << info->numInnerDefs; + } + if (info->numUses) { + LogInfo::MapleLogger() << " innerUses " << info->numInnerUses; + } + LogInfo::MapleLogger() << "\n"; +} + +void VregRename::PrintAllRenameInfo() const +{ + for (uint32 regno = 0; regno < cgFunc->GetMaxRegNum(); ++regno) { + PrintRenameInfo(regno); + } +} + +bool VregRename::IsProfitableToRename(const VregRenameInfo *info) const +{ + if ((info->numInnerDefs == 0) && (info->numUses != info->numInnerUses)) { + return true; + } + return false; +} + +void VregRename::RenameProfitableVreg(RegOperand *ropnd, const CGFuncLoops *loop) +{ + regno_t vreg = ropnd->GetRegisterNumber(); + VregRenameInfo *info = (vreg <= maxRegnoSeen) ? renameInfo[vreg] : nullptr; + if ((info == nullptr) || loop->GetMultiEntries().size() || (!IsProfitableToRename(info))) { + return; + } + + uint32 size = (ropnd->GetSize() == k64BitSize) ? k8ByteSize : k4ByteSize; + regno_t newRegno = cgFunc->NewVReg(ropnd->GetRegisterType(), size); + RegOperand *renameVreg = &cgFunc->CreateVirtualRegisterOperand(newRegno); + + const BB *header = loop->GetHeader(); + for (auto pred : header->GetPreds()) { + if (find(loop->GetBackedge().begin(), loop->GetBackedge().end(), pred) != loop->GetBackedge().end()) { + continue; + } + MOperator mOp = (ropnd->GetRegisterType() == kRegTyInt) ? ((size == k8BitSize) ? MOP_xmovrr : MOP_wmovrr) + : ((size == k8BitSize) ? 
MOP_xvmovd : MOP_xvmovs); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *renameVreg, *ropnd); + Insn *last = pred->GetLastInsn(); + if (last) { + if (last->IsBranch()) { + last->GetBB()->InsertInsnBefore(*last, newInsn); + } else { + last->GetBB()->InsertInsnAfter(*last, newInsn); + } + } else { + pred->AppendInsn(newInsn); + } + } + + for (auto bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + MemOperand *newMemOpnd = nullptr; + if (base != nullptr && base->IsVirtualRegister() && base->GetRegisterNumber() == vreg) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + newMemOpnd->SetBaseRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister() && offset->GetRegisterNumber() == vreg) { + if (newMemOpnd == nullptr) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + } + newMemOpnd->SetIndexRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() == vreg) { + insn->SetOperand(i, *renameVreg); + } + } + } + } +} + +void VregRename::RenameFindLoopVregs(const CGFuncLoops *loop) +{ + for (auto *bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RenameProfitableVreg(base, loop); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RenameProfitableVreg(offset, loop); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + RenameProfitableVreg(static_cast(opnd), loop); + } + } + } + } +} + +/* Only the bb level is important, not the bb itself. 
+ * So if multiple bbs have the same level, only one bb represents the level + */ +void VregRename::UpdateVregInfo(regno_t vreg, BB *bb, bool isInner, bool isDef) +{ + VregRenameInfo *info = renameInfo[vreg]; + if (info == nullptr) { + info = memPool->New(); + renameInfo[vreg] = info; + if (vreg > maxRegnoSeen) { + maxRegnoSeen = vreg; + } + } + if (isDef) { + info->numDefs++; + if (isInner) { + info->numInnerDefs++; + } + } else { + info->numUses++; + if (isInner) { + info->numInnerUses++; + } + } + if (info->firstBBLevelSeen) { + if (info->firstBBLevelSeen->GetInternalFlag2() > bb->GetInternalFlag2()) { + info->firstBBLevelSeen = bb; + } + } else { + info->firstBBLevelSeen = bb; + } + if (info->lastBBLevelSeen) { + if (info->lastBBLevelSeen->GetInternalFlag2() < bb->GetInternalFlag2()) { + info->lastBBLevelSeen = bb; + } + } else { + info->lastBBLevelSeen = bb; + } +} + +void VregRename::RenameGetFuncVregInfo() +{ + FOR_ALL_BB(bb, cgFunc) { + bool isInner = bb->GetLoop() ? bb->GetLoop()->GetInnerLoops().empty() : false; + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + regno_t vreg = base->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + regno_t vreg = offset->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + bool isdef = (md->opndMD[i])->IsRegDef(); + regno_t vreg = static_cast(opnd)->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, isdef); + } + } + } + } +} + +void VregRename::RenameFindVregsToRename(const CGFuncLoops *loop) +{ + if (loop->GetInnerLoops().empty()) { + RenameFindLoopVregs(loop); + return; + } + for (auto inner : loop->GetInnerLoops()) { + RenameFindVregsToRename(inner); + } +} + +void VregRename::VregLongLiveRename() +{ + if (cgFunc->GetLoops().size() == 0) { + return; + } + RenameGetFuncVregInfo(); + for (const auto *lp : cgFunc->GetLoops()) { + RenameFindVregsToRename(lp); + } +} + +void AArch64RaOpt::Run() +{ + RaX0Opt x0Opt(cgFunc); + x0Opt.PropagateX0(); + + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && CGOptions::DoVregRename()) { + /* loop detection considers EH bb. That is not handled. So C only for now. 
*/ + LoopFinder *lf = memPool->New(*cgFunc, *memPool); + lf->FormLoopHierarchy(); + VregRename rename(cgFunc, memPool); + Bfs localBfs(*cgFunc, *memPool); + rename.bfs = &localBfs; + rename.bfs->ComputeBlockOrder(); + rename.VregLongLiveRename(); + cgFunc->ClearLoopInfo(); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9cd461a373ce85db73ee608e19d6fd1421271d59 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -0,0 +1,1278 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +namespace maplebe { +/* MCC_ClearLocalStackRef clear 1 stack slot, and MCC_DecRefResetPair clear 2 stack slot, + * the stack positins cleared are recorded in callInsn->clearStackOffset + */ +constexpr short kFirstClearMemIndex = 0; +constexpr short kSecondClearMemIndex = 1; + +/* insert pseudo insn for parameters definition */ +void AArch64ReachingDefinition::InitStartGen() +{ + BB *bb = cgFunc->GetFirstBB(); + + /* Parameters should be define first. */ + CCImpl &parmLocator = *static_cast(cgFunc)->GetOrCreateLocator(cgFunc->GetCurCallConvKind()); + CCLocInfo pLoc; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); ++i) { + MIRType *type = cgFunc->GetFunction().GetNthParamType(i); + (void)parmLocator.LocateNextParm(*type, pLoc, i == 0, &cgFunc->GetFunction()); + if (pLoc.reg0 == 0) { + /* If is a large frame, parameter addressing mode is based vreg:Vra. */ + continue; + } + + uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex()); + if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) { + /* For C structure passing in one or two registers. */ + symSize = k8ByteSize; + } + RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + uint32 srcBitSize = ((symSize < k4ByteSize) ? 
k4ByteSize : symSize) * kBitsPerByte; + + MOperator mOp; + if (regType == kRegTyInt) { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_w; + } else { + mOp = MOP_pseudo_param_def_x; + } + } else { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_s; + } else { + mOp = MOP_pseudo_param_def_d; + } + } + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + RegOperand ®Opnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(pLoc.reg0), srcBitSize, regType); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd); + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + if (pLoc.reg1) { + RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(pLoc.reg1), + srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd1); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg2) { + RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(pLoc.reg2), + srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd2); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg3) { + RegOperand ®Opnd3 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(pLoc.reg3), + srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd3); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + + { + /* + * define memory address since store param may be transfered to stp and which with the short offset range. + * we can not get the correct definition before RA. + * example: + * add x8, sp, #712 + * stp x0, x1, [x8] // store param: _this Reg40_R313644 + * stp x2, x3, [x8,#16] // store param: Reg41_R333743 Reg42_R333622 + * stp x4, x5, [x8,#32] // store param: Reg43_R401297 Reg44_R313834 + * str x7, [x8,#48] // store param: Reg46_R401297 + */ + MIRSymbol *sym = cgFunc->GetFunction().GetFormal(i); + if (!sym->IsPreg()) { + MIRSymbol *firstSym = cgFunc->GetFunction().GetFormal(i); + const AArch64SymbolAlloc *firstSymLoc = + static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(firstSym->GetStIndex())); + int32 stOffset = cgFunc->GetBaseOffset(*firstSymLoc); + MIRType *firstType = cgFunc->GetFunction().GetNthParamType(i); + uint32 firstSymSize = cgFunc->GetBecommon().GetTypeSize(firstType->GetTypeIndex()); + uint32 firstStackSize = firstSymSize < k4ByteSize ? k4ByteSize : firstSymSize; + + MemOperand *memOpnd = aarchCGFunc->CreateStackMemOpnd(RFP, stOffset, firstStackSize * kBitsPerByte); + MOperator mopTemp = firstStackSize <= k4ByteSize ? MOP_pseudo_param_store_w : MOP_pseudo_param_store_x; + Insn &pseudoInsnTemp = cgFunc->GetInsnBuilder()->BuildInsn(mopTemp, *memOpnd); + bb->InsertInsnBegin(pseudoInsnTemp); + pseudoInsns.emplace_back(&pseudoInsnTemp); + } + } + } + + /* if function has "bl MCC_InitializeLocalStackRef", should define corresponding memory. 
*/ + AArch64CGFunc *a64CGFunc = static_cast<AArch64CGFunc *>(cgFunc); + + for (uint32 i = 0; i < a64CGFunc->GetRefCount(); ++i) { + MemOperand *memOpnd = a64CGFunc->CreateStackMemOpnd( + RFP, static_cast<int32>(a64CGFunc->GetBeginOffset() + i * k8BitSize), k64BitSize); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ref_init_x, *memOpnd); + + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + } +} + +/* insert pseudoInsns for ehBB; R0 and R1 are defined in pseudoInsns */ +void AArch64ReachingDefinition::InitEhDefine(BB &bb) +{ + AArch64CGFunc *aarchCGFunc = static_cast<AArch64CGFunc *>(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand &regOpnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd1); + bb.InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + + /* insert MOP_pseudo_eh_def_x R0. */ + RegOperand &regOpnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &newPseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd2); + bb.InsertInsnBegin(newPseudoInsn); + pseudoInsns.emplace_back(&newPseudoInsn); +} + +/* insert pseudoInsns for return value R0/V0 */ +void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) +{ + AArch64reg regNO = static_cast<AArch64CGFunc *>(cgFunc)->GetReturnRegisterNumber(); + if (regNO == kInvalidRegNO) { + return; + } + + if (regNO == R0) { + RegOperand &regOpnd = + static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyInt); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } else if (regNO == V0) { + RegOperand &regOpnd = + static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyFloat); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } +} + +void AArch64ReachingDefinition::AddRetPseudoInsns() +{ + uint32 exitBBSize = cgFunc->GetExitBBsVec().size(); + if (exitBBSize == 0) { + if (cgFunc->GetLastBB()->GetPrev()->GetFirstStmt() == cgFunc->GetCleanupLabel() && + cgFunc->GetLastBB()->GetPrev()->GetPrev()) { + AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()->GetPrev()); + } else { + AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()); + } + } else { + for (uint32 i = 0; i < exitBBSize; ++i) { + AddRetPseudoInsn(*cgFunc->GetExitBB(i)); + } + } +} + +void AArch64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) +{ + for (auto reg : static_cast<ListOperand &>(insn.GetOperand(index)).GetOperands()) { + regGen[bb.GetId()]->SetBit(static_cast<RegOperand *>(reg)->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) +{ + for (auto reg : static_cast<ListOperand &>(insn.GetOperand(index)).GetOperands()) { + regUse[bb.GetId()]->SetBit(static_cast<RegOperand *>(reg)->GetRegisterNumber()); + } +} + +/* all caller-saved registers are modified by a call insn */ +void AArch64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) +{ + if (CGOptions::DoIPARA()) { + std::set<regno_t> callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + for (auto i : callerSaveRegs) { + regGen[bb.GetId()]->SetBit(i); + } + } else { + for (uint32 i = R0; i <= V31; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast<AArch64reg>(i))) { + regGen[bb.GetId()]->SetBit(i); + } + } + } +} + +/* reg killed by call insn */ +bool
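[Reviewer note] The IPARA branching above decides, per call site, which registers a call kills. A minimal sketch of that decision; `KilledByCall`, `realKills`, and `isCallerSave` are hypothetical stand-ins for the maplebe APIs (`GetRealCallerSaveRegs`, `AArch64Abi::IsCallerSaveReg`), not part of this patch:

```cpp
#include <cstdint>
#include <set>

// Hypothetical model: with IPARA the callee's exact kill set is known;
// without it, every caller-saved register is conservatively clobbered.
bool KilledByCall(std::uint32_t regNO, bool doIPARA,
                  const std::set<std::uint32_t> &realKills,
                  bool (*isCallerSave)(std::uint32_t)) {
    if (doIPARA) {
        return realKills.count(regNO) != 0;  // precise per-callee answer
    }
    return isCallerSave(regNO);  // conservative AAPCS64 answer
}
```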
AArch64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const +{ + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + return callerSaveRegs.find(regNO) != callerSaveRegs.end(); + } else { + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); + } +} + +bool AArch64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, + regno_t regNO) const +{ + DEBUG_ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + if (CGOptions::DoIPARA()) { + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + return true; + } + } + return false; + } else { + return HasCallBetweenInsnInSameBB(startInsn, endInsn); + } +} +/* + * find definition for register between startInsn and endInsn. + * startInsn and endInsn is not in same BB + * make sure that in path between startBB and endBB there is no redefine. + */ +std::vector AArch64ReachingDefinition::FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, + Insn *endInsn) const +{ + DEBUG_ASSERT(startInsn->GetBB() != endInsn->GetBB(), "call FindRegDefBetweenInsn please"); + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + /* check startBB */ + BB *startBB = startInsn->GetBB(); + std::vector startBBdefInsnVec = FindRegDefBetweenInsn(regNO, startInsn->GetNext(), startBB->GetLastInsn()); + if (startBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*startBBdefInsnVec.begin()); + } + if (startBBdefInsnVec.size() > 1 || (startBBdefInsnVec.empty() && regOut[startBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && startInsn->GetNext() != nullptr && + KilledByCallBetweenInsnInSameBB(*startInsn->GetNext(), *startBB->GetLastInsn(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + /* check endBB */ + BB *endBB = endInsn->GetBB(); + std::vector endBBdefInsnVec = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn->GetPrev()); + if (endBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*endBBdefInsnVec.begin()); + } + if (endBBdefInsnVec.size() > 1 || (endBBdefInsnVec.empty() && regIn[endBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && endInsn->GetPrev() != nullptr && + KilledByCallBetweenInsnInSameBB(*endBB->GetFirstInsn(), *endInsn->GetPrev(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + InsnSet defInsnSet; + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[endBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + std::list pathStatus; + if (DFSFindRegInfoBetweenBB(*startBB, *endBB, regNO, visitedBB, pathStatus, kDumpRegIn)) { + defInsnVec.emplace_back(endInsn); + } + return defInsnVec; +} + +static bool IsRegInAsmList(Insn *insn, uint32 index, uint32 regNO, InsnSet &insnSet) +{ + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + if (static_cast(reg)->GetRegisterNumber() == regNO) { + insnSet.insert(insn); + return true; + } + } + return false; +} + +void AArch64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB 
&bb, InsnSet &defInsnSet) const +{ + if (!regGen[bb.GetId()]->TestBit(regNO)) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, defInsnSet)) { + continue; + } + IsRegInAsmList(insn, kAsmClobberListOpnd, regNO, defInsnSet); + continue; + } + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + (void)defInsnSet.insert(insn); + continue; + } + if (insn->IsRegDefined(regNO)) { + (void)defInsnSet.insert(insn); + } + } +} + +/* check whether call insn changed the stack status or not. */ +bool AArch64ReachingDefinition::CallInsnClearDesignateStackRef(const Insn &callInsn, int64 offset) const +{ + return offset == callInsn.GetClearStackOffset(kFirstClearMemIndex) || + offset == callInsn.GetClearStackOffset(kSecondClearMemIndex); +} + +/* + * find definition for stack memory operand between startInsn and endInsn. + * startInsn and endInsn must be in same BB and startInsn and endInsn are included + * special case: + * MCC_ClearLocalStackRef clear designate stack position, the designate stack position is thought defined + * for example: + * add x0, x29, #24 + * bl MCC_ClearLocalStackRef + */ +std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, + Insn *endInsn) const +{ + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + DEBUG_ASSERT(endInsn->GetId() >= startInsn->GetId(), "two insns must be in a same BB"); + if (!memGen[startInsn->GetBB()->GetId()]->TestBit(offset / kMemZoomSize)) { + return defInsnVec; + } + + for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (insn->IsCall()) { + if (CallInsnClearDesignateStackRef(*insn, offset)) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + if (!insn->IsSpillInsn() && cgFunc->IsAfterRegAlloc()) { + break; + } + + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if ((offset == memOffset) || + (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode()))) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + } + } + } + return defInsnVec; +} + +void AArch64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const +{ + if (!memGen[bb.GetId()]->TestBit(offset / kMemZoomSize)) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) 
{ + (void)defInsnSet.insert(insn); + } + continue; + } + if (CallInsnClearDesignateStackRef(*insn, offset)) { + (void)defInsnSet.insert(insn); + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast<MemOperand &>(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be an immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (offset == memOffset) { + (void)defInsnSet.insert(insn); + break; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)defInsnSet.insert(insn); + break; + } + } + } + } +} + +/* + * find the definition for a register iteratively. + * input: + * startBB: find the definition starting from startBB + * regNO: the number of the register to be found + * visitedBB: records the visited BBs + * defInsnSet: insns defining the register are saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, + std::vector<VisitStatus> &visitedBB, InsnSet &defInsnSet) const +{ + std::vector<Insn *> defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (regGen[predBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predBB->HasCall())) { + defInsnVec.clear(); + defInsnVec = FindRegDefBetweenInsn(regNO, predBB->GetFirstInsn(), predBB->GetLastInsn()); + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + } else if (regIn[predBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predBB, regNO, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (regGen[predEhBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predEhBB->HasCall())) { + FindRegDefInBB(regNO, *predEhBB, defInsnSet); + } + + if (regIn[predEhBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predEhBB, regNO, visitedBB, defInsnSet); + } + } +} + +/* + * find the definition for stack memory iteratively.
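[Reviewer note] The DFS above relies on a per-block visit marker so each predecessor is expanded at most once per walk. A stripped-down version of the same pattern; the `Node`/`CollectDefBlocks` types are illustrative only, not the maplebe classes:

```cpp
#include <cstdint>
#include <vector>

enum VisitStatus : std::uint8_t { kNotVisited, kNormalVisited };

struct Node {
    std::uint32_t id;           // assumed < visited.size()
    std::vector<Node *> preds;
    bool gen;  // block defines the value (stops the upward search)
    bool in;   // value reaches the block's entry (search may continue)
};

void CollectDefBlocks(const Node &n, std::vector<VisitStatus> &visited, std::vector<std::uint32_t> &defs) {
    for (Node *p : n.preds) {
        if (visited[p->id] != kNotVisited) {
            continue;  // each block is inspected once
        }
        visited[p->id] = kNormalVisited;
        if (p->gen) {
            defs.push_back(p->id);  // definition found on this path
        } else if (p->in) {
            CollectDefBlocks(*p, visited, defs);  // def lies further up
        }
    }
}
```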
+ * input: + * startBB: find the definition starting from startBB + * offset: the offset of the memory to be found + * visitedBB: records the visited BBs + * defInsnSet: insns defining the memory are saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, + std::vector<VisitStatus> &visitedBB, InsnSet &defInsnSet) const +{ + std::vector<Insn *> defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (memGen[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + defInsnVec.clear(); + defInsnVec = FindMemDefBetweenInsn(offset, predBB->GetFirstInsn(), predBB->GetLastInsn()); + DEBUG_ASSERT(!defInsnVec.empty(), "opnd must be defined in this bb"); + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + } else if (memIn[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predBB, offset, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (memGen[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + FindMemDefInBB(offset, *predEhBB, defInsnSet); + } + + if (memIn[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predEhBB, offset, visitedBB, defInsnSet); + } + } +} + +/* + * find the definition for a register. + * input: + * insn: the insn in which the register is used + * indexOrRegNO: the index of the register in the insn, or the number of the register to be found + * isRegNO: if indexOrRegNO is an index, this argument is false; otherwise it is true + * return: + * the set of definition insns for the register + */ +InsnSet AArch64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const +{ + uint32 regNO = indexOrRegNO; + if (!isRegNO) { + Operand &opnd = insn.GetOperand(indexOrRegNO); + auto &regOpnd = static_cast<RegOperand &>(opnd); + regNO = regOpnd.GetRegisterNumber(); + } + + std::vector<Insn *> defInsnVec; + if (regGen[insn.GetBB()->GetId()]->TestBit(regNO)) { + defInsnVec = FindRegDefBetweenInsn(regNO, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + InsnSet defInsnSet; + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + return defInsnSet; + } + std::vector<VisitStatus> visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (regGen[bb->GetId()]->TestBit(regNO)) { + FindRegDefInBB(regNO, *bb, defInsnSet); + } + } + } + } else { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + } + return defInsnSet; +} + +bool AArch64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, + BB *movBB) const +{ + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + if (startInsn->GetBB() == endInsn->GetBB()) { + if (startInsn->GetNextMachineInsn() == endInsn) { + return false; + } else { + return FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), endInsn->GetPreviousMachineInsn()); + } + } else { + /* check Start BB */ + BB *startBB = startInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), startBB->GetLastInsn())) { + return true; + } + /* check End BB */ + BB *endBB = endInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, endBB->GetFirstInsn(),
endInsn->GetPreviousMachineInsn())) { + return true; + } + /* Global : startBB cannot dominate BB which it doesn't dominate before */ + if (startBB == movBB) { + return false; /* it will not change dominate */ + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[movBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + if (DFSFindRegDomianBetweenBB(*startBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const +{ + CHECK_FATAL((startInsn.GetBB() != endInsn.GetBB()), "Is same BB!"); + /* check Start BB */ + BB *startBB = startInsn.GetBB(); + auto startInsnSet = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startBB->GetLastInsn()); + if (!startInsnSet.empty()) { + return true; + } + /* check End BB */ + BB *endBB = endInsn.GetBB(); + auto endInsnSet = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn.GetPrev()); + if (!endInsnSet.empty()) { + return true; + } + if (!startBB->GetSuccs().empty()) { + for (auto *succ : startBB->GetSuccs()) { + if (succ == endBB) { + return (!startInsnSet.empty() && !endInsnSet.empty()); + } + } + } + /* check bb Between start and end */ + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[startBB->GetId()] = kNormalVisited; + visitedBB[endBB->GetId()] = kNormalVisited; + return DFSFindRegDefBetweenBB(*startBB, *endBB, regNO, visitedBB); +} + +bool AArch64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const +{ + if (&startBB == &endBB) { + return false; + } + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regGen[succBB->GetId()]->TestBit(regNO)) { + return true; + } + if (DFSFindRegDefBetweenBB(*succBB, endBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, + std::vector &visitedBB) const +{ + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regIn[succBB->GetId()]->TestBit(regNO)) { + return true; + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (DFSFindRegDomianBetweenBB(*succBB, regNO, visitedBB)) { + return true; + } + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB, + std::list &pathStatus, DumpType infoType) const +{ + for (auto succBB : startBB.GetSuccs()) { + if (succBB == &endBB) { + for (auto status : pathStatus) { + if (!status) { + return true; + } + } + continue; + } + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + /* path is no clean check regInfo */ + bool isPathClean = true; + switch (infoType) { + case kDumpRegUse: { + isPathClean = !regUse[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegGen: { + isPathClean = !regGen[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegIn: { + isPathClean = !(regIn[succBB->GetId()]->TestBit(regNO) || regGen[succBB->GetId()]->TestBit(regNO)); + break; + } + default: + CHECK_FATAL(false, "NIY"); + } + pathStatus.emplace_back(isPathClean); + if 
(DFSFindRegInfoBetweenBB(*succBB, endBB, regNO, visitedBB, pathStatus, infoType)) { + return true; + } + pathStatus.pop_back(); + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const +{ + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + InsnSet Temp; + if (IsRegInAsmList(insn, kAsmInputListOpnd, regNO, Temp) || + IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, Temp)) { + return true; + } + continue; + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + continue; + } + + auto *regProp = md->opndMD[i]; + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + return true; + } + } + } + return false; +} + +/* + * find insn using register between startInsn and endInsn. 
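[Reviewer note] FindRegUsingBetweenInsn above boils down to a per-operand test: a register is used if it appears as a use operand, or as the base/index of a memory operand. A self-contained sketch with a hypothetical operand model (not the maplebe `Operand` hierarchy):

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Hypothetical minimal operand model for illustration.
struct Opnd {
    bool isUse = false;
    std::optional<std::uint32_t> reg;       // plain register operand
    std::optional<std::uint32_t> memBase;   // base register of a memory operand
    std::optional<std::uint32_t> memIndex;  // index register of a memory operand
};

bool UsesReg(const std::vector<Opnd> &opnds, std::uint32_t regNO) {
    for (const Opnd &op : opnds) {
        if (op.memBase == regNO || op.memIndex == regNO) {
            return true;  // the address computation reads the register
        }
        if (op.isUse && op.reg == regNO) {
            return true;
        }
    }
    return false;
}
```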
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, + InsnSet ®UseInsnSet) const +{ + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + IsRegInAsmList(insn, kAsmInputListOpnd, regNO, regUseInsnSet); + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, regUseInsnSet)) { + break; + } + continue; + } + /* if insn is call and regNO is caller-saved register, then regNO will not be used later */ + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + findFinish = true; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + (void)regUseInsnSet.insert(insn); + } + } + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + if (baseOpnd != nullptr && (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && baseOpnd->GetRegisterNumber() == regNO) { + findFinish = true; + } + } + + auto *regProp = md->opndMD[i]; + if (regProp->IsDef() && opnd.IsRegister() && + (static_cast(opnd).GetRegisterNumber() == regNO)) { + findFinish = true; + } + + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* + * find insn using stack memory operand between startInsn and endInsn. 
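[Reviewer note] For paired accesses this file repeatedly adds GetEachMemSizeOfPair to account for the second slot: an stp/ldp at offset `off` also touches `off + slotSize`. A tiny, assumption-free illustration of that offset arithmetic:

```cpp
#include <cstdint>

// A store pair defines two adjacent stack slots: the one at `stpOffset` and
// the one at `stpOffset + slotSize` (4 for 32-bit stp, 8 for 64-bit stp).
bool PairDefinesOffset(std::int64_t stpOffset, std::int64_t slotSize, std::int64_t query) {
    return query == stpOffset || query == stpOffset + slotSize;
}

int main() {
    // "stp x0, x1, [sp, #16]" covers offsets 16 and 24.
    return PairDefinesOffset(16, 8, 24) ? 0 : 1;
}
```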
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &memUseInsnSet) const +{ + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + DEBUG_ASSERT(endInsn->GetId() >= startInsn->GetId(), "end ID must be greater than or equal to start ID"); + + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + return true; + } + if (CallInsnClearDesignateStackRef(*insn, offset)) { + return true; + } + continue; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!opnd.IsMemoryAccessOperand()) { + continue; + } + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base == nullptr || !IsFrameReg(*base)) { + continue; + } + + DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "offset must not be Register for frame MemOperand"); + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetValue(); + + if (insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { + if (memOffset == offset) { + findFinish = true; + continue; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + findFinish = true; + continue; + } + } + + if (!md->opndMD[i]->IsUse()) { + continue; + } + + if (offset == memOffset) { + (void)memUseInsnSet.insert(insn); + } else if (insn->IsLoadPair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)memUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* find all definition for stack memory operand insn.opnd[index] */ +InsnSet AArch64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const +{ + InsnSet defInsnSet; + int64 memOffSet = 0; + if (!isOffset) { + Operand &opnd = insn.GetOperand(indexOrOffset); + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *indexReg = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || indexReg) { + return defInsnSet; + } + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + } else { + memOffSet = indexOrOffset; + } + std::vector defInsnVec; + if (memGen[insn.GetBB()->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + defInsnVec = FindMemDefBetweenInsn(memOffSet, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.begin(), defInsnVec.end()); + return defInsnSet; + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + + if 
(memGen[bb->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + FindMemDefInBB(memOffSet, *bb, defInsnSet); + } + } + } + } else { + DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet); + } + + return defInsnSet; +} + +/* + * find all insn using stack memory operand insn.opnd[index] + * secondMem is used to represent the second stack memory opernad in store pair insn + */ +InsnSet AArch64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const +{ + Operand &opnd = insn.GetOperand(index); + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + + InsnSet useInsnSet; + if (base == nullptr || !IsFrameReg(*base)) { + return useInsnSet; + } + + DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "IndexRegister no nullptr"); + DEBUG_ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (secondMem) { + DEBUG_ASSERT(insn.IsStorePair(), "second MemOperand can only be defined in stp insn"); + memOffSet += GetEachMemSizeOfPair(insn.GetMachineOpcode()); + } + /* memOperand may be redefined in current BB */ + bool findFinish = FindMemUseBetweenInsn(memOffSet, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet); + std::vector visitedBB(kMaxBBNum, false); + if (findFinish || !memOut[insn.GetBB()->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + if (insn.GetBB()->GetEhSuccs().size() != 0) { + DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, true); + } + } else { + DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, false); + } + if (!insn.GetBB()->IsCleanup() && firstCleanUpBB) { + if (memUse[firstCleanUpBB->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + findFinish = FindMemUseBetweenInsn(memOffSet, firstCleanUpBB->GetFirstInsn(), firstCleanUpBB->GetLastInsn(), + useInsnSet); + if (findFinish || + !memOut[firstCleanUpBB->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + return useInsnSet; + } + } + DFSFindUseForMemOpnd(*firstCleanUpBB, memOffSet, visitedBB, useInsnSet, false); + } + return useInsnSet; +} + +/* + * initialize bb.gen and bb.use + * if it is not computed in first time, bb.gen and bb.use must be cleared firstly + */ +void AArch64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) +{ + if (!firstTime && (mode & kRDRegAnalysis)) { + regGen[bb.GetId()]->ResetAllBit(); + regUse[bb.GetId()]->ResetAllBit(); + } + if (!firstTime && (mode & kRDMemAnalysis)) { + memGen[bb.GetId()]->ResetAllBit(); + memUse[bb.GetId()]->ResetAllBit(); + } + + if (bb.IsEmpty()) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + GenAllAsmDefRegs(bb, *insn, kAsmOutputListOpnd); + GenAllAsmDefRegs(bb, *insn, kAsmClobberListOpnd); + GenAllAsmUseRegs(bb, *insn, kAsmInputListOpnd); + continue; + } + if (insn->IsCall() || insn->IsTailCall()) { + GenAllCallerSavedRegs(bb, *insn); + InitMemInfoForClearStackCall(*insn); + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + auto *regProp = md->opndMD[i]; + if (opnd.IsList() && (mode & kRDRegAnalysis)) { + DEBUG_ASSERT(regProp->IsUse(), "ListOperand is used in insn"); + InitInfoForListOpnd(bb, opnd); + } else if 
(opnd.IsMemoryAccessOperand()) { + InitInfoForMemOperand(*insn, opnd, regProp->IsDef()); + } else if (opnd.IsConditionCode() && (mode & kRDRegAnalysis)) { + DEBUG_ASSERT(regProp->IsUse(), "condition code is used in insn"); + InitInfoForConditionCode(bb); + } else if (opnd.IsRegister() && (mode & kRDRegAnalysis)) { + InitInfoForRegOpnd(bb, opnd, regProp->IsDef()); + } + } + } +} + +void AArch64ReachingDefinition::InitMemInfoForClearStackCall(Insn &callInsn) +{ + if (!(mode & kRDMemAnalysis) || !callInsn.IsClearDesignateStackCall()) { + return; + } + int64 firstOffset = callInsn.GetClearStackOffset(kFirstClearMemIndex); + constexpr int64 defaultValOfClearMemOffset = -1; + if (firstOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(firstOffset / kMemZoomSize); + } + int64 secondOffset = callInsn.GetClearStackOffset(kSecondClearMemIndex); + if (secondOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(static_cast(secondOffset / kMemZoomSize)); + } +} + +void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef) +{ + DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr) { + return; + } + if ((mode & kRDMemAnalysis) && IsFrameReg(*base)) { + if (index != nullptr) { + SetAnalysisMode(kRDRegAnalysis); + return; + } + CHECK_FATAL(index == nullptr, "Existing [x29 + index] Memory Address"); + DEBUG_ASSERT(memOpnd.GetOffsetImmediate(), "offset must be a immediate value"); + int64 offsetVal = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if ((offsetVal % kMemZoomSize) != 0) { + SetAnalysisMode(kRDRegAnalysis); + } + + if (!isDef) { + memUse[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsLoadPair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memUse[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } else if (isDef) { + memGen[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsStorePair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memGen[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } + } + + if ((mode & kRDRegAnalysis) != 0) { + regUse[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + if (index != nullptr) { + regUse[insn.GetBB()->GetId()]->SetBit(index->GetRegisterNumber()); + } + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + /* Base operand has changed. 
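[Reviewer note] InitInfoForMemOperand records the base register as a definition for pre/post-indexed forms because write-back changes it. A minimal model of that rule; the `MemAccess` type is illustrative, not the maplebe `MemOperand`:

```cpp
#include <cstdint>
#include <set>

struct MemAccess {
    std::uint32_t baseReg;
    bool writeBack;  // pre/post-indexed forms such as "ldr x1, [x0], #8"
};

// The address is always read; with write-back the base is also redefined,
// so it belongs in the GEN set as well as the USE set.
void RecordMemRegs(const MemAccess &m, std::set<std::uint32_t> &use, std::set<std::uint32_t> &gen) {
    use.insert(m.baseReg);
    if (m.writeBack) {
        gen.insert(m.baseReg);
    }
}
```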
*/ + regGen[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + } + } +} + +void AArch64ReachingDefinition::InitInfoForListOpnd(const BB &bb, Operand &opnd) +{ + ListOperand *listOpnd = static_cast(&opnd); + for (auto listElem : listOpnd->GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "used Operand in call insn must be Register"); + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::InitInfoForConditionCode(const BB &bb) +{ + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + regUse[bb.GetId()]->SetBit(rflagReg.GetRegisterNumber()); +} + +void AArch64ReachingDefinition::InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef) +{ + RegOperand *regOpnd = static_cast(&opnd); + if (!isDef) { + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } else { + regGen[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +int32 AArch64ReachingDefinition::GetStackSize() const +{ + const int sizeofFplr = kDivide2 * kIntregBytelen; + return static_cast(static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() + + sizeofFplr); +} + +bool AArch64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const +{ + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); +} + +int64 AArch64ReachingDefinition::GetEachMemSizeOfPair(MOperator opCode) const +{ + switch (opCode) { + case MOP_wstp: + case MOP_sstp: + case MOP_wstlxp: + case MOP_wldp: + case MOP_xldpsw: + case MOP_sldp: + case MOP_wldaxp: + return kWordByteNum; + case MOP_xstp: + case MOP_dstp: + case MOP_xstlxp: + case MOP_xldp: + case MOP_dldp: + case MOP_xldaxp: + return kDoubleWordByteNum; + default: + return 0; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0df45e65c64066cf360245cd19db23ee03a555c0 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_reg_coalesce.h" +#include "cg.h" +#include "cg_option.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +namespace maplebe { + +#define REGCOAL_DUMP CG_DEBUG_FUNC(*cgFunc) + +bool AArch64LiveIntervalAnalysis::IsUnconcernedReg(const RegOperand ®Opnd) const +{ + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + if (regOpnd.GetRegisterNumber() == RZR) { + return true; + } + if (!regOpnd.IsVirtualRegister()) { + return true; + } + return false; +} + +LiveInterval *AArch64LiveIntervalAnalysis::GetOrCreateLiveInterval(regno_t regNO) +{ + LiveInterval *lr = GetLiveInterval(regNO); + if (lr == nullptr) { + lr = memPool->New(alloc); + vregIntervals[regNO] = lr; + lr->SetRegNO(regNO); + } + return lr; +} + +void AArch64LiveIntervalAnalysis::UpdateCallInfo() +{ + for (auto vregNO : vregLive) { + LiveInterval *lr = GetLiveInterval(vregNO); + if (lr == nullptr) { + return; + } + lr->IncNumCall(); + } +} + +void AArch64LiveIntervalAnalysis::SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef) +{ + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regOpnd)) { + return; + } + LiveInterval *lr = GetOrCreateLiveInterval(regNO); + uint32 point = isDef ? insn.GetId() : (insn.GetId() - 1); + lr->AddRange(insn.GetBB()->GetId(), point, vregLive.find(regNO) != vregLive.end()); + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (candidates.find(regNO) != candidates.end()) { + lr->AddRefPoint(&insn, isDef); + } + if (isDef) { + vregLive.erase(regNO); + } else { + vregLive.insert(regNO); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachDefOperand(Insn &insn) +{ + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast(opnd), insn, true); + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveIntervalByOp(opnd, insn, true); + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveIntervalByOp(opnd, insn, true); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachUseOperand(Insn &insn) +{ + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && i == kAsmInputListOpnd) { + for (auto opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast(opnd), insn, false); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveIntervalByOp(*op, insn, false); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = 
memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveIntervalByOp(*base, insn, false); + } + if (offset != nullptr) { + SetupLiveIntervalByOp(*offset, insn, false); + } + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast<PhiOperand &>(opnd); + for (auto opIt : phiOpnd.GetOperands()) { + SetupLiveIntervalByOp(*opIt.second, insn, false); + } + } else { + SetupLiveIntervalByOp(opnd, insn, false); + } + } +} + +/* handle live range for bb->live_out */ +void AArch64LiveIntervalAnalysis::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) +{ + --currPoint; + + if (liveOut >= kAllRegNum) { + (void)vregLive.insert(liveOut); + LiveInterval *lr = GetOrCreateLiveInterval(liveOut); + if (lr == nullptr) { + return; + } + lr->AddRange(bb.GetId(), currPoint, false); + return; + } +} + +void AArch64LiveIntervalAnalysis::CollectCandidate() +{ + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + RegOperand &regDest = static_cast<RegOperand &>(insn->GetOperand(kInsnFirstOpnd)); + RegOperand &regSrc = static_cast<RegOperand &>(insn->GetOperand(kInsnSecondOpnd)); + if (regDest.GetRegisterNumber() == regSrc.GetRegisterNumber()) { + continue; + } + if (regDest.IsVirtualRegister()) { + candidates.insert(regDest.GetRegisterNumber()); + } + if (regSrc.IsVirtualRegister()) { + candidates.insert(regSrc.GetRegisterNumber()); + } + } + } + } +} + +bool AArch64LiveIntervalAnalysis::IsRegistersCopy(Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + return true; + } + return false; +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervals() +{ + /* collect ref points and build interference only for candidates.
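[Reviewer note] ComputeLiveIntervals below numbers instructions from the bottom up and leaves a gap between consecutive IDs (`currPoint -= 2`) so that a use can be placed at (id - 1) and a def at id, as SetupLiveIntervalByOp does. A toy version of that numbering, assuming the same two-point spacing:

```cpp
#include <cstdint>
#include <vector>

struct MiniInsn {
    std::uint32_t id = 0;
};

// Bottom-up numbering that reserves one free point between instructions,
// so the use point (id - 1) of one insn never collides with another insn's def.
void NumberInsns(std::vector<MiniInsn> &bottomUp, std::uint32_t currPoint) {
    for (MiniInsn &insn : bottomUp) {  // walked in reverse program order, like the pass
        insn.id = currPoint;
        currPoint -= 2;  // two program points per instruction
    }
}
```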
*/ + CollectCandidate(); + + uint32 currPoint = + static_cast(cgFunc->GetTotalNumberOfInstructions()) + static_cast(bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveIntervalInLiveOut(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { + if (!runAnalysis) { + insn->SetId(currPoint); + } + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + --currPoint; + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + continue; + } + + ComputeLiveIntervalsForEachDefOperand(*insn); + ComputeLiveIntervalsForEachUseOperand(*insn); + + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + + /* distinguish use/def */ + currPoint -= 2; + } + for (auto lin : bb->GetLiveInRegNO()) { + if (lin >= kAllRegNum) { + LiveInterval *li = GetLiveInterval(lin); + if (li != nullptr) { + li->AddRange(bb->GetId(), currPoint, currPoint); + } + } + } + /* move one more step for each BB */ + --currPoint; + } + + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "\nAfter ComputeLiveIntervals\n"; + Dump(); + } +} + +void AArch64LiveIntervalAnalysis::CheckInterference(LiveInterval &li1, LiveInterval &li2) const +{ + auto ranges1 = li1.GetRanges(); + auto ranges2 = li2.GetRanges(); + bool conflict = false; + for (auto range : ranges1) { + auto bbid = range.first; + auto posVec1 = range.second; + auto it = ranges2.find(bbid); + if (it == ranges2.end()) { + continue; + } else { + /* check overlap */ + auto posVec2 = it->second; + for (auto pos1 : posVec1) { + for (auto pos2 : posVec2) { + if (!((pos1.first < pos2.first && pos1.second < pos2.first) || + (pos2.first < pos1.second && pos2.second < pos1.first))) { + conflict = true; + break; + } + } + } + } + } + if (conflict) { + li1.AddConflict(li2.GetRegNO()); + li2.AddConflict(li1.GetRegNO()); + } + return; +} + +/* replace regDest with regSrc. 
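[Reviewer note] CoalesceRegPair, which follows, rewrites every reference of the copy's destination to its source once the two intervals are known not to conflict. A sketch of that rename decision under a caller-supplied interference test; the `Copy`/`BuildRenames` helpers are hypothetical:

```cpp
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Copy {
    std::uint32_t dest;
    std::uint32_t src;
};

// interferes(a, b) is assumed to come from a prior live-interval analysis.
template <typename InterfFn>
std::unordered_map<std::uint32_t, std::uint32_t> BuildRenames(const std::vector<Copy> &copies,
                                                              InterfFn interferes) {
    std::unordered_map<std::uint32_t, std::uint32_t> rename;
    for (const Copy &c : copies) {
        if (c.dest != c.src && !interferes(c.dest, c.src)) {
            rename[c.dest] = c.src;  // every def/use of dest becomes src; the mov dies
        }
    }
    return rename;
}
```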
*/ +void AArch64LiveIntervalAnalysis::CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src) +{ + LiveInterval *lrDest = GetLiveInterval(regDest.GetRegisterNumber()); + if (lrDest == nullptr) { + return; + } + LiveInterval *lrSrc = GetLiveInterval(regSrc.GetRegisterNumber()); + /* replace dest with src */ + if (regDest.GetSize() != regSrc.GetSize()) { + CHECK_FATAL(cgFunc->IsExtendReg(regDest.GetRegisterNumber()) || cgFunc->IsExtendReg(regSrc.GetRegisterNumber()), + "expect equal size in reg coalesce"); + cgFunc->InsertExtendSet(regSrc.GetRegisterNumber()); + } + + regno_t destNO = regDest.GetRegisterNumber(); + /* replace all refPoints */ + for (auto insn : lrDest->GetDefPoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + for (auto insn : lrDest->GetUsePoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + + DEBUG_ASSERT(lrDest && lrSrc, "get live interval failed"); + CoalesceLiveIntervals(*lrDest, *lrSrc); +} + +void AArch64LiveIntervalAnalysis::CollectMoveForEachBB(BB &bb, std::vector &movInsns) const +{ + FOR_BB_INSNS_SAFE(insn, &bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + auto ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + movInsns.emplace_back(insn); + } + } +} + +void AArch64LiveIntervalAnalysis::CoalesceMoves(std::vector &movInsns, bool phiOnly) +{ + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + bool changed = false; + do { + changed = false; + for (auto insn : movInsns) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + if (!insn->IsPhiMovInsn() && phiOnly) { + continue; + } + if (a64CGFunc->IsRegRematCand(regDest) != a64CGFunc->IsRegRematCand(regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + if (a64CGFunc->IsRegRematCand(regDest) && a64CGFunc->IsRegRematCand(regSrc) && + !a64CGFunc->IsRegSameRematInfo(regDest, regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + LiveInterval *li1 = GetLiveInterval(regDest.GetRegisterNumber()); + LiveInterval *li2 = GetLiveInterval(regSrc.GetRegisterNumber()); + if (li1 == nullptr || li2 == nullptr) { + return; + } + CheckInterference(*li1, *li2); + if (!li1->IsConflictWith(regSrc.GetRegisterNumber()) || + (li1->GetDefPoint().size() == 1 && li2->GetDefPoint().size() == 1)) { + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "try to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + CoalesceRegPair(regDest, regSrc); + changed = true; + } else { + if (insn->IsPhiMovInsn() && phiOnly && REGCOAL_DUMP) { + LogInfo::MapleLogger() << "fail to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } while (changed); +} + +void AArch64LiveIntervalAnalysis::CoalesceRegisters() +{ + std::vector movInsns; + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + if (REGCOAL_DUMP) { + cgFunc->DumpCFGToDot("regcoal-"); + LogInfo::MapleLogger() 
<< "handle function: " << a64CGFunc->GetFunction().GetName() << std::endl; + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (!bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + + /* handle phi move first. */ + CoalesceMoves(movInsns, true); + + /* clean up dead mov */ + a64CGFunc->CleanupDeadMov(REGCOAL_DUMP); +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_info.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f36d982d6f8a69d5409038ddab0eb43cd661efb6 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_reg_info.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +void AArch64RegInfo::Init() +{ + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (IsYieldPointReg(regNO)) { + continue; + } + if (regNO == R29 && !GetCurrFunction()->UseFP()) { + AddToAllRegs(regNO); + continue; + } + if (!AArch64Abi::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void AArch64RegInfo::Fini() +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + a64CGFunc->AddtoCalleeSaved(RFP); + a64CGFunc->AddtoCalleeSaved(RLR); + a64CGFunc->NoteFPLRAddedToCalleeSavedList(); +} + +void AArch64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + for (auto reg : savedRegs) { + a64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool AArch64RegInfo::IsSpecialReg(regno_t regno) const +{ + AArch64reg reg = static_cast(regno); + if ((reg == RLR) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + + return false; +} +bool AArch64RegInfo::IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) +{ + return AArch64Abi::IsSpillRegInRA(static_cast(regNO), has3RegOpnd); +} +bool AArch64RegInfo::IsCalleeSavedReg(regno_t regno) const +{ + return AArch64Abi::IsCalleeSavedReg(static_cast(regno)); +} +bool AArch64RegInfo::IsYieldPointReg(regno_t regno) const +{ + /* when yieldpoint is enabled, x19 is reserved. 
*/ + if (GetCurrFunction()->GetCG()->GenYieldPoint()) { + return (static_cast(regno) == RYP); + } + return false; +} +bool AArch64RegInfo::IsUnconcernedReg(regno_t regNO) const +{ + /* RFP = 32, RLR = 31, RSP = 33, RZR = 34, ccReg */ + if ((regNO >= RLR && regNO <= RZR) || regNO == RFP) { + return true; + } + + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (IsYieldPointReg(regNO)) { + return true; + } + return false; +} + +bool AArch64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const +{ + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { + return true; + } + return IsUnconcernedReg(regNO); +} + +RegOperand *AArch64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, maplebe::RegType kind, uint32 flag) +{ + AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); + return &aarch64CgFunc->GetOrCreatePhysicalRegisterOperand(static_cast(regNO), size, kind, flag); +} + +ListOperand *AArch64RegInfo::CreateListOperand() +{ + AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); + return (aarch64CgFunc->CreateListOpnd(*aarch64CgFunc->GetFuncScopeAllocator())); +} + +Insn *AArch64RegInfo::BuildMovInstruction(Operand &opnd0, Operand &opnd1) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + MOperator mop = + a64CGFunc->PickMovInsn(static_cast(opnd0), static_cast(opnd1)); + Insn *newInsn = &a64CGFunc->GetInsnBuilder()->BuildInsn(mop, opnd0, opnd1); + return newInsn; +} + +Insn *AArch64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(regSize, stype), phyOpnd, memOpnd); +} + +Insn *AArch64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickLdInsn(regSize, stype), phyOpnd, memOpnd); +} + +Insn *AArch64RegInfo::BuildCommentInsn(const std::string &comment) +{ + return &(static_cast(GetCurrFunction())->CreateCommentInsn("split around loop begin")); +} + +MemOperand *AArch64RegInfo::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->GetOrCreatSpillMem(vrNum); +} +MemOperand *AArch64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest, + Insn &insn, regno_t regNum, bool &isOutOfRange) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->AdjustMemOperandIfOffsetOutOfRange(memOpnd, static_cast(vrNum), isDest, insn, + static_cast(regNum), isOutOfRange); +} +void AArch64RegInfo::FreeSpillRegMem(regno_t vrNum) +{ + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->FreeSpillRegMem(vrNum); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_regsaves.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_regsaves.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d685995f63669fc13e8422321aad990b61d80b9 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_regsaves.cpp @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_regsaves.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" +#include "aarch64_cg.h" +#include "aarch64_proepilog.h" +#include "cg_dominance.h" +#include "cg_ssa_pre.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +#define RS_DUMP GetEnabledDebug() +#define RS_EXTRA (RS_DUMP && true) +#define mLog LogInfo::MapleLogger() +#define threshold 8 +#define ONE_REG_AT_A_TIME 0 + +using BBId = uint32; + +void AArch64RegSavesOpt::InitData() +{ + calleeBitsDef = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retDef = memset_s(calleeBitsDef, cgFunc->NumBBs() * sizeof(CalleeBitsType), 0, + cgFunc->NumBBs() * sizeof(CalleeBitsType)); + calleeBitsUse = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retUse = memset_s(calleeBitsUse, cgFunc->NumBBs() * sizeof(CalleeBitsType), 0, + cgFunc->NumBBs() * sizeof(CalleeBitsType)); + CHECK_FATAL(retDef == EOK && retUse == EOK, "memset_s of calleesBits failed"); + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + const MapleVector &sp = aarchCGFunc->GetCalleeSavedRegs(); + if (!sp.empty()) { + if (std::find(sp.begin(), sp.end(), RFP) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RFP); + } + if (std::find(sp.begin(), sp.end(), RLR) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RLR); + } + } + + for (auto bb : bfs->sortedBBs) { + SetId2bb(bb); + } +} + +void AArch64RegSavesOpt::CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse) +{ + if (!opnd.IsRegister()) { + return; + } + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (!AArch64Abi::IsCalleeSavedReg(static_cast(regNO)) || (regNO >= R29 && regNO <= R31)) { + return; /* check only callee-save registers */ + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + /* First def */ + if (!IsCalleeBitSet(GetCalleeBitsDef(), bb.GetId(), regNO)) { + SetCalleeBit(GetCalleeBitsDef(), bb.GetId(), regNO); + } + } + if (isUse) { + /* Last use */ + SetCalleeBit(GetCalleeBitsUse(), bb.GetId(), regNO); + } +} + +void AArch64RegSavesOpt::GenerateReturnBBDefUse(const BB &bb) +{ + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64RegSavesOpt::ProcessAsmListOpnd(const BB &bb, Operand &opnd, uint32 idx) +{ + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case 
kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void AArch64RegSavesOpt::ProcessListOpnd(const BB &bb, Operand &opnd) +{ + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, false, true); + } +} + +void AArch64RegSavesOpt::ProcessMemOpnd(const BB &bb, Operand &opnd) +{ + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void AArch64RegSavesOpt::ProcessCondOpnd(const BB &bb) +{ + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +/* Record in each local BB the 1st def and the last use of a callee-saved + register */ +void AArch64RegSavesOpt::GetLocalDefUse() +{ + for (auto bbp : bfs->sortedBBs) { + BB &bb = *bbp; + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + continue; + } + + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + bool isAsm = (insn->GetMachineOpcode() == MOP_asm); + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsRegDef(); + bool isUse = regProp->IsRegUse(); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else { + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } /* for all operands */ + } /* for all insns */ + } /* for all sortedBBs */ + + if (RS_DUMP) { + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + mLog << i << " : " << calleeBitsDef[i] << " " << calleeBitsUse[i] << "\n"; + ; + } + } +} + +void AArch64RegSavesOpt::PrintBBs() const +{ + mLog << "RegSaves LiveIn/Out of BFS nodes:\n"; + for (auto *bb : bfs->sortedBBs) { + mLog << "< === > "; + mLog << bb->GetId(); + mLog << " pred:["; + for (auto *predBB : bb->GetPreds()) { + mLog << " " << predBB->GetId(); + } + mLog << "] succs:["; + for (auto *succBB : bb->GetSuccs()) { + mLog << " " << succBB->GetId(); + } + mLog << "]\n LiveIn of [" << bb->GetId() << "]: "; + for (auto liveIn : bb->GetLiveInRegNO()) { + mLog << liveIn << " "; + } + mLog << "\n LiveOut of [" << bb->GetId() << "]: "; + for (auto liveOut : bb->GetLiveOutRegNO()) { + mLog << liveOut << " "; + } + mLog << "\n"; + } +} + +/* 1st def MUST not have preceding save in dominator list. 
Each dominator + block must not have livein or liveout of the register */ +int32 AArch64RegSavesOpt::CheckCriteria(BB *bb, regno_t reg) const +{ + /* Already a site to save */ + SavedRegInfo *sp = bbSavedRegs[bb->GetId()]; + if (sp != nullptr && sp->ContainSaveReg(reg)) { + return 1; + } + + /* This preceding block has livein OR liveout of reg */ + MapleSet &liveIn = bb->GetLiveInRegNO(); + MapleSet &liveOut = bb->GetLiveOutRegNO(); + if (liveIn.find(reg) != liveIn.end() || liveOut.find(reg) != liveOut.end()) { + return 2; + } + + return 0; +} + +/* Return true if reg is already to be saved in its dominator list */ +bool AArch64RegSavesOpt::AlreadySavedInDominatorList(const BB *bb, regno_t reg) const +{ + BB *aBB = GetDomInfo()->GetDom(bb->GetId()); + + if (RS_DUMP) { + mLog << "Checking dom list starting " << bb->GetId() << " for saved R" << (reg - 1) << ":\n "; + } + while (!aBB->GetPreds().empty()) { /* can't go beyond prolog */ + if (RS_DUMP) { + mLog << aBB->GetId() << " "; + } + if (int t = CheckCriteria(aBB, reg)) { + if (RS_DUMP) { + if (t == 1) { + mLog << " --R" << (reg - 1) << " saved here, skip!\n"; + } else { + mLog << " --R" << (reg - 1) << " has livein/out, skip!\n"; + } + } + return true; /* previously saved, inspect next reg */ + } + aBB = GetDomInfo()->GetDom(aBB->GetId()); + } + return false; /* not previously saved, to save at bb */ +} + +/* Determine callee-save regs save locations and record them in bbSavedRegs. + Save is needed for a 1st def callee-save register at its dominator block + outside any loop. */ +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() +{ + if (RS_DUMP) { + mLog << "Determining regsave sites using dom list for " << cgFunc->GetName() << ":\n"; + } + for (auto *bb : bfs->sortedBBs) { + if (RS_DUMP) { + mLog << "BB: " << bb->GetId() << "\n"; + } + CalleeBitsType c = GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId()); + if (c == 0) { + continue; + } + CalleeBitsType mask = 1; + for (uint32 i = 0; i < static_cast(sizeof(CalleeBitsType) << 3); ++i) { + if ((c & mask) != 0) { + MapleSet &liveIn = bb->GetLiveInRegNO(); + regno_t reg = ReverseRegBitMap(i); + if (oneAtaTime && oneAtaTimeReg != reg) { + mask <<= 1; + continue; + } + if (liveIn.find(reg) == liveIn.end()) { /* not livein */ + BB *bbDom = bb; /* start from current BB */ + bool done = false; + while (bbDom->GetLoop() != nullptr) { + bbDom = GetDomInfo()->GetDom(bbDom->GetId()); + if (CheckCriteria(bbDom, reg)) { + done = true; + break; + } + DEBUG_ASSERT(bbDom, "Can't find dominator for save location"); + } + if (done) { + mask <<= 1; + continue; + } + + /* Check if a dominator of bbDom was already a location to save */ + if (AlreadySavedInDominatorList(bbDom, reg)) { + mask <<= 1; + continue; /* no need to save again, next reg */ + } + + /* Check if the newly found block is a dominator of block(s) in the current + to be saved list. If so, remove these blocks from bbSavedRegs */ + uint32 creg = i; + SavedBBInfo *sp = regSavedBBs[creg]; + if (sp == nullptr) { + regSavedBBs[creg] = memPool->New(alloc); + } else { + for (BB *sbb : sp->GetBBList()) { + for (BB *abb = sbb; !abb->GetPreds().empty();) { + if (abb->GetId() == bbDom->GetId()) { + /* Found! 
Don't plan to save in abb */ + sp->RemoveBB(sbb); + bbSavedRegs[sbb->GetId()]->RemoveSaveReg(reg); + if (RS_DUMP) { + mLog << " --R" << (reg - 1) << " save removed from BB" << sbb->GetId() << "\n"; + } + break; + } + abb = GetDomInfo()->GetDom(abb->GetId()); + } + } + } + regSavedBBs[creg]->InsertBB(bbDom); + + uint32 bid = bbDom->GetId(); + if (RS_DUMP) { + mLog << " --R" << (reg - 1); + mLog << " to save in " << bid << "\n"; + } + SavedRegInfo *ctx = GetbbSavedRegsEntry(bid); + if (!ctx->ContainSaveReg(reg)) { + ctx->InsertSaveReg(reg); + } + } + } + mask <<= 1; + CalleeBitsType t = c; + t >>= 1; + if (t == 0) { + break; /* short cut */ + } + } + } +} + +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); + if (RS_DUMP) { + mLog << "Determining regsave sites using ssa_pre for " << cgFunc->GetName() << ":\n"; + } + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto reg : callees) { + if (reg >= R29 && reg < V8) { + continue; /* save/restore in prologue, epilogue */ + } + if (oneAtaTime && oneAtaTimeReg != reg) { + continue; + } + + SsaPreWorkCand wkCand(&sprealloc); + for (uint32 bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + (void)wkCand.occBBs.insert(bid); + } + } + DoSavePlacementOpt(cgFunc, GetDomInfo(), &wkCand); + if (wkCand.saveAtEntryBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.saveAtProlog = true; + } + if (wkCand.saveAtProlog) { + /* Save cannot be applied, skip this reg and place save/restore + in prolog/epilog */ + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + if (RS_DUMP) { + mLog << "Save R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; + } + continue; + } + if (!wkCand.saveAtEntryBBs.empty()) { + for (uint32 entBB : wkCand.saveAtEntryBBs) { + if (RS_DUMP) { + std::string r = reg <= R28 ? 
"r" : "v"; + mLog << "BB " << entBB << " save: " << r << (reg - 1) << "\n"; + } + GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg); + } + } + } +} + +/* Determine calleesave regs restore locations by calling ssu-pre, + previous bbSavedRegs memory is cleared and restore locs recorded in it */ +bool AArch64RegSavesOpt::DetermineCalleeRestoreLocations() +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); + if (RS_DUMP) { + mLog << "Determining Callee Restore Locations:\n"; + } + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto reg : callees) { + if (reg >= R29 && reg < V8) { + continue; /* save/restore in prologue, epilogue */ + } + if (oneAtaTime && oneAtaTimeReg != reg) { + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + continue; + } + + SPreWorkCand wkCand(&sprealloc); + for (uint32 bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the saved BB locations of this callee-saved register */ + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->ContainSaveReg(reg)) { + (void)wkCand.saveBBs.insert(bid); + } + } + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + (void)wkCand.occBBs.insert(bid); + } + } + DoRestorePlacementOpt(cgFunc, GetPostDomInfo(), &wkCand); + if (wkCand.saveBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.restoreAtEpilog = true; + } + /* splitted empty block for critical edge present, skip function */ + MapleSet rset = wkCand.restoreAtEntryBBs; + for (auto bbid : wkCand.restoreAtExitBBs) { + (void)rset.insert(bbid); + } + for (auto bbid : rset) { + BB *bb = GetId2bb(bbid); + if (bb->GetKind() == BB::kBBGoto && bb->NumInsn() == 1) { + aarchCGFunc->GetProEpilogSavedRegs().clear(); + const MapleVector &calleesNew = aarchCGFunc->GetCalleeSavedRegs(); + for (auto areg : calleesNew) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(areg); + } + return false; + } + } + if (wkCand.restoreAtEpilog) { + /* Restore cannot b3 applied, skip this reg and place save/restore + in prolog/epilog */ + for (size_t bid = 1; bid < bbSavedRegs.size(); ++bid) { + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr && !sp->GetSaveSet().empty()) { + if (sp->ContainSaveReg(reg)) { + sp->RemoveSaveReg(reg); + } + } + } + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } + if (RS_DUMP) { + mLog << "Restore R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; + } + continue; + } + if (!wkCand.restoreAtEntryBBs.empty() || !wkCand.restoreAtExitBBs.empty()) { + for (uint32 entBB : wkCand.restoreAtEntryBBs) { + if (RS_DUMP) { + std::string r = reg <= R28 ? 
"r" : "v"; + mLog << "BB " << entBB << " restore: " << r << (reg - 1) << "\n"; + } + GetbbSavedRegsEntry(entBB)->InsertEntryReg(reg); + } + for (uint32 exitBB : wkCand.restoreAtExitBBs) { + BB *bb = GetId2bb(exitBB); + if (bb->GetKind() == BB::kBBIgoto) { + CHECK_FATAL(false, "igoto detected"); + } + Insn *lastInsn = bb->GetLastInsn(); + if (lastInsn != nullptr && lastInsn->IsBranch() && + (!lastInsn->GetOperand(0).IsRegister() || /* not a reg OR */ + (!AArch64Abi::IsCalleeSavedReg( /* reg but not cs */ + static_cast( + static_cast(lastInsn->GetOperand(0)) + .GetRegisterNumber()))))) { + /* To insert in this block - 1 instr */ + SavedRegInfo *sp = GetbbSavedRegsEntry(exitBB); + sp->InsertExitReg(reg); + sp->insertAtLastMinusOne = true; + } else if (bb->GetSuccs().size() > 1) { + for (BB *sbb : bb->GetSuccs()) { + if (sbb->GetPreds().size() > 1) { + CHECK_FATAL(false, "critical edge detected"); + } + /* To insert at all succs */ + GetbbSavedRegsEntry(sbb->GetId())->InsertEntryReg(reg); + } + } else { + /* otherwise, BB_FT etc */ + GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); + } + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << "BB " << exitBB << " restore: " << r << (reg - 1) << "\n"; + } + } + } + } + return true; +} + +int32 AArch64RegSavesOpt::FindNextOffsetForCalleeSave() const +{ + int32 offset = static_cast( + static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() - + (static_cast(cgFunc)->SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* FP/LR */) - + cgFunc->GetMemlayout()->SizeOfArgsToStackPass() - cgFunc->GetFunction().GetFrameReseverdSlot()); + + if (cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc->GetMemlayout()); + int saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + return offset; +} + +void AArch64RegSavesOpt::InsertCalleeSaveCode() +{ + uint32 bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + if (RS_DUMP) { + mLog << "Inserting Save: \n"; + } + int32 offset = FindNextOffsetForCalleeSave(); + offset += + static_cast((aarchCGFunc->GetProEpilogSavedRegs().size() - 2) << 3); // 2 for R29,RLR 3 for 8 bytes + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + if (bbSavedRegs[bid] != nullptr && !bbSavedRegs[bid]->GetSaveSet().empty()) { + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + for (auto areg : bbSavedRegs[bid]->GetSaveSet()) { + AArch64reg reg = static_cast(areg); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + std::string r = reg <= R28 ? 
"R" : "V"; + /* If reg not seen before, record offset and then update */ + if (regOffset.find(areg) == regOffset.end()) { + regOffset[areg] = static_cast(offset); + offset += static_cast(kIntregBytelen); + } + if (firstHalf == kRinvalid) { + /* 1st half in reg pair */ + firstHalf = reg; + if (RS_DUMP && reg > 0) { + mLog << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg] << "\n"; + } + } else { + if (regOffset[reg] == (regOffset[firstHalf] + k8ByteSize)) { + /* firstHalf & reg consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, firstHalf, reg, regType, + static_cast(regOffset[firstHalf])); + } else if (regOffset[firstHalf] == (regOffset[reg] + k8ByteSize)) { + /* reg & firstHalf consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, reg, firstHalf, regType, + static_cast(regOffset[reg])); + } else { + /* regs cannot be paired */ + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, firstHalf, regType, + static_cast(regOffset[firstHalf])); + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, reg, regType, + static_cast(regOffset[reg])); + } + firstHalf = kRinvalid; + if (RS_DUMP) { + mLog << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg] << "\n"; + } + } + } + + if (intRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, intRegFirstHalf, kRegTyInt, + static_cast(regOffset[intRegFirstHalf])); + } + + if (fpRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, fpRegFirstHalf, kRegTyFloat, + static_cast(regOffset[fpRegFirstHalf])); + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + } + cgFunc->SetCurBB(*saveBB); +} + +/* DFS to verify the save/restore are in pair(s) within a path */ +void AArch64RegSavesOpt::Verify(regno_t reg, BB *bb, std::set *visited, BBId *s, BBId *r) +{ + (void)visited->insert(bb); + BBId bid = bb->GetId(); + if (RS_EXTRA) { + mLog << bid << ","; /* path trace can be long */ + } + + if (bbSavedRegs[bid]) { + bool entryRestoreMet = false; + if (bbSavedRegs[bid]->ContainEntryReg(reg)) { + if (RS_EXTRA) { + mLog << "[^" << bid << "],"; // entry restore found + } + if (*s == 0) { + mLog << "Alert: nR@" << bid << " found w/o save\n"; + return; + } + /* complete s/xR found, continue */ + mLog << "(" << *s << "," << bid << ") "; + *r = bid; + entryRestoreMet = true; + } + if (bbSavedRegs[bid]->ContainSaveReg(reg)) { + if (RS_EXTRA) { + mLog << "[" << bid << "],"; // save found + } + if (*s != 0 && !entryRestoreMet) { + /* another save found before last save restored */ + mLog << "Alert: save@" << bid << " found after save@" << *s << "\n"; + return; + } + if (entryRestoreMet) { + *r = 0; + } + *s = bid; + } + if (bbSavedRegs[bid]->ContainExitReg(reg)) { + if (RS_EXTRA) { + mLog << "[" << bid << "$],"; // exit restore found + } + if (*s == 0) { + mLog << "Alert: xR@" << bid << " found w/o save\n"; + return; + } + /* complete s/xR found, continue */ + mLog << "(" << *s << "," << bid << ") "; + *r = bid; + } + } + + if (bb->GetSuccs().size() == 0) { + if (*s != 0 && *r == 0) { + mLog << "Alert: save@" << *s << " w/o restore reaches end"; + } + mLog << " " << bid << " ended>\n"; + *r = 0; + } + for (BB *sBB : bb->GetSuccs()) { + if (visited->count(sBB) == 0) { + Verify(reg, sBB, visited, s, r); + } + } + if (*s == bid) { + /* clear only when returned from previous calls to the orig save site */ + /* clear savebid since all of its succs already visited */ + *s = 0; + } + if 
(*r == bid) { + /* clear restorebid if all of its preds already visited */ + bool clear = true; + for (BB *pBB : bb->GetPreds()) { + if (visited->count(pBB) == 0) { + clear = false; + break; + } + } + if (clear) { + *r = 0; + } + } +} + +void AArch64RegSavesOpt::InsertCalleeRestoreCode() +{ + uint32 bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + if (RS_DUMP) { + mLog << "Inserting Restore: \n"; + } + int32 offset = FindNextOffsetForCalleeSave(); + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->GetEntrySet().empty() && sp->GetExitSet().empty()) { + continue; + } + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetEntrySet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << r << (reg - 1) << " entry restore in BB " << bid << " Saved Offset = " << offset << "\n"; + if (RS_EXTRA) { + mLog << " for save @BB [ "; + for (size_t b = 1; b < bbSavedRegs.size(); ++b) { + if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { + mLog << b << " "; + } + } + mLog << "]\n"; + } + } + + /* restore is always the same from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); /* do not let ebo remove these restores */ + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetExitSet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); + if (RS_DUMP) { + std::string r = reg <= R28 ? "R" : "V"; + mLog << r << (reg - 1) << " exit restore in BB " << bid << " Offset = " << offset << "\n"; + mLog << " for save @BB [ "; + for (size_t b = 1; b < bbSavedRegs.size(); ++b) { + if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { + mLog << b << " "; + } + } + mLog << "]\n"; + } + + /* restore is always single from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? 
kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); + } + if (sp->insertAtLastMinusOne) { + bb->InsertAtEndMinus1(*aarchCGFunc->GetDummyBB()); + } else { + bb->InsertAtEnd(*aarchCGFunc->GetDummyBB()); + } + } + } + cgFunc->SetCurBB(*saveBB); +} + +/* Callee-save registers save/restore placement optimization */ +void AArch64RegSavesOpt::Run() +{ + if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1) { + return; + } + +#if ONE_REG_AT_A_TIME + /* only do reg placement on the following register, others in pro/epilog */ + oneAtaTime = true; + oneAtaTimeReg = R25; +#endif + + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); + if (RS_DUMP) { + mLog << "##Calleeregs Placement for: " << cgFunc->GetName() << "\n"; + PrintBBs(); + } + +#ifdef REDUCE_COMPLEXITY + CGOptions::EnableRegSavesOpt(); + for (auto bb : bfs->sortedBBs) { + if (bb->GetSuccs().size() > threshold) { + CGOptions::DisableRegSavesOpt(); + return; + } + } +#endif + + /* Determined 1st def and last use of all callee-saved registers used + for all BBs */ + InitData(); + GetLocalDefUse(); + + /* Determine save sites at dominators of 1st def with no live-in and + not within loop */ + if (CGOptions::UseSsaPreSave()) { + DetermineCalleeSaveLocationsPre(); + } else { + DetermineCalleeSaveLocationsDoms(); + } + + /* Determine restore sites */ + if (!DetermineCalleeRestoreLocations()) { + return; + } + +#ifdef VERIFY + /* Verify saves/restores are in pair */ + if (RS_DUMP) { + std::vector rlist = {R19, R20, R21, R22, R23, R24, R25, R26, R27, R28}; + for (auto reg : rlist) { + mLog << "Verify calleeregs_placement data for R" << (reg - 1) << ":\n"; + std::set visited; + uint32 saveBid = 0; + uint32 restoreBid = 0; + Verify(reg, cgFunc->GetFirstBB(), &visited, &saveBid, &restoreBid); + mLog << "\nVerify Done\n"; + } + } +#endif + + /* Generate callee save instrs at found sites */ + InsertCalleeSaveCode(); + + /* Generate callee restores at found sites */ + InsertCalleeRestoreCode(); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4744ccb29edd7961766d3afc0f2a60065de75003 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -0,0 +1,1575 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_schedule.h" +#include +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_dependence.h" +#include "pressure.h" + +/* + * This phase is Instruction Scheduling. + * There is a local list scheduling, it is scheduling in basic block. 
* The entry is AArch64Schedule::ListScheduling, which traverses all basic blocks;
+ * for a basic block:
+ * 1. build a dependence graph;
+ * 2. combine clinit pairs and str&ldr pairs;
+ * 3. reorder instructions.
+ */
+namespace maplebe {
+namespace {
+constexpr uint32 kClinitAdvanceCycle = 10;
+constexpr uint32 kAdrpLdrAdvanceCycle = 2;
+constexpr uint32 kClinitTailAdvanceCycle = 4;
+constexpr uint32 kSecondToLastNode = 2;
+}  // namespace
+
+uint32 AArch64Schedule::maxUnitIndex = 0;
+/* reserve two registers for special purposes */
+int AArch64Schedule::intRegPressureThreshold = static_cast<int>(R27 - R0);
+int AArch64Schedule::fpRegPressureThreshold = static_cast<int>(V30 - V0);
+int AArch64Schedule::intCalleeSaveThresholdBase = static_cast<int>(R29 - R19);
+int AArch64Schedule::intCalleeSaveThresholdEnhance = static_cast<int>(R30 - R19);
+int AArch64Schedule::fpCalleeSaveThreshold = static_cast<int>(R16 - R8);
+/* Init schedule's data structures. */
+void AArch64Schedule::Init()
+{
+    readyList.clear();
+    nodeSize = nodes.size();
+    lastSeparatorIndex = 0;
+    mad->ReleaseAllUnits();
+    DepNode *node = nodes[0];
+
+    DEBUG_ASSERT(node->GetType() == kNodeTypeSeparator,
+                 "CG internal error, the first node should be a separator node.");
+
+    if (CGOptions::IsDruteForceSched() || CGOptions::IsSimulateSched()) {
+        for (auto nodeTemp : nodes) {
+            nodeTemp->SetVisit(0);
+            nodeTemp->SetState(kNormal);
+            nodeTemp->SetSchedCycle(0);
+            nodeTemp->SetEStart(0);
+            nodeTemp->SetLStart(0);
+        }
+    }
+
+    readyList.emplace_back(node);
+    node->SetState(kReady);
+
+    /* Init validPredsSize and validSuccsSize. */
+    for (auto nodeTemp : nodes) {
+        nodeTemp->SetValidPredsSize(nodeTemp->GetPreds().size());
+        nodeTemp->SetValidSuccsSize(nodeTemp->GetSuccs().size());
+    }
+}
+
+/*
+ * An insn that can be combined must meet these conditions:
+ * 1. it is a str/ldr insn;
+ * 2. its address mode is kAddrModeBOi, [baseReg, offset];
+ * 3. the register operand size equals the memory operand size;
+ * 4. if USE_32BIT_REF is defined, the register operand size should be 4 bytes;
+ * 5. for stp/ldp, the imm should be within -512 and 504 (64bit), or -256 and 252 (32bit);
+ * 6. pair instrs for 8/4-byte registers must have an imm that is a multiple of 8/4.
+ * If the insn can be combined, return true (see the illustrative sketch below).
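 *
 * As a reviewer's illustration of conditions 5 and 6 (not part of this patch;
 * OffsetFitsPair is a hypothetical helper name, the constants are the
 * kStpLdpImm* bounds used by CanCombine below):
 *
 *     // True if `offset` is encodable in an ldp/stp of `size`-byte registers:
 *     // inside the signed immediate window and a multiple of the access size.
 *     static bool OffsetFitsPair(int32 offset, uint32 size)
 *     {
 *         if (size == kIntregBytelen) {  // 64-bit registers
 *             if (offset <= kStpLdpImm64LowerBound || offset >= kStpLdpImm64UpperBound) {
 *                 return false;
 *             }
 *         } else if (size == (kIntregBytelen >> 1)) {  // 32-bit registers
 *             if (offset <= kStpLdpImm32LowerBound || offset >= kStpLdpImm32UpperBound) {
 *                 return false;
 *             }
 *         }
 *         return (static_cast<uint32>(offset) % size) == 0;  // condition 6
 *     }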
+ */ +bool AArch64Schedule::CanCombine(const Insn &insn) const +{ + MOperator opCode = insn.GetMachineOpcode(); + if ((opCode != MOP_xldr) && (opCode != MOP_wldr) && (opCode != MOP_dldr) && (opCode != MOP_sldr) && + (opCode != MOP_xstr) && (opCode != MOP_wstr) && (opCode != MOP_dstr) && (opCode != MOP_sstr)) { + return false; + } + + DEBUG_ASSERT(insn.GetOperand(1).IsMemoryAccessOperand(), "expects mem operands"); + auto &memOpnd = static_cast(insn.GetOperand(1)); + MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); + if ((addrMode != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + return false; + } + + auto ®Opnd = static_cast(insn.GetOperand(0)); + if (regOpnd.GetSize() != memOpnd.GetSize()) { + return false; + } + + uint32 size = regOpnd.GetSize() >> kLog2BitsPerByte; +#ifdef USE_32BIT_REF + if (insn.IsAccessRefField() && (size > (kIntregBytelen >> 1))) { + return false; + } +#endif /* USE_32BIT_REF */ + + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + if (offset == nullptr) { + return false; + } + int32 offsetValue = static_cast(offset->GetOffsetValue()); + if (size == kIntregBytelen) { /* 64 bit */ + if ((offsetValue <= kStpLdpImm64LowerBound) || (offsetValue >= kStpLdpImm64UpperBound)) { + return false; + } + } else if (size == (kIntregBytelen >> 1)) { /* 32 bit */ + if ((offsetValue <= kStpLdpImm32LowerBound) || (offsetValue >= kStpLdpImm32UpperBound)) { + return false; + } + } + + /* pair instr for 8/4 byte registers must have multiple of 8/4 for imm */ + if ((static_cast(offsetValue) % size) != 0) { + return false; + } + return true; +} + +/* After building dependence graph, combine str&ldr pairs. */ +void AArch64Schedule::MemoryAccessPairOpt() +{ + Init(); + std::vector memList; + + while ((!readyList.empty()) || !memList.empty()) { + DepNode *readNode = nullptr; + if (!readyList.empty()) { + readNode = readyList[0]; + readyList.erase(readyList.begin()); + } else { + if (memList[0]->GetType() != kNodeTypeEmpty) { + FindAndCombineMemoryAccessPair(memList); + } + readNode = memList[0]; + memList.erase(memList.begin()); + } + + /* schedule readNode */ + CHECK_FATAL(readNode != nullptr, "readNode is null in MemoryAccessPairOpt"); + readNode->SetState(kScheduled); + + /* add readNode's succs to readyList or memList. */ + for (auto succLink : readNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DescreaseValidPredsSize(); + if (succNode.GetValidPredsSize() == 0) { + DEBUG_ASSERT(succNode.GetState() == kNormal, "schedule state should be kNormal"); + succNode.SetState(kReady); + DEBUG_ASSERT(succNode.GetInsn() != nullptr, "insn can't be nullptr!"); + if (CanCombine(*succNode.GetInsn())) { + memList.emplace_back(&succNode); + } else { + readyList.emplace_back(&succNode); + } + } + } + } + + for (auto node : nodes) { + node->SetVisit(0); + node->SetState(kNormal); + } +} + +/* Find and combine correct MemoryAccessPair for memList[0]. 
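 *
 * Reviewer's sketch of the partner test applied here (ArePairable is a
 * hypothetical name; both insns are assumed to have already passed
 * CanCombine):
 *
 *     // True if two accesses can fuse into one ldp/stp: same opcode, same
 *     // base register and width, and offsets exactly one access apart.
 *     static bool ArePairable(Insn &a, Insn &b)
 *     {
 *         if (a.GetMachineOpcode() != b.GetMachineOpcode()) {
 *             return false;
 *         }
 *         auto *ma = static_cast<MemOperand *>(a.GetMemOpnd());
 *         auto *mb = static_cast<MemOperand *>(b.GetMemOpnd());
 *         if (ma->GetBaseRegister() != mb->GetBaseRegister() || ma->GetSize() != mb->GetSize()) {
 *             return false;
 *         }
 *         int32 offA = static_cast<int32>(ma->GetOffsetImmediate()->GetOffsetValue());
 *         int32 offB = static_cast<int32>(mb->GetOffsetImmediate()->GetOffsetValue());
 *         return static_cast<uint32>(abs(offB - offA)) == (ma->GetSize() >> kLog2BitsPerByte);
 *     }
 *
 * The real code additionally rejects an ldp whose two destination registers
 * would be identical, and pairs with mixed int/fp register types.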
*/ +void AArch64Schedule::FindAndCombineMemoryAccessPair(const std::vector &memList) +{ + DEBUG_ASSERT(!memList.empty(), "memList should not be empty"); + CHECK_FATAL(memList[0]->GetInsn() != nullptr, "memList[0]'s insn should not be nullptr"); + MemOperand *currMemOpnd = static_cast(memList[0]->GetInsn()->GetMemOpnd()); + DEBUG_ASSERT(currMemOpnd != nullptr, "opnd should not be nullptr"); + DEBUG_ASSERT(currMemOpnd->IsMemoryAccessOperand(), "opnd should be memOpnd"); + int32 currOffsetVal = static_cast(currMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + MOperator currMop = memList[0]->GetInsn()->GetMachineOpcode(); + /* find a depNode to combine with memList[0], and break; */ + for (auto it = std::next(memList.begin(), 1); it != memList.end(); ++it) { + DEBUG_ASSERT((*it)->GetInsn() != nullptr, "null ptr check"); + + if (currMop == (*it)->GetInsn()->GetMachineOpcode()) { + MemOperand *nextMemOpnd = static_cast((*it)->GetInsn()->GetMemOpnd()); + CHECK_FATAL(nextMemOpnd != nullptr, "opnd should not be nullptr"); + CHECK_FATAL(nextMemOpnd->IsMemoryAccessOperand(), "opnd should be MemOperand"); + int32 nextOffsetVal = static_cast(nextMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + uint32 size = currMemOpnd->GetSize() >> kLog2BitsPerByte; + if ((nextMemOpnd->GetBaseRegister() == currMemOpnd->GetBaseRegister()) && + (nextMemOpnd->GetSize() == currMemOpnd->GetSize()) && + (static_cast(abs(nextOffsetVal - currOffsetVal)) == size)) { + /* + * In ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile + * LDP on page K1-6125 declare that ldp can't use same reg + */ + if (((currMop == MOP_xldr) || (currMop == MOP_sldr) || (currMop == MOP_dldr) || + (currMop == MOP_wldr)) && + &(memList[0]->GetInsn()->GetOperand(0)) == &((*it)->GetInsn()->GetOperand(0))) { + continue; + } + if (static_cast((*it)->GetInsn()->GetOperand(0)).GetRegisterType() != + static_cast(memList[0]->GetInsn()->GetOperand(0)).GetRegisterType()) { + continue; + } + + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "Combine insn: " + << "\n"; + memList[0]->GetInsn()->Dump(); + (*it)->GetInsn()->Dump(); + } + depAnalysis->CombineMemoryAccessPair(*memList[0], **it, nextOffsetVal > currOffsetVal); + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "To: " + << "\n"; + memList[0]->GetInsn()->Dump(); + } + break; + } + } + } +} + +/* combine clinit pairs. */ +void AArch64Schedule::ClinitPairOpt() +{ + for (auto it = nodes.begin(); it != nodes.end(); ++it) { + auto nextIt = std::next(it, 1); + if (nextIt == nodes.end()) { + return; + } + + if ((*it)->GetInsn()->GetMachineOpcode() == MOP_adrp_ldr) { + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + depAnalysis->CombineClinit(**it, **(nextIt), false); + } else if ((*nextIt)->GetType() == kNodeTypeSeparator) { + nextIt = std::next(nextIt, 1); + if (nextIt == nodes.end()) { + return; + } + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + /* Do something. */ + depAnalysis->CombineClinit(**it, **(nextIt), true); + } + } + } + } +} + +/* Return the next node's index who is kNodeTypeSeparator. */ +uint32 AArch64Schedule::GetNextSepIndex() const +{ + return ((lastSeparatorIndex + kMaxDependenceNum) < nodeSize) ? (lastSeparatorIndex + kMaxDependenceNum) + : (nodes.size() - 1); +} + +/* Do register pressure schduling. 
*/ +void AArch64Schedule::RegPressureScheduling(BB &bb, MapleVector &nodes) +{ + RegPressureSchedule *regSchedule = memPool.New(cgFunc, alloc); + /* + * Get physical register amount currently + * undef, Int Reg, Float Reg, Flag Reg + */ + const std::vector kRegNumVec = {0, V0, (kMaxRegNum - V0) + 1, 1}; + regSchedule->InitBBInfo(bb, memPool, nodes); + regSchedule->BuildPhyRegInfo(kRegNumVec); + regSchedule->DoScheduling(nodes); +} + +/* + * Compute earliest start of the node, + * return value : the maximum estart. + */ +uint32 AArch64Schedule::ComputeEstart(uint32 cycle) +{ + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + if (CGOptions::IsDebugSched()) { + /* Check validPredsSize. */ + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + [[maybe_unused]] int32 schedNum = 0; + for (const auto *predLink : node->GetPreds()) { + if (predLink->GetFrom().GetState() == kScheduled) { + ++schedNum; + } + } + DEBUG_ASSERT((node->GetPreds().size() - schedNum) == node->GetValidPredsSize(), "validPredsSize error."); + } + } + + DEBUG_ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + (void)readyNodes.insert(readyNodes.begin(), readyList.begin(), readyList.end()); + + uint32 maxEstart = cycle; + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetVisit(0); + } + + for (auto *node : readyNodes) { + DEBUG_ASSERT(node->GetState() == kReady, "CG internal error, all nodes in ready list should be ready."); + if (node->GetEStart() < cycle) { + node->SetEStart(cycle); + } + } + + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + + for (const auto *succLink : node->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + if (succNode.GetType() == kNodeTypeSeparator) { + continue; + } + + if (succNode.GetEStart() < (node->GetEStart() + succLink->GetLatency())) { + succNode.SetEStart(node->GetEStart() + succLink->GetLatency()); + } + maxEstart = (maxEstart < succNode.GetEStart() ? succNode.GetEStart() : maxEstart); + succNode.IncreaseVisit(); + if ((succNode.GetVisit() >= succNode.GetValidPredsSize()) && (succNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&succNode); + } + DEBUG_ASSERT(succNode.GetVisit() <= succNode.GetValidPredsSize(), "CG internal error."); + } + } + + return maxEstart; +} + +/* Compute latest start of the node. 
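 *
 * Reviewer's worked example: for nodes A -> B -> C with edge latencies 2 and 3,
 * plus A -> D -> C with latencies 1 and 1, ComputeEstart gives EStart(A)=0,
 * EStart(B)=2, EStart(D)=1, EStart(C)=max(2+3, 1+1)=5, so maxEstart=5.
 * ComputeLstart then walks backwards from LStart(C)=5: LStart(B)=5-3=2,
 * LStart(D)=5-1=4, LStart(A)=min(2-2, 4-1)=0. A, B, and C have zero slack and
 * form the critical path; D can be issued anywhere in [1, 4]. A smaller LStart
 * means higher priority in CompareDepNode.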
*/ +void AArch64Schedule::ComputeLstart(uint32 maxEstart) +{ + /* std::vector is better than std::queue in run time */ + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + DEBUG_ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetLStart(maxEstart); + node->SetVisit(0); + } + + readyNodes.emplace_back(nodes[maxIndex]); + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + for (const auto *predLink : node->GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + if (predNode.GetState() == kScheduled) { + continue; + } + + if (predNode.GetLStart() > (node->GetLStart() - predLink->GetLatency())) { + predNode.SetLStart(node->GetLStart() - predLink->GetLatency()); + } + predNode.IncreaseVisit(); + if ((predNode.GetVisit() >= predNode.GetValidSuccsSize()) && (predNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&predNode); + } + + DEBUG_ASSERT(predNode.GetVisit() <= predNode.GetValidSuccsSize(), "CG internal error."); + } + } +} + +/* Compute earliest start and latest start of the node that is in readyList and not be scheduled. */ +void AArch64Schedule::UpdateELStartsOnCycle(uint32 cycle) +{ + ComputeLstart(ComputeEstart(cycle)); +} + +/* + * If all unit of this node need when it be scheduling is free, this node can be scheduled, + * Return true. + */ +bool DepNode::CanBeScheduled() const +{ + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + if (!unit->IsFree(i)) { + return false; + } + } + } + return true; +} + +/* Mark those unit that this node need occupy unit when it is being scheduled. */ +void DepNode::OccupyUnits() +{ + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + unit->Occupy(*insn, i); + } + } +} + +/* Get unit kind of this node's units[0]. */ +uint32 DepNode::GetUnitKind() const +{ + uint32 retValue = 0; + if ((units == nullptr) || (units[0] == nullptr)) { + return retValue; + } + + switch (units[0]->GetUnitId()) { + case kUnitIdSlotD: + retValue |= kUnitKindSlot0; + break; + case kUnitIdAgen: + case kUnitIdSlotSAgen: + retValue |= kUnitKindAgen; + break; + case kUnitIdSlotDAgen: + retValue |= kUnitKindAgen; + retValue |= kUnitKindSlot0; + break; + case kUnitIdHazard: + case kUnitIdSlotSHazard: + retValue |= kUnitKindHazard; + break; + case kUnitIdCrypto: + retValue |= kUnitKindCrypto; + break; + case kUnitIdMul: + case kUnitIdSlotSMul: + retValue |= kUnitKindMul; + break; + case kUnitIdDiv: + retValue |= kUnitKindDiv; + break; + case kUnitIdBranch: + case kUnitIdSlotSBranch: + retValue |= kUnitKindBranch; + break; + case kUnitIdStAgu: + retValue |= kUnitKindStAgu; + break; + case kUnitIdLdAgu: + retValue |= kUnitKindLdAgu; + break; + case kUnitIdFpAluS: + case kUnitIdFpAluD: + retValue |= kUnitKindFpAlu; + break; + case kUnitIdFpMulS: + case kUnitIdFpMulD: + retValue |= kUnitKindFpMul; + break; + case kUnitIdFpDivS: + case kUnitIdFpDivD: + retValue |= kUnitKindFpDiv; + break; + case kUnitIdSlot0LdAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindLdAgu; + break; + case kUnitIdSlot0StAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindStAgu; + break; + default: + break; + } + + return retValue; +} + +/* Count unit kinds to an array. Each element of the array indicates the unit kind number of a node set. 
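 *
 * The __builtin_ffs walk below can be read as this equivalent loop
 * (reviewer's sketch; __builtin_ctz returns the 0-based index of the lowest
 * set bit, so ctz+1 matches the 1-based result of __builtin_ffs):
 *
 *     uint32 kind = depNode.GetUnitKind();
 *     while (kind != 0) {
 *         ++array[__builtin_ctz(kind) + 1];  // one bucket per unit-kind bit
 *         kind &= kind - 1;                  // clear the lowest set bit
 *     }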
*/
+void AArch64Schedule::CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const
+{
+    (void)arraySize;
+    DEBUG_ASSERT(arraySize >= kUnitKindLast, "CG internal error. unit kind number is not correct.");
+    uint32 unitKind = depNode.GetUnitKind();
+    int32 index = static_cast<int32>(__builtin_ffs(unitKind));
+    while (index) {
+        DEBUG_ASSERT(index < kUnitKindLast, "CG internal error. index error.");
+        ++array[index];
+        unitKind &= ~(1u << (index - 1u));
+        index = __builtin_ffs(unitKind);
+    }
+}
+
+/* Check if a node uses a specific unit kind. */
+bool AArch64Schedule::IfUseUnitKind(const DepNode &depNode, uint32 index)
+{
+    uint32 unitKind = depNode.GetUnitKind();
+    int32 idx = static_cast<int32>(__builtin_ffs(unitKind));
+    while (idx) {
+        DEBUG_ASSERT(index < kUnitKindLast, "CG internal error. index error.");
+        if (idx == static_cast<int32>(index)) {
+            return true;
+        }
+        unitKind &= ~(1u << (idx - 1u));
+        idx = __builtin_ffs(unitKind);
+    }
+
+    return false;
+}
+
+/* A sample schedule according to the dependence graph only, to verify the correctness of the dependence graph. */
+void AArch64Schedule::RandomTest()
+{
+    Init();
+    nodes.clear();
+
+    while (!readyList.empty()) {
+        DepNode *currNode = readyList.back();
+        currNode->SetState(kScheduled);
+        readyList.pop_back();
+        nodes.emplace_back(currNode);
+
+        for (auto succLink : currNode->GetSuccs()) {
+            DepNode &succNode = succLink->GetTo();
+            bool ready = true;
+            for (auto predLink : succNode.GetPreds()) {
+                DepNode &predNode = predLink->GetFrom();
+                if (predNode.GetState() != kScheduled) {
+                    ready = false;
+                    break;
+                }
+            }
+
+            if (ready) {
+                DEBUG_ASSERT(succNode.GetState() == kNormal, "succNode must be kNormal");
+                readyList.emplace_back(&succNode);
+                succNode.SetState(kReady);
+            }
+        }
+    }
+}
+
+/* Remove target from readyList. */
+void AArch64Schedule::EraseNodeFromReadyList(const DepNode &target)
+{
+    EraseNodeFromNodeList(target, readyList);
+}
+
+/* Remove target from nodeList. */
+void AArch64Schedule::EraseNodeFromNodeList(const DepNode &target, MapleVector<DepNode *> &nodeList)
+{
+    for (auto it = nodeList.begin(); it != nodeList.end(); ++it) {
+        if ((*it) == &target) {
+            nodeList.erase(it);
+            return;
+        }
+    }
+
+    DEBUG_ASSERT(false, "CG internal error, erase node fail.");
+}
+
+/* Dump scheduling information for every node in availableReadyList in the current cycle. */
+void AArch64Schedule::DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo)
+{
+    LogInfo::MapleLogger() << "Current cycle[ " << scheduleInfo.GetCurrCycle() << " ], Available in readyList is : \n";
+    for (auto node : scheduleInfo.GetAvailableReadyList()) {
+        LogInfo::MapleLogger() << "NodeIndex[ " << node->GetIndex() << " ], Estart[ " << node->GetEStart()
+                               << " ], Lstart[ ";
+        LogInfo::MapleLogger() << node->GetLStart() << " ], slot[ ";
+        LogInfo::MapleLogger() << (node->GetReservation() == nullptr ? "SlotNone"
+                                                                     : node->GetReservation()->GetSlotName())
+                               << " ], ";
+        LogInfo::MapleLogger() << "succNodeNum[ " << node->GetSuccs().size() << " ], ";
+        node->GetInsn()->Dump();
+        LogInfo::MapleLogger() << '\n';
+    }
+}
+
+/*
+ * Select a node from availableReadyList according to some heuristic rules, then:
+ * 1. change targetNode's schedule information;
+ * 2. try to add successors of targetNode to readyList;
+ * 3. update the unscheduled node set when targetNode is the last kNodeTypeSeparator;
+ * 4. update AdvanceCycle.
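 *
 * In outline, the selection below behaves like this reviewer's sketch
 * (simplified; the real code also maintains register-pressure bookkeeping
 * and the separator/EStart/LStart updates):
 *
 *     DepNode *target = availableReadyList.front();
 *     for (DepNode *cand : availableReadyList) {
 *         if (CompareDepNode(*cand, *target, scheduleInfo)) {  // heuristic priority
 *             target = cand;
 *         }
 *     }
 *     while (!target->CanBeScheduled()) {  // stall until its pipeline units are free
 *         scheduleInfo.IncCurrCycle();
 *         mad->AdvanceCycle();
 *     }
 *     scheduleInfo.PushElemIntoScheduledNodes(target);
 *     EraseNodeFromReadyList(*target);
 *     UpdateReadyList(*target, readyList, true);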
+ */ +void AArch64Schedule::SelectNode(AArch64ScheduleProcessInfo &scheduleInfo) +{ + auto &availableReadyList = scheduleInfo.GetAvailableReadyList(); + auto it = availableReadyList.begin(); + DepNode *targetNode = *it; + if (availableReadyList.size() > 1) { + CalculateMaxUnitKindCount(scheduleInfo); + if (GetConsiderRegPressure()) { + UpdateReleaseRegInfo(scheduleInfo); + } + ++it; + for (; it != availableReadyList.end(); ++it) { + if (CompareDepNode(**it, *targetNode, scheduleInfo)) { + targetNode = *it; + } + } + } + /* The priority of free-reg node is higher than pipeline */ + while (!targetNode->CanBeScheduled()) { + scheduleInfo.IncCurrCycle(); + mad->AdvanceCycle(); + } + if (GetConsiderRegPressure() && !scheduleInfo.IsFirstSeparator()) { + UpdateLiveRegSet(scheduleInfo, *targetNode); + } + /* push target node into scheduled nodes and turn it into kScheduled state */ + scheduleInfo.PushElemIntoScheduledNodes(targetNode); + + EraseNodeFromReadyList(*targetNode); + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + /* Update readyList. */ + UpdateReadyList(*targetNode, readyList, true); + + if (targetNode->GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex and calculate those depNodes's estart and lstart + * between current separator node and new Separator node. + */ + if (!scheduleInfo.IsFirstSeparator()) { + lastSeparatorIndex += kMaxDependenceNum; + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.ResetIsFirstSeparator(); + } + } + + UpdateAdvanceCycle(scheduleInfo, *targetNode); +} + +void AArch64Schedule::UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode) +{ + switch (targetNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + scheduleInfo.SetAdvanceCycle(kClinitAdvanceCycle); + break; + case kLtAdrpLdr: + scheduleInfo.SetAdvanceCycle(kAdrpLdrAdvanceCycle); + break; + case kLtClinitTail: + scheduleInfo.SetAdvanceCycle(kClinitTailAdvanceCycle); + break; + default: + break; + } + + if ((scheduleInfo.GetAdvanceCycle() == 0) && mad->IsFullIssued()) { + if (targetNode.GetEStart() > scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetAdvanceCycle(1 + targetNode.GetEStart() - scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.SetAdvanceCycle(1); + } + } +} + +/* + * Advance mad's cycle until info's advanceCycle equal zero, + * and then clear info's availableReadyList. + */ +void AArch64Schedule::UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info) +{ + while (info.GetAdvanceCycle() > 0) { + info.IncCurrCycle(); + mad->AdvanceCycle(); + info.DecAdvanceCycle(); + } + info.ClearAvailableReadyList(); +} + +/* + * Forward traversal readyList, if a node in readyList can be Schedule, add it to availableReadyList. + * Return true, if availableReadyList is not empty. + */ +bool AArch64Schedule::CheckSchedulable(AArch64ScheduleProcessInfo &info) const +{ + for (auto node : readyList) { + if (GetConsiderRegPressure()) { + info.PushElemIntoAvailableReadyList(node); + } else { + if (node->CanBeScheduled() && node->GetEStart() <= info.GetCurrCycle()) { + info.PushElemIntoAvailableReadyList(node); + } + } + } + return info.AvailableReadyListIsEmpty() ? 
false : true; +} + +/* + * Calculate estimated machine cycle count for an input node series + */ +int AArch64Schedule::CalSeriesCycles(const MapleVector &nodes) +{ + int currentCycle = 0; + /* after an instruction is issued, the minimum cycle count for the next instruction is 1 */ + int instructionBaseCycleCount = 1; + std::map scheduledCycleMap; + for (auto node : nodes) { + int latencyCycle = 0; + /* calculate the latest begin time of this node based on its predecessor's issue time and latency */ + for (auto pred : node->GetPreds()) { + DepNode &from = pred->GetFrom(); + int latency = static_cast(pred->GetLatency()); + int fromCycle = scheduledCycleMap[&from]; + if (fromCycle + latency > latencyCycle) { + latencyCycle = fromCycle + latency; + } + } + /* the issue time of this node is the max value between the next cycle and latest begin time */ + if (currentCycle + instructionBaseCycleCount >= latencyCycle) { + currentCycle = currentCycle + instructionBaseCycleCount; + } else { + currentCycle = latencyCycle; + } + /* record this node's issue cycle */ + scheduledCycleMap[node] = currentCycle; + } + return currentCycle; +} + +/* After building dependence graph, schedule insns. */ +uint32 AArch64Schedule::DoSchedule() +{ + AArch64ScheduleProcessInfo scheduleInfo(nodeSize); + Init(); + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + InitLiveRegSet(scheduleInfo); + while (!readyList.empty()) { + UpdateScheduleProcessInfo(scheduleInfo); + /* Check if schedulable */ + if (!CheckSchedulable(scheduleInfo)) { + /* Advance cycle. */ + scheduleInfo.SetAdvanceCycle(1); + continue; + } + + if (scheduleInfo.GetLastUpdateCycle() < scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetLastUpdateCycle(scheduleInfo.GetCurrCycle()); + } + + if (CGOptions::IsDebugSched()) { + DumpDebugInfo(scheduleInfo); + } + + /* Select a node to scheduling */ + SelectNode(scheduleInfo); + } + + DEBUG_ASSERT(scheduleInfo.SizeOfScheduledNodes() == nodes.size(), "CG internal error, Not all nodes scheduled."); + + nodes.clear(); + (void)nodes.insert(nodes.begin(), scheduleInfo.GetScheduledNodes().begin(), scheduleInfo.GetScheduledNodes().end()); + /* the second to last node is the true last node, because the last is kNodeTypeSeparator node */ + DEBUG_ASSERT(nodes.size() - 2 >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - 2]->GetSchedCycle()); +} + +struct RegisterInfoUnit { + RegisterInfoUnit() : intRegNum(0), fpRegNum(0), ccRegNum(0) {} + uint32 intRegNum = 0; + uint32 fpRegNum = 0; + uint32 ccRegNum = 0; +}; + +RegisterInfoUnit GetDepNodeDefType(const DepNode &depNode, CGFunc &f) +{ + RegisterInfoUnit rIU; + for (auto defRegNO : depNode.GetDefRegnos()) { + RegType defRegTy = AArch64ScheduleProcessInfo::GetRegisterType(f, defRegNO); + if (defRegTy == kRegTyInt) { + rIU.intRegNum++; + } else if (defRegTy == kRegTyFloat) { + rIU.fpRegNum++; + } else if (defRegTy == kRegTyCc) { + rIU.ccRegNum++; + DEBUG_ASSERT(rIU.ccRegNum <= 1, "spill cc reg?"); + } else { + CHECK_FATAL(false, "NIY aarch64 register type"); + } + } + /* call node will not increase reg def pressure */ + if (depNode.GetInsn() != nullptr && depNode.GetInsn()->IsCall()) { + rIU.intRegNum = 0; + rIU.fpRegNum = 0; + } + return rIU; +} + +AArch64Schedule::CSRResult AArch64Schedule::DoCSR(DepNode &node1, DepNode &node2, + AArch64ScheduleProcessInfo &scheduleInfo) const +{ + RegisterInfoUnit defRIU1 = GetDepNodeDefType(node1, cgFunc); + RegisterInfoUnit defRIU2 = GetDepNodeDefType(node2, cgFunc); + /* do not increase callee save 
pressure before call */ + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(true)) >= intCalleeSaveThreshold) { + if (defRIU1.intRegNum > 0 && defRIU2.intRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.intRegNum >= scheduleInfo.GetFreeIntRegs(node1)) || + (csrInfo == kNode2 && defRIU2.intRegNum >= scheduleInfo.GetFreeIntRegs(node2))) { + return csrInfo; + } + } + } + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(false)) >= fpCalleeSaveThreshold) { + if (defRIU1.fpRegNum > 0 && defRIU2.fpRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.fpRegNum >= scheduleInfo.GetFreeFpRegs(node1)) || + (csrInfo == kNode2 && defRIU2.fpRegNum >= scheduleInfo.GetFreeFpRegs(node2))) { + return csrInfo; + } + } + } + auto FindFreeRegNode = [&](bool isInt) -> CSRResult { + auto freeRegNodes = isInt ? scheduleInfo.GetFreeIntRegNodeSet() : scheduleInfo.GetFreeFpRegNodeSet(); + if (freeRegNodes.find(&node1) != freeRegNodes.end() && freeRegNodes.find(&node2) == freeRegNodes.end()) { + return kNode1; + } + if (freeRegNodes.find(&node1) == freeRegNodes.end() && freeRegNodes.find(&node2) != freeRegNodes.end()) { + return kNode2; + } + return kDoCSP; + }; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (FindFreeRegNode(true) != kDoCSP) { + return FindFreeRegNode(true); + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (FindFreeRegNode(false) != kDoCSP) { + return FindFreeRegNode(false); + } + } + + bool canDoCSPFurther = false; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (defRIU1.intRegNum != defRIU2.intRegNum) { + return defRIU1.intRegNum < defRIU2.intRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = defRIU1.intRegNum == 0; + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (defRIU1.fpRegNum != defRIU2.fpRegNum) { + return defRIU1.fpRegNum < defRIU2.fpRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = (defRIU1.fpRegNum == 0 && canDoCSPFurther); + } + } + /* if both nodes are going to increase reg pressure, do not do CSP further */ + return canDoCSPFurther ? kDoCSP : (node1.GetInsn()->GetId() < node2.GetInsn()->GetId() ? kNode1 : kNode2); +} + +AArch64Schedule::CSRResult AArch64Schedule::ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const +{ + uint32 node1ID = node1.GetInsn()->GetId(); + uint32 node2ID = node2.GetInsn()->GetId(); + bool order = node1ID < node2ID; /* true -- node1 before node2 false -- node1 after node2 */ + Insn *beginInsn = order ? node1.GetInsn() : node2.GetInsn(); + uint32 finialId = order ? node2ID : node1ID; + for (Insn *checkInsn = beginInsn; (checkInsn != nullptr && checkInsn->GetId() <= finialId); + checkInsn = checkInsn->GetNextMachineInsn()) { + if (checkInsn->IsCall()) { + return order ? 
kNode1 : kNode2;
+        }
+    }
+    return kDoCSP;
+}
+
+/*
+ * Compare priorities of node1 and node2 according to some heuristic rules;
+ * return true if node1's priority is higher.
+ * crp -- consider reg pressure
+ */
+bool AArch64Schedule::CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const
+{
+    /*
+     * strategy CSR -- code schedule for register pressure
+     * if pressure is above the threshold, select the node that can reduce register pressure
+     */
+    if (GetConsiderRegPressure()) {
+        switch (DoCSR(node1, node2, scheduleInfo)) {
+            case kNode1:
+                return true;
+            case kNode2:
+                return false;
+            default:
+                break;
+        }
+    }
+    /* strategy CSP -- code schedule for CPU pipeline */
+    /* less LStart first */
+    if (node1.GetLStart() != node2.GetLStart()) {
+        return node1.GetLStart() < node2.GetLStart();
+    }
+
+    /* max unit kind use */
+    bool use1 = IfUseUnitKind(node1, maxUnitIndex);
+    bool use2 = IfUseUnitKind(node2, maxUnitIndex);
+    if (use1 != use2) {
+        return use1;
+    }
+
+    /* slot0 first */
+    SlotType slotType1 = node1.GetReservation()->GetSlot();
+    SlotType slotType2 = node2.GetReservation()->GetSlot();
+    if (slotType1 == kSlots) {
+        slotType1 = kSlot0;
+    }
+    if (slotType2 == kSlots) {
+        slotType2 = kSlot0;
+    }
+    if (slotType1 != slotType2) {
+        return slotType1 < slotType2;
+    }
+
+    /* more succNodes first */
+    if (node1.GetSuccs().size() != node2.GetSuccs().size()) {
+        return node1.GetSuccs().size() > node2.GetSuccs().size();
+    }
+
+    /* default order */
+    return node1.GetInsn()->GetId() < node2.GetInsn()->GetId();
+}
+
+/*
+ * Calculate the number of each unit kind used by availableReadyList's nodes and save the max in maxUnitIndex
+ */
+void AArch64Schedule::CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo)
+{
+    uint32 unitKindCount[kUnitKindLast] = {0};
+    for (auto node : scheduleInfo.GetAvailableReadyList()) {
+        CountUnitKind(*node, unitKindCount, kUnitKindLast);
+    }
+
+    uint32 maxCount = 0;
+    maxUnitIndex = 0;
+    for (size_t i = 1; i < kUnitKindLast; ++i) {
+        if (maxCount < unitKindCount[i]) {
+            maxCount = unitKindCount[i];
+            maxUnitIndex = i;
+        }
+    }
+}
+
+/*
+ * Update the release reg node set.
+ * When a node in this set is scheduled, register pressure can be reduced.
+ */
+void AArch64Schedule::UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo)
+{
+    auto &availableReadyList = scheduleInfo.GetAvailableReadyList();
+    scheduleInfo.ClearALLFreeRegNodeSet();
+    /* Traverse availableReadyList and add nodes that can reduce register pressure to the release reg node set */
+    for (auto node : availableReadyList) {
+        std::set<regno_t> freeRegNO = CanFreeRegister(*node);
+        if (!freeRegNO.empty()) {
+            scheduleInfo.VaryFreeRegSet(cgFunc, freeRegNO, *node);
+        }
+    }
+}
+
+/*
+ * Return the registers that an instruction can release after being scheduled
+ */
+std::set<regno_t> AArch64Schedule::CanFreeRegister(const DepNode &node) const
+{
+    std::set<regno_t> freeRegSet;
+    for (auto reg : node.GetUseRegnos()) {
+        if (RegPressureSchedule::IsLastUse(node, reg)) {
+            freeRegSet.emplace(reg);
+        }
+    }
+    return freeRegSet;
+}
+
+/*
+ * After an instruction is scheduled, update the live reg set
+ */
+void AArch64Schedule::UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &node)
+{
+    /* dealing with def regs: add each def reg into the live reg set */
+    size_t i = 1;
+    for (auto &defReg : node.GetDefRegnos()) {
+        if (scheduleInfo.FindIntLiveReg(defReg) == 0 && scheduleInfo.FindFpLiveReg(defReg) == 0) {
+            scheduleInfo.VaryLiveRegSet(cgFunc, defReg, true);
+        }
+        /* delete dead def reg from live
reg set because its live range is only 1 cycle */ + if (node.GetRegDefs(i) == nullptr && liveOutRegNo.find(defReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, defReg, false); + } + ++i; + } + /* dealing with use reg, delete use reg from live reg set if this instruction is last use of it */ + for (auto &useReg : node.GetUseRegnos()) { + if (RegPressureSchedule::IsLastUse(node, useReg)) { + if ((scheduleInfo.FindIntLiveReg(useReg) != 0 || scheduleInfo.FindFpLiveReg(useReg) != 0) && + liveOutRegNo.find(useReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, useReg, false); + } + } + } +} + +/* + * Initialize the live reg set based on the live in reg information + */ +void AArch64Schedule::InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo) +{ + if (GetConsiderRegPressure()) { + for (auto reg : liveInRegNo) { + scheduleInfo.VaryLiveRegSet(cgFunc, reg, true); + } + } +} + +/* + * A simulated schedule: + * scheduling instruction in original order to calculate original execute cycles. + */ +uint32 AArch64Schedule::SimulateOnly() +{ + uint32 currCycle = 0; + uint32 advanceCycle = 0; + Init(); + + for (uint32 i = 0; i < nodes.size();) { + while (advanceCycle > 0) { + ++currCycle; + mad->AdvanceCycle(); + --advanceCycle; + } + + DepNode *targetNode = nodes[i]; + if ((currCycle >= targetNode->GetEStart()) && targetNode->CanBeScheduled()) { + targetNode->SetSimulateCycle(currCycle); + targetNode->OccupyUnits(); + + /* Update estart. */ + for (auto succLink : targetNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + uint32 eStart = currCycle + succLink->GetLatency(); + if (succNode.GetEStart() < eStart) { + succNode.SetEStart(eStart); + } + } + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "[Simulate] TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + switch (targetNode->GetInsn()->GetLatencyType()) { + case kLtClinit: + advanceCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advanceCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advanceCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + ++i; + } else { + advanceCycle = 1; + } + } + /* the second to last node is the true last node, because the last is kNodeTypeSeparator nod */ + DEBUG_ASSERT(nodes.size() - kSecondToLastNode >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - kSecondToLastNode]->GetSimulateCycle()); +} + +/* Restore dependence graph to normal CGIR. */ +void AArch64Schedule::FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis) +{ + bb.ClearInsns(); + + const Insn *prevLocInsn = (bb.GetPrev() != nullptr ? bb.GetPrev()->GetLastLoc() : nullptr); + for (auto node : nodes) { + /* Append comments first. */ + for (auto comment : node->GetComments()) { + if (comment->GetPrev() != nullptr && comment->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*comment->GetPrev()); + } + bb.AppendInsn(*comment); + } + /* Append insn. */ + if (!node->GetClinitInsns().empty()) { + for (auto clinit : node->GetClinitInsns()) { + bb.AppendInsn(*clinit); + } + } else if (node->GetType() == kNodeTypeNormal) { + if (node->GetInsn()->GetPrev() != nullptr && node->GetInsn()->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*node->GetInsn()->GetPrev()); + } + bb.AppendInsn(*node->GetInsn()); + } + + /* Append cfi instructions. 
+
+/* Restore dependence graph to normal CGIR. */
+void AArch64Schedule::FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis)
+{
+    bb.ClearInsns();
+
+    const Insn *prevLocInsn = (bb.GetPrev() != nullptr ? bb.GetPrev()->GetLastLoc() : nullptr);
+    for (auto node : nodes) {
+        /* Append comments first. */
+        for (auto comment : node->GetComments()) {
+            if (comment->GetPrev() != nullptr && comment->GetPrev()->IsDbgInsn()) {
+                bb.AppendInsn(*comment->GetPrev());
+            }
+            bb.AppendInsn(*comment);
+        }
+        /* Append insn. */
+        if (!node->GetClinitInsns().empty()) {
+            for (auto clinit : node->GetClinitInsns()) {
+                bb.AppendInsn(*clinit);
+            }
+        } else if (node->GetType() == kNodeTypeNormal) {
+            if (node->GetInsn()->GetPrev() != nullptr && node->GetInsn()->GetPrev()->IsDbgInsn()) {
+                bb.AppendInsn(*node->GetInsn()->GetPrev());
+            }
+            bb.AppendInsn(*node->GetInsn());
+        }
+
+        /* Append cfi instructions. */
+        for (auto cfi : node->GetCfiInsns()) {
+            bb.AppendInsn(*cfi);
+        }
+    }
+    bb.SetLastLoc(prevLocInsn);
+
+    for (auto lastComment : depAnalysis.GetLastComments()) {
+        bb.AppendInsn(*lastComment);
+    }
+}
+
+/* For every node of nodes, update its bruteForceSchedCycle. */
+void AArch64Schedule::UpdateBruteForceSchedCycle()
+{
+    for (auto node : nodes) {
+        node->SetBruteForceSchedCycle(node->GetSchedCycle());
+    }
+}
+
+/* Recursively schedule all possible nodes. */
+void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVector<DepNode*> &readyList, uint32 currCycle,
+                                        MapleVector<DepNode*> &scheduledNodes, uint32 &maxCycleCount,
+                                        MapleVector<DepNode*> &optimizedScheduledNodes)
+{
+    /* Save states. */
+    constexpr int32 unitSize = 31;
+    DEBUG_ASSERT(unitSize == mad->GetAllUnitsSize(), "CG internal error.");
+    std::vector<uint32> occupyTable;
+    occupyTable.resize(unitSize, 0);
+    mad->SaveStates(occupyTable, unitSize);
+
+    /* Schedule targetNode first. */
+    targetNode.SetState(kScheduled);
+    targetNode.SetSchedCycle(currCycle);
+    scheduledNodes.emplace_back(&targetNode);
+
+    MapleVector<DepNode*> tempList = readyList;
+    EraseNodeFromNodeList(targetNode, tempList);
+    targetNode.OccupyUnits();
+
+    /* Update readyList. */
+    UpdateReadyList(targetNode, tempList, true);
+
+    if (targetNode.GetType() == kNodeTypeSeparator) {
+        /* If target node is a separator node, update lastSeparatorIndex. */
+        lastSeparatorIndex += kMaxDependenceNum;
+    }
+
+    if (tempList.empty()) {
+        DEBUG_ASSERT(scheduledNodes.size() == nodes.size(), "CG internal error, Not all nodes scheduled.");
+        if (currCycle < maxCycleCount) {
+            maxCycleCount = currCycle;
+            UpdateBruteForceSchedCycle();
+            optimizedScheduledNodes = scheduledNodes;
+        }
+    } else {
+        uint32 advanceCycle = 0;
+        switch (targetNode.GetInsn()->GetLatencyType()) {
+            case kLtClinit:
+                advanceCycle = kClinitAdvanceCycle;
+                break;
+            case kLtAdrpLdr:
+                advanceCycle = kAdrpLdrAdvanceCycle;
+                break;
+            case kLtClinitTail:
+                advanceCycle = kClinitTailAdvanceCycle;
+                break;
+            default:
+                break;
+        }
+
+        do {
+            std::vector<DepNode*> availableReadyList;
+            std::vector<DepNode*> tempAvailableList;
+            while (advanceCycle > 0) {
+                ++currCycle;
+                mad->AdvanceCycle();
+                --advanceCycle;
+            }
+            /* Check EStart. */
+            for (auto node : tempList) {
+                if (node->GetEStart() <= currCycle) {
+                    tempAvailableList.emplace_back(node);
+                }
+            }
+
+            if (tempAvailableList.empty()) {
+                /* Advance cycle. */
+                advanceCycle = 1;
+                continue;
+            }
+
+            /* Check if schedulable */
+            for (auto node : tempAvailableList) {
+                if (node->CanBeScheduled()) {
+                    availableReadyList.emplace_back(node);
+                }
+            }
+
+            if (availableReadyList.empty()) {
+                /* Advance cycle. */
+                advanceCycle = 1;
+                continue;
+            }
+
+            for (auto node : availableReadyList) {
+                IterateBruteForce(*node, tempList, currCycle, scheduledNodes, maxCycleCount, optimizedScheduledNodes);
+            }
+
+            break;
+        } while (true);
+    }
+
+    /*
+     * Recover states.
+     * Restore targetNode first.
+     */
+    targetNode.SetState(kReady);
+    targetNode.SetSchedCycle(0);
+    scheduledNodes.pop_back();
+    mad->RestoreStates(occupyTable, unitSize);
+
+    /* Update readyList. */
+    for (auto succLink : targetNode.GetSuccs()) {
+        DepNode &succNode = succLink->GetTo();
+        succNode.IncreaseValidPredsSize();
+        succNode.SetState(kNormal);
+    }
+
+    if (targetNode.GetType() == kNodeTypeSeparator) {
+        /* If target node is a separator node, update lastSeparatorIndex. */
+        lastSeparatorIndex -= kMaxDependenceNum;
+    }
+}
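+
+/*
+ * IterateBruteForce is a depth-first backtracking search: it schedules one
+ * candidate, recurses, then undoes every side effect (node state, cycle,
+ * pipeline occupancy) before trying the next candidate, keeping the cheapest
+ * completed schedule seen so far. A skeleton of that pattern, with
+ * hypothetical State/Choice types used only for illustration:
+ *
+ *   void Search(State &s, uint32_t cost, uint32_t &best)
+ *   {
+ *       if (s.Complete()) {
+ *           best = std::min(best, cost);   // keep the best full schedule
+ *           return;
+ *       }
+ *       for (Choice c : s.Candidates()) {
+ *           State saved = s;               // save states
+ *           s.Apply(c);                    // schedule one node
+ *           Search(s, cost + s.CycleDelta(), best);
+ *           s = saved;                     // recover states before next branch
+ *       }
+ *   }
+ */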
+
+/*
+ * Brute force schedule:
+ * Find all possible schedule lists of the current bb, calculate every list's execution cycles,
+ * and return the optimal schedule list and its cycle count.
+ */
+uint32 AArch64Schedule::DoBruteForceSchedule()
+{
+    MapleVector<DepNode*> scheduledNodes(alloc.Adapter());
+    MapleVector<DepNode*> optimizedScheduledNodes(alloc.Adapter());
+
+    uint32 currCycle = 0;
+    uint32 maxCycleCount = 0xFFFFFFFF;
+    Init();
+
+    /* Schedule the first separator. */
+    DepNode *targetNode = readyList.front();
+    targetNode->SetState(kScheduled);
+    targetNode->SetSchedCycle(currCycle);
+    scheduledNodes.emplace_back(targetNode);
+    readyList.clear();
+
+    /* Update readyList. */
+    UpdateReadyList(*targetNode, readyList, false);
+
+    DEBUG_ASSERT(targetNode->GetType() == kNodeTypeSeparator, "The first node should be separator node.");
+    DEBUG_ASSERT(!readyList.empty(), "readyList should not be empty.");
+
+    for (auto targetNodeTemp : readyList) {
+        IterateBruteForce(*targetNodeTemp, readyList, currCycle, scheduledNodes, maxCycleCount,
+                          optimizedScheduledNodes);
+    }
+
+    nodes = optimizedScheduledNodes;
+    return maxCycleCount;
+}
+
+/*
+ * Update the ready list after targetNode has been scheduled.
+ * For every successor of targetNode, if all of its predecessors have been scheduled,
+ * add it to the ready list and update its information (state, EStart).
+ */
+void AArch64Schedule::UpdateReadyList(DepNode &targetNode, MapleVector<DepNode*> &readyList, bool updateEStart)
+{
+    for (auto succLink : targetNode.GetSuccs()) {
+        DepNode &succNode = succLink->GetTo();
+        succNode.DescreaseValidPredsSize();
+        if (succNode.GetValidPredsSize() == 0) {
+            readyList.emplace_back(&succNode);
+            succNode.SetState(kReady);
+
+            /* Set eStart. */
+            if (updateEStart) {
+                uint32 maxEstart = 0;
+                for (auto predLink : succNode.GetPreds()) {
+                    DepNode &predNode = predLink->GetFrom();
+                    uint32 eStart = predNode.GetSchedCycle() + predLink->GetLatency();
+                    maxEstart = (maxEstart < eStart ? eStart : maxEstart);
+                }
+                succNode.SetEStart(maxEstart);
+            }
+        }
+    }
+}
+
+/* For every node of nodes, dump its dependence information. */
+void AArch64Schedule::DumpDepGraph(const MapleVector<DepNode*> &nodes) const
+{
+    for (auto node : nodes) {
+        depAnalysis->DumpDepNode(*node);
+        LogInfo::MapleLogger() << "---------- preds ----------"
+                               << "\n";
+        for (auto pred : node->GetPreds()) {
+            depAnalysis->DumpDepLink(*pred, &(pred->GetFrom()));
+        }
+        LogInfo::MapleLogger() << "---------- succs ----------"
+                               << "\n";
+        for (auto succ : node->GetSuccs()) {
+            depAnalysis->DumpDepLink(*succ, &(succ->GetTo()));
+        }
+        LogInfo::MapleLogger() << "---------------------------"
+                               << "\n";
+    }
+}
+
+/* For every node of nodes, dump its scheduled cycle according to the simulate type, and its instruction. */
+void AArch64Schedule::DumpScheduleResult(const MapleVector<DepNode*> &nodes, SimulateType type) const
+{
+    for (auto node : nodes) {
+        LogInfo::MapleLogger() << "cycle[ ";
+        switch (type) {
+            case kListSchedule:
+                LogInfo::MapleLogger() << node->GetSchedCycle();
+                break;
+            case kBruteForce:
+                LogInfo::MapleLogger() << node->GetBruteForceSchedCycle();
+                break;
+            case kSimulateOnly:
+                LogInfo::MapleLogger() << node->GetSimulateCycle();
+                break;
+        }
+        LogInfo::MapleLogger() << " ] ";
+        node->GetInsn()->Dump();
+        LogInfo::MapleLogger() << "\n";
+    }
+}
+
+/* Print bb's dependence dot graph information to a file.
*/ +void AArch64Schedule::GenerateDot(const BB &bb, const MapleVector &nodes) const +{ + std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ + std::ofstream dgFile; + std::streambuf *buf = dgFile.rdbuf(); + std::cout.rdbuf(buf); + + /* construct the file name */ + std::string fileName; + fileName.append(phaseName); + fileName.append("_"); + fileName.append(cgFunc.GetName()); + fileName.append("_BB"); + auto str = std::to_string(bb.GetId()); + fileName.append(str); + fileName.append("_dep_graph.dot"); + + dgFile.open(fileName.c_str(), std::ios::trunc); + if (!dgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failure.\n"; + return; + } + dgFile << "digraph {\n"; + for (auto node : nodes) { + for (auto succ : node->GetSuccs()) { + dgFile << "insn" << node->GetInsn() << " -> " + << "insn" << succ->GetTo().GetInsn(); + dgFile << " ["; + if (succ->GetDepType() == kDependenceTypeTrue) { + dgFile << "color=red,"; + } + dgFile << "label= \"" << succ->GetLatency() << "\""; + dgFile << "];\n"; + } + } + + for (auto node : nodes) { + MOperator mOp = node->GetInsn()->GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + dgFile << "insn" << node->GetInsn() << "["; + dgFile << "shape=box,label= \" " << node->GetInsn()->GetId() << ":\n"; + dgFile << "{ "; + dgFile << md->name << "\n"; + dgFile << "}\"];\n"; + } + dgFile << "}\n"; + dgFile.flush(); + dgFile.close(); + std::cout.rdbuf(coutBuf); +} + +RegType AArch64ScheduleProcessInfo::GetRegisterType(CGFunc &f, regno_t regNO) +{ + if (AArch64isa::IsPhysicalRegister(regNO)) { + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + return kRegTyInt; + } else if (AArch64isa::IsFPSIMDRegister(static_cast(regNO))) { + return kRegTyFloat; + } else { + CHECK_FATAL(false, "unknown physical reg"); + } + } else { + RegOperand *curRegOpnd = f.GetVirtualRegisterOperand(regNO); + DEBUG_ASSERT(curRegOpnd != nullptr, "register which is not physical and virtual"); + return curRegOpnd->GetRegisterType(); + } +} + +void AArch64ScheduleProcessInfo::VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc) +{ + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary) { + isInc ? IncIntLiveRegSet(regNO) : DecIntLiveRegSet(regNO); + } else if (registerTy == kRegTyFloat) { + isInc ? 
IncFpLiveRegSet(regNO) : DecFpLiveRegSet(regNO); + } + /* consider other type register */ +} + +void AArch64ScheduleProcessInfo::VaryFreeRegSet(CGFunc &f, std::set regNOs, DepNode &node) +{ + for (auto regNO : regNOs) { + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary /* memory base register must be int */) { + IncFreeIntRegNode(node); + } else if (registerTy == kRegTyFloat) { + IncFreeFpRegNode(node); + } else if (registerTy == kRegTyCc) { + /* do not count CC reg */ + return; + } else { + /* consider other type register */ + CHECK_FATAL(false, "do not support this type of register"); + } + } +} + +/* Do brute force scheduling and dump scheduling information */ +void AArch64Schedule::BruteForceScheduling(const BB &bb) +{ + LogInfo::MapleLogger() << "\n\n$$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "\n BB id = " << bb.GetId() << "; nodes.size = " << nodes.size() << "\n"; + + constexpr uint32 maxBruteForceNum = 50; + if (nodes.size() < maxBruteForceNum) { + GenerateDot(bb, nodes); + uint32 maxBruteForceCycle = DoBruteForceSchedule(); + MapleVector bruteNodes = nodes; + uint32 maxSchedCycle = DoSchedule(); + if (maxBruteForceCycle < maxSchedCycle) { + LogInfo::MapleLogger() << "maxBruteForceCycle = " << maxBruteForceCycle << "; maxSchedCycle = "; + LogInfo::MapleLogger() << maxSchedCycle << "\n"; + LogInfo::MapleLogger() << "\n ## Dump dependence graph ## " + << "\n"; + DumpDepGraph(nodes); + LogInfo::MapleLogger() << "\n ** Dump bruteForce scheduling result." + << "\n"; + DumpScheduleResult(bruteNodes, kBruteForce); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." + << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } + } else { + LogInfo::MapleLogger() << "Skip BruteForce scheduling." + << "\n"; + DoSchedule(); + } +} + +/* Do simulate scheduling and dump scheduling information */ +void AArch64Schedule::SimulateScheduling(const BB &bb) +{ + uint32 originCycle = SimulateOnly(); + MapleVector oldNodes = nodes; + uint32 schedCycle = DoSchedule(); + if (originCycle < schedCycle) { + LogInfo::MapleLogger() << "Worse cycle [ " << (schedCycle - originCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + LogInfo::MapleLogger() << "\n ** Dump original result." + << "\n"; + DumpScheduleResult(oldNodes, kSimulateOnly); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." + << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } else if (originCycle > schedCycle) { + LogInfo::MapleLogger() << "Advance cycle [ " << (originCycle - schedCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + } else { + LogInfo::MapleLogger() << "Equal cycle [ 0 ]; originCycle = " << originCycle; + LogInfo::MapleLogger() << " ], ignore. nodes.size = " << nodes.size() << "\n"; + } +} + +/* + * A local list scheduling. + * Schedule insns in basic blocks. 
+ */ +void AArch64Schedule::ListScheduling(bool beforeRA) +{ + InitIDAndLoc(); + + mad = Globals::GetInstance()->GetMAD(); + if (beforeRA) { + RegPressure::SetMaxRegClassNum(kRegisterLast); + } + depAnalysis = memPool.New(cgFunc, memPool, *mad, beforeRA); + + FOR_ALL_BB(bb, &cgFunc) { + depAnalysis->Run(*bb, nodes); + + if (LIST_SCHED_DUMP_REF) { + GenerateDot(*bb, nodes); + DumpDepGraph(nodes); + } + if (beforeRA) { + liveInRegNo = bb->GetLiveInRegNO(); + liveOutRegNo = bb->GetLiveOutRegNO(); + if (bb->GetKind() != BB::kBBReturn) { + SetConsiderRegPressure(); + DoSchedule(); + } else { + RegPressureScheduling(*bb, nodes); + } + } else { + ClinitPairOpt(); + MemoryAccessPairOpt(); + if (CGOptions::IsDruteForceSched()) { + BruteForceScheduling(*bb); + } else if (CGOptions::IsSimulateSched()) { + SimulateScheduling(*bb); + } else { + DoSchedule(); + } + } + + FinalizeScheduling(*bb, *depAnalysis); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ssa.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e5174302bd8c95d99effd015937d9426a5c0ba19 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_ssa.cpp @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_ssa.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64CGSSAInfo::RenameInsn(Insn &insn) +{ + auto opndNum = static_cast(insn.GetOperandSize()); + const InsnDesc *md = insn.GetDesc(); + if (md->IsPhi()) { + return; + } + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + auto *opndProp = (md->opndMD[static_cast(i)]); + A64SSAOperandRenameVisitor renameVisitor(*this, insn, *opndProp, i); + opnd.Accept(renameVisitor); + } +} + +MemOperand *AArch64CGSSAInfo::CreateMemOperand(MemOperand &memOpnd, bool isOnSSA) +{ + return isOnSSA ? 
memOpnd.Clone(*memPool) : &static_cast<AArch64CGFunc *>(cgFunc)->GetOrCreateMemOpnd(memOpnd);
+}
+
+RegOperand *AArch64CGSSAInfo::GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx)
+{
+    if (vRegOpnd.IsVirtualRegister()) {
+        DEBUG_ASSERT(!vRegOpnd.IsSSAForm(), "unexpected ssa operand");
+        if (isDef) {
+            VRegVersion *newVersion = CreateNewVersion(vRegOpnd, curInsn, idx);
+            CHECK_FATAL(newVersion != nullptr, "get ssa version failed");
+            return newVersion->GetSSAvRegOpnd();
+        } else {
+            VRegVersion *curVersion = GetVersion(vRegOpnd);
+            if (curVersion == nullptr) {
+                curVersion = RenamedOperandSpecialCase(vRegOpnd, curInsn, idx);
+            }
+            curVersion->AddUseInsn(*this, curInsn, idx);
+            return curVersion->GetSSAvRegOpnd();
+        }
+    }
+    DEBUG_ASSERT(false, "get renamed operand failed");
+    return nullptr;
+}
+
+VRegVersion *AArch64CGSSAInfo::RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx)
+{
+    LogInfo::MapleLogger() << "WARNING: " << vRegOpnd.GetRegisterNumber()
+                           << " has no def info in function : " << cgFunc->GetName() << " !\n";
+    /* occupy an ssa operand slot for the vreg that has no def */
+    if (!IncreaseSSAOperand(vRegOpnd.GetRegisterNumber(), nullptr)) {
+        DEBUG_ASSERT(GetAllSSAOperands().find(vRegOpnd.GetRegisterNumber()) != GetAllSSAOperands().end(),
+                     "should find");
+        AddNoDefVReg(vRegOpnd.GetRegisterNumber());
+    }
+    VRegVersion *version = CreateNewVersion(vRegOpnd, curInsn, idx);
+    version->SetDefInsn(nullptr, kDefByNo);
+    return version;
+}
+
+RegOperand *AArch64CGSSAInfo::CreateSSAOperand(RegOperand &virtualOpnd)
+{
+    regno_t ssaRegNO = static_cast<regno_t>(GetAllSSAOperands().size()) + SSARegNObase;
+    while (GetAllSSAOperands().count(ssaRegNO)) {
+        ssaRegNO++;
+        SSARegNObase++;
+    }
+    RegOperand *newVreg = memPool->New<RegOperand>(ssaRegNO, virtualOpnd.GetSize(), virtualOpnd.GetRegisterType());
+    newVreg->SetValidBitsNum(virtualOpnd.GetValidBitsNum());
+    newVreg->SetOpndSSAForm();
+    return newVreg;
+}
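+
+/*
+ * The renaming above follows the classic SSA discipline: every definition of a
+ * virtual register mints a fresh version, and every use reads whichever version
+ * is current at that point. A minimal sketch of the bookkeeping, with
+ * hypothetical standalone types rather than the CGSSAInfo API above:
+ *
+ *   std::unordered_map<uint32_t, uint32_t> curVersion;  // vreg -> version index
+ *   uint32_t RenameDef(uint32_t vreg) { return ++curVersion[vreg]; } // new version
+ *   uint32_t RenameUse(uint32_t vreg) { return curVersion[vreg]; }   // 0 == no def yet
+ */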
+
+void AArch64CGSSAInfo::ReplaceInsn(Insn &oriInsn, Insn &newInsn)
+{
+    A64OpndSSAUpdateVsitor ssaUpdator(*this);
+    auto UpdateInsnSSAInfo = [&ssaUpdator](Insn &curInsn, bool isDelete) {
+        const InsnDesc *md = curInsn.GetDesc();
+        for (uint32 i = 0; i < curInsn.GetOperandSize(); ++i) {
+            Operand &opnd = curInsn.GetOperand(i);
+            auto *opndProp = md->opndMD[i];
+            if (isDelete) {
+                ssaUpdator.MarkDecrease();
+            } else {
+                ssaUpdator.MarkIncrease();
+            }
+            ssaUpdator.SetInsnOpndInfo(curInsn, *opndProp, i);
+            opnd.Accept(ssaUpdator);
+        }
+    };
+    UpdateInsnSSAInfo(oriInsn, true);
+    newInsn.SetId(oriInsn.GetId());
+    UpdateInsnSSAInfo(newInsn, false);
+    CHECK_FATAL(!ssaUpdator.HasDeleteDef(), "delete def point in replace insn, please check");
+}
+
+/* do not break the binding between input and output operands in asm */
+void AArch64CGSSAInfo::CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion)
+{
+    if (insn.GetMachineOpcode() == MOP_asm) {
+        for (auto &opndIt : static_cast<ListOperand &>(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) {
+            if (opndIt->IsSSAForm()) {
+                VRegVersion *defVersion = FindSSAVersion(opndIt->GetRegisterNumber());
+                if (defVersion && defVersion->GetOriginalRegNO() == toBeReplaced->GetOriginalRegNO()) {
+                    insn.AddRegBinding(defVersion->GetOriginalRegNO(),
+                                       newVersion->GetSSAvRegOpnd()->GetRegisterNumber());
+                }
+            }
+        }
+    }
+}
+
+void AArch64CGSSAInfo::ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion)
+{
+    auto &useList = toBeReplaced->GetAllUseInsns();
+    for (auto it = useList.begin(); it != useList.end();) {
+        Insn *useInsn = it->second->GetInsn();
+        CheckAsmDUbinding(*useInsn, toBeReplaced, newVersion);
+        for (auto &opndIt : it->second->GetOperands()) {
+            Operand &opnd = useInsn->GetOperand(opndIt.first);
+            A64ReplaceRegOpndVisitor replaceRegOpndVisitor(
+                *cgFunc, *useInsn, opndIt.first, *toBeReplaced->GetSSAvRegOpnd(), *newVersion->GetSSAvRegOpnd());
+            opnd.Accept(replaceRegOpndVisitor);
+            newVersion->AddUseInsn(*this, *useInsn, opndIt.first);
+            it->second->ClearDU(opndIt.first);
+        }
+        it = useList.erase(it);
+    }
+}
+
+void AArch64CGSSAInfo::CreateNewInsnSSAInfo(Insn &newInsn)
+{
+    uint32 opndNum = newInsn.GetOperandSize();
+    MarkInsnsInSSA(newInsn);
+    for (uint32 i = 0; i < opndNum; i++) {
+        Operand &opnd = newInsn.GetOperand(i);
+        auto *opndProp = newInsn.GetDesc()->opndMD[i];
+        if (opndProp->IsDef() && opndProp->IsUse()) {
+            CHECK_FATAL(false, "do not support both def and use");
+        }
+        if (opndProp->IsDef()) {
+            CHECK_FATAL(opnd.IsRegister(), "defOpnd must be reg");
+            auto &defRegOpnd = static_cast<RegOperand &>(opnd);
+            regno_t defRegNO = defRegOpnd.GetRegisterNumber();
+            uint32 defVIdx = IncreaseVregCount(defRegNO);
+            RegOperand *defSSAOpnd = CreateSSAOperand(defRegOpnd);
+            newInsn.SetOperand(i, *defSSAOpnd);
+            auto *defVersion = memPool->New<VRegVersion>(ssaAlloc, *defSSAOpnd, defVIdx, defRegNO);
+            auto *defInfo = CreateDUInsnInfo(&newInsn, i);
+            defVersion->SetDefInsn(defInfo, kDefByInsn);
+            if (!IncreaseSSAOperand(defSSAOpnd->GetRegisterNumber(), defVersion)) {
+                CHECK_FATAL(false, "insert ssa operand failed");
+            }
+        } else if (opndProp->IsUse()) {
+            A64OpndSSAUpdateVsitor ssaUpdator(*this);
+            ssaUpdator.MarkIncrease();
+            ssaUpdator.SetInsnOpndInfo(newInsn, *opndProp, i);
+            opnd.Accept(ssaUpdator);
+        }
+    }
+}
+
+void AArch64CGSSAInfo::DumpInsnInSSAForm(const Insn &insn) const
+{
+    MOperator mOp = insn.GetMachineOpcode();
+    const InsnDesc *md = insn.GetDesc();
+    DEBUG_ASSERT(md != nullptr, "md should not be nullptr");
+
+    LogInfo::MapleLogger() << "< " << insn.GetId() << " > ";
+    LogInfo::MapleLogger() << md->name << "(" << mOp << ")";
+
+    for (uint32 i = 0; i < insn.GetOperandSize(); ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        LogInfo::MapleLogger() << " (opnd" << i << ": ";
+        A64SSAOperandDumpVisitor a64OpVisitor(GetAllSSAOperands());
+        opnd.Accept(a64OpVisitor);
+        if (!a64OpVisitor.HasDumped()) {
+            opnd.Dump();
+            LogInfo::MapleLogger() << ")";
+        }
+    }
+    if (insn.IsVectorOp()) {
+        auto &vInsn = static_cast<const VectorInsn &>(insn);
+        if (vInsn.GetNumOfRegSpec() != 0) {
+            LogInfo::MapleLogger() << " (vecSpec: " << vInsn.GetNumOfRegSpec() << ")";
+        }
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+void A64SSAOperandRenameVisitor::Visit(RegOperand *v)
+{
+    if (v->IsVirtualRegister()) {
+        if (opndDes->IsRegDef() && opndDes->IsRegUse()) { /* both def and use */
+            insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, false, *insn, idx));
+            (void)ssaInfo->GetRenamedOperand(*v, true, *insn, idx);
+        } else {
+            insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, opndDes->IsRegDef(), *insn, idx));
+        }
+    }
+}
+
+void A64SSAOperandRenameVisitor::Visit(MemOperand *a64MemOpnd)
+{
+    RegOperand *base = a64MemOpnd->GetBaseRegister();
+    RegOperand *index = a64MemOpnd->GetIndexRegister();
+    bool needCopy = (base != nullptr && base->IsVirtualRegister()) || (index != nullptr && index->IsVirtualRegister());
+    if (needCopy) {
+        MemOperand *cpyMem = ssaInfo->CreateMemOperand(*a64MemOpnd, true);
+        if (base != nullptr && base->IsVirtualRegister()) {
+            bool isDef = !a64MemOpnd->IsIntactIndexed();
+            cpyMem->SetBaseRegister(*ssaInfo->GetRenamedOperand(*base, isDef, *insn,
idx)); + } + if (index != nullptr && index->IsVirtualRegister()) { + cpyMem->SetIndexRegister(*ssaInfo->GetRenamedOperand(*index, false, *insn, idx)); + } + insn->SetMemOpnd(ssaInfo->CreateMemOperand(*cpyMem, false)); + } +} + +void A64SSAOperandRenameVisitor::Visit(ListOperand *v) +{ + bool isAsm = insn->GetMachineOpcode() == MOP_asm; + /* record the orignal list order */ + std::list tempList; + auto &opndList = v->GetOperands(); + while (!opndList.empty()) { + auto *op = opndList.front(); + opndList.pop_front(); + + if (op->IsSSAForm() || !op->IsVirtualRegister()) { + tempList.push_back(op); + continue; + } + + bool isDef = isAsm && (idx == kAsmClobberListOpnd || idx == kAsmOutputListOpnd); + RegOperand *renameOpnd = ssaInfo->GetRenamedOperand(*op, isDef, *insn, idx); + tempList.push_back(renameOpnd); + } + DEBUG_ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempList.begin(), tempList.end()); +} + +void A64OpndSSAUpdateVsitor::Visit(RegOperand *regOpnd) +{ + if (regOpnd->IsSSAForm()) { + if (opndDes->IsRegDef() && opndDes->IsRegUse()) { + UpdateRegUse(regOpnd->GetRegisterNumber()); + UpdateRegDef(regOpnd->GetRegisterNumber()); + } else { + if (opndDes->IsRegDef()) { + UpdateRegDef(regOpnd->GetRegisterNumber()); + } else if (opndDes->IsRegUse()) { + UpdateRegUse(regOpnd->GetRegisterNumber()); + } else if (IsPhi()) { + UpdateRegUse(regOpnd->GetRegisterNumber()); + } else { + DEBUG_ASSERT(false, "invalid opnd"); + } + } + } +} + +void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *a64MemOpnd) +{ + RegOperand *base = a64MemOpnd->GetBaseRegister(); + RegOperand *index = a64MemOpnd->GetIndexRegister(); + if (base != nullptr && base->IsSSAForm()) { + if (a64MemOpnd->IsIntactIndexed()) { + UpdateRegUse(base->GetRegisterNumber()); + } else { + UpdateRegDef(base->GetRegisterNumber()); + } + } + if (index != nullptr && index->IsSSAForm()) { + UpdateRegUse(index->GetRegisterNumber()); + } +} + +void A64OpndSSAUpdateVsitor::Visit(PhiOperand *phiOpnd) +{ + SetPhi(true); + for (auto phiListIt = phiOpnd->GetOperands().begin(); phiListIt != phiOpnd->GetOperands().end(); ++phiListIt) { + Visit(phiListIt->second); + } + SetPhi(false); +} + +void A64OpndSSAUpdateVsitor::Visit(ListOperand *v) +{ + /* do not handle asm here, so there is no list def */ + if (insn->GetMachineOpcode() == MOP_asm) { + DEBUG_ASSERT(false, "do not support asm yet"); + return; + } + for (auto *op : v->GetOperands()) { + if (op->IsSSAForm()) { + UpdateRegUse(op->GetRegisterNumber()); + } + } +} + +void A64OpndSSAUpdateVsitor::UpdateRegUse(uint32 ssaIdx) +{ + VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx); + if (isDecrease) { + curVersion->RemoveUseInsn(*insn, idx); + } else { + curVersion->AddUseInsn(*ssaInfo, *insn, idx); + } +} + +void A64OpndSSAUpdateVsitor::UpdateRegDef(uint32 ssaIdx) +{ + VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx); + if (isDecrease) { + deletedDef.emplace(ssaIdx); + curVersion->MarkDeleted(); + } else { + if (deletedDef.count(ssaIdx)) { + deletedDef.erase(ssaIdx); + curVersion->MarkRecovery(); + } else { + CHECK_FATAL(false, "do no support new define in ssaUpdating"); + } + DEBUG_ASSERT(!insn->IsPhi(), "do no support yet"); + curVersion->SetDefInsn(ssaInfo->CreateDUInsnInfo(insn, idx), kDefByInsn); + } +} + +void A64SSAOperandDumpVisitor::Visit(RegOperand *a64RegOpnd) +{ + DEBUG_ASSERT(!a64RegOpnd->IsConditionCode(), "both condi and reg"); + if (a64RegOpnd->IsSSAForm()) { + std::array prims = {"U", "R", "V", "C", "X", "Vra"}; + std::array classes 
= {"[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]"}; + CHECK_FATAL(a64RegOpnd->IsVirtualRegister() && a64RegOpnd->IsSSAForm(), "only dump ssa opnd here"); + RegType regType = a64RegOpnd->GetRegisterType(); + DEBUG_ASSERT(regType < kRegTyLast, "unexpected regType"); + auto ssaVit = allSSAOperands.find(a64RegOpnd->GetRegisterNumber()); + CHECK_FATAL(ssaVit != allSSAOperands.end(), "find ssa version failed"); + LogInfo::MapleLogger() << "ssa_reg:" << prims[regType] << ssaVit->second->GetOriginalRegNO() << "_" + << ssaVit->second->GetVersionIdx() << " class: " << classes[regType] << " validBitNum: [" + << static_cast(a64RegOpnd->GetValidBitsNum()) << "]"; + LogInfo::MapleLogger() << ")"; + SetHasDumped(); + } +} + +void A64SSAOperandDumpVisitor::Visit(ListOperand *v) +{ + for (auto regOpnd : v->GetOperands()) { + if (regOpnd->IsSSAForm()) { + Visit(regOpnd); + continue; + } + } +} + +void A64SSAOperandDumpVisitor::Visit(MemOperand *a64MemOpnd) +{ + if (a64MemOpnd->GetBaseRegister() != nullptr && a64MemOpnd->GetBaseRegister()->IsSSAForm()) { + LogInfo::MapleLogger() << "Mem: "; + Visit(a64MemOpnd->GetBaseRegister()); + if (a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + LogInfo::MapleLogger() << "offset:"; + a64MemOpnd->GetOffsetOperand()->Dump(); + } + } + if (a64MemOpnd->GetIndexRegister() != nullptr && a64MemOpnd->GetIndexRegister()->IsSSAForm()) { + DEBUG_ASSERT(a64MemOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX, "mem mode false"); + LogInfo::MapleLogger() << "offset:"; + Visit(a64MemOpnd->GetIndexRegister()); + } +} + +void A64SSAOperandDumpVisitor::Visit(PhiOperand *phi) +{ + for (auto phiListIt = phi->GetOperands().begin(); phiListIt != phi->GetOperands().end();) { + Visit(phiListIt->second); + LogInfo::MapleLogger() << " fBB<" << phiListIt->first << ">"; + LogInfo::MapleLogger() << (++phiListIt == phi->GetOperands().end() ? ")" : ", "); + } +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_strldr.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c00f4737ba6e2de71da97bb0e853f9e80e27b647 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_strldr.cpp @@ -0,0 +1,1101 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "aarch64_strldr.h" +#include "aarch64_reaching.h" +#include "aarch64_cgfunc.h" +#include "common_utils.h" + +namespace maplebe { +using namespace maple; + +static MOperator SelectMovMop(bool isFloatOrSIMD, bool is64Bit) +{ + return isFloatOrSIMD ? (is64Bit ? MOP_xvmovd : MOP_xvmovs) : (is64Bit ? MOP_xmovrr : MOP_wmovrr); +} + +void AArch64StoreLoadOpt::Run() +{ + DoStoreLoadOpt(); +} + +/* + * Transfer: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. 
+ * mov x200, x100 + * OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. if x100 dead here. + * mov x200, x9000 + * Params: + * strInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (x100 in this example) + * memSeq: represent first memOpreand or second memOperand + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, short memSeq, + const InsnSet &memUseInsnSet) +{ + /* stp instruction need two registers, str only need one register */ + DEBUG_ASSERT(strSrcIdx < kDivide2, "CG internal error."); + /* Find x100's definition insn. */ + InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(strInsn, strSrcIdx); + DEBUG_ASSERT(!regDefInsnSet.empty(), "RegOperand is used before defined"); + if (regDefInsnSet.size() != 1) { + return; + } + std::map InsnState; + for (auto *ldrInsn : memUseInsnSet) { + InsnState[ldrInsn] = true; + } + for (auto *ldrInsn : memUseInsnSet) { + if (!ldrInsn->IsLoad() || (ldrInsn->GetDefRegs().size() > 1) || ldrInsn->GetBB()->IsCleanup()) { + continue; + } + + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + + /* ldr x200, [mem], mem index is 1, x200 index is 0 */ + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + /* If load has multiple definition, continue. */ + if (memDefInsnSet.size() > 1) { + InsnState[ldrInsn] = false; + continue; + } + + Operand &resOpnd = ldrInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + + /* Check if use operand of store is live at load insn. 
*/ + if (cgFunc.GetRD()->RegIsLiveBetweenInsn(srcRegOpnd.GetRegisterNumber(), strInsn, *ldrInsn)) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + InsnState[ldrInsn] = false; + } else if (!cgFunc.IsAfterRegAlloc()) { + GenerateMoveDeadInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + } + + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "Do store-load optimization 1: str version"; + LogInfo::MapleLogger() << cgFunc.GetName() << '\n'; + LogInfo::MapleLogger() << "Store insn: "; + strInsn.Dump(); + LogInfo::MapleLogger() << "Load insn: "; + ldrInsn->Dump(); + } + } + auto it = memUseInsnSet.begin(); + ++it; + for (; it != memUseInsnSet.end(); ++it) { + Insn *curInsn = *it; + if (InsnState[curInsn] == false) { + continue; + } + if (!curInsn->IsLoad() || (curInsn->GetDefRegs().size() > 1) || curInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + if (memDefInsnSet.size() > 1) { + continue; + } + auto prevIt = it; + do { + --prevIt; + Insn *prevInsn = *prevIt; + if (InsnState[prevInsn] == false) { + continue; + } + if (prevInsn->GetBB() != curInsn->GetBB()) { + break; + } + if (!prevInsn->IsLoad() || (prevInsn->GetDefRegs().size() > 1) || prevInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memoryDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + DEBUG_ASSERT(!memoryDefInsnSet.empty(), "load insn should have definitions."); + if (memoryDefInsnSet.size() > 1) { + break; + } + Operand &resOpnd = curInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + continue; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + /* Check if use operand of store is live at load insn. */ + if (cgFunc.GetRD() + ->FindRegDefBetweenInsn(srcRegOpnd.GetRegisterNumber(), prevInsn->GetNext(), curInsn->GetPrev()) + .empty()) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *curInsn, *prevInsn, memSeq); + InsnState[curInsn] = false; + } + break; + } while (prevIt != memUseInsnSet.begin()); + } +} + +void AArch64StoreLoadOpt::GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, Insn &ldrInsn, + Insn &strInsn, short memSeq) +{ + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn *movInsn = nullptr; + if (str2MovMap[&strInsn][memSeq] != nullptr && !cgFunc.IsAfterRegAlloc()) { + Insn *movInsnOfStr = str2MovMap[&strInsn][memSeq]; + auto &vregOpnd = static_cast(movInsnOfStr->GetOperand(kInsnFirstOpnd)); + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, vregOpnd); + } else { + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, srcRegOpnd); + } + if (&resRegOpnd == &srcRegOpnd && cgFunc.IsAfterRegAlloc()) { + ldrInsn.GetBB()->RemoveInsn(ldrInsn); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); + return; + } + movInsn->SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, *movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn->Dump(); + } + /* Add comment. 
*/ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load live version."; + } else { + newComment += "; str-load live version."; + } + movInsn->SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +void AArch64StoreLoadOpt::GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, Insn &ldrInsn, + Insn &strInsn, short memSeq) +{ + Insn *newMovInsn = nullptr; + RegOperand *vregOpnd = nullptr; + + if (str2MovMap[&strInsn][memSeq] == nullptr) { + RegType regTy = srcRegOpnd.IsOfFloatOrSIMDClass() ? kRegTyFloat : kRegTyInt; + regno_t vRegNO = cgFunc.NewVReg(regTy, srcRegOpnd.GetSize() <= k32BitSize ? k4ByteSize : k8ByteSize); + /* generate a new vreg, check if the size of DataInfo is big enough */ + if (vRegNO >= cgFunc.GetRD()->GetRegSize(*strInsn.GetBB())) { + cgFunc.GetRD()->EnlargeRegCapacity(vRegNO); + } + vregOpnd = &cgFunc.CreateVirtualRegisterOperand(vRegNO); + MOperator newMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + newMovInsn = &cgFunc.GetInsnBuilder()->BuildInsn(newMop, *vregOpnd, srcRegOpnd); + newMovInsn->SetId(strInsn.GetId() + memSeq + 1); + strInsn.GetBB()->InsertInsnAfter(strInsn, *newMovInsn); + str2MovMap[&strInsn][memSeq] = newMovInsn; + /* update DataInfo */ + cgFunc.GetRD()->UpdateInOut(*strInsn.GetBB(), true); + } else { + newMovInsn = str2MovMap[&strInsn][memSeq]; + vregOpnd = &static_cast(newMovInsn->GetOperand(kInsnFirstOpnd)); + } + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, *vregOpnd); + movInsn.SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn.Dump(); + } + + /* Add comment. */ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load die version."; + } else { + newComment += "; str-load die version."; + } + movInsn.SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +bool AArch64StoreLoadOpt::HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const +{ + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + const Insn *currInsn = strInsn.GetNext(); + while (currInsn != &ldrInsn) { + if (currInsn == nullptr) { + return false; + } + if (currInsn->IsMachineInstruction() && currInsn->IsCall()) { + return true; + } + currInsn = currInsn->GetNext(); + } + return false; +} + +/* + * Transfer: store wzr, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_ZERO / OPT_VERSION_STR_ZERO: + * store wzr, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, wzr + * + * Params: + * stInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (wzr in this example) + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, + const InsnSet &memUseInsnSet) const +{ + /* comment for strInsn should be only added once */ + for (auto *ldrInsn : memUseInsnSet) { + /* Currently we don't support useInsn is ldp insn. 
*/ + if (!ldrInsn->IsLoad() || ldrInsn->GetDefRegs().size() > 1) { + continue; + } + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + /* ldr reg, [mem], the index of [mem] is 1 */ + InsnSet defInsnForUseInsns = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, 1); + /* If load has multiple definition, continue. */ + if (defInsnForUseInsns.size() > 1) { + continue; + } + + auto &resOpnd = ldrInsn->GetOperand(0); + auto &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + RegOperand &resRegOpnd = static_cast(resOpnd); + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resOpnd, srcOpnd); + movInsn.SetId(ldrInsn->GetId()); + ldrInsn->GetBB()->ReplaceInsn(*ldrInsn, movInsn); + + /* Add comment. */ + MapleString newComment = ldrInsn->GetComment(); + newComment += ", str-load zero version"; + movInsn.SetComment(newComment); + } +} + +bool AArch64StoreLoadOpt::CheckStoreOpCode(MOperator opCode) const +{ + switch (opCode) { + case MOP_wstr: + case MOP_xstr: + case MOP_sstr: + case MOP_dstr: + case MOP_wstp: + case MOP_xstp: + case MOP_sstp: + case MOP_dstp: + case MOP_wstrb: + case MOP_wstrh: + return true; + default: + return false; + } +} + +void AArch64StoreLoadOpt::MemPropInit() +{ + propMode = kUndef; + amount = 0; + removeDefInsn = false; +} + +bool AArch64StoreLoadOpt::CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, + regno_t replaceRegNo) +{ + if (replaceRegDefSet.empty()) { + return true; + } + if (defInsn.GetBB() == currInsn.GetBB()) { + /* check replace reg def between defInsn and currInsn */ + Insn *tmpInsn = defInsn.GetNext(); + while (tmpInsn != nullptr && tmpInsn != &currInsn) { + if (replaceRegDefSet.find(tmpInsn) != replaceRegDefSet.end()) { + return false; + } + tmpInsn = tmpInsn->GetNext(); + } + } else { + regno_t defRegno = static_cast(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (defRegno == replaceRegNo) { + uint32 defLoopId = 0; + uint32 curLoopId = 0; + if (defInsn.GetBB()->GetLoop()) { + defLoopId = defInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (currInsn.GetBB()->GetLoop()) { + curLoopId = currInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (defLoopId != curLoopId) { + return false; + } + } + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + if (a64RD->HasRegDefBetweenInsnGlobal(replaceRegNo, defInsn, currInsn)) { + return false; + } + } + + if (replaceRegDefSet.size() == 1 && *replaceRegDefSet.begin() == &defInsn) { + /* lsl x1, x1, #3 <-----should be removed after replace MemOperand of ldrInsn. 
+ * ldr x0, [x0,x1] <-----should be single useInsn for x1 + */ + InsnSet newRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(defInsn, replaceRegNo, true); + if (newRegUseSet.size() != k1BitSize) { + return false; + } + removeDefInsn = true; + } + return true; +} + +bool AArch64StoreLoadOpt::CheckDefInsn(Insn &defInsn, Insn &currInsn) +{ + if (defInsn.GetOperandSize() < k2ByteSize) { + return false; + } + for (uint32 i = kInsnSecondOpnd; i < defInsn.GetOperandSize(); i++) { + Operand &opnd = defInsn.GetOperand(i); + if (defInsn.IsMove() && opnd.IsRegister() && !cgFunc.IsSPOrFP(static_cast(opnd))) { + return false; + } + if (opnd.IsRegister()) { + RegOperand &a64OpndTmp = static_cast(opnd); + regno_t replaceRegNo = a64OpndTmp.GetRegisterNumber(); + InsnSet newRegDefSet = cgFunc.GetRD()->FindDefForRegOpnd(currInsn, replaceRegNo, true); + if (!CheckReplaceReg(defInsn, currInsn, newRegDefSet, replaceRegNo)) { + return false; + } + } + } + return true; +} + +bool AArch64StoreLoadOpt::CheckNewAmount(const Insn &insn, uint32 newAmount) +{ + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_wstrb: + case MOP_wldrsb: + case MOP_xldrsb: + case MOP_wldrb: { + return newAmount == 0; + } + case MOP_wstrh: + case MOP_wldrsh: + case MOP_xldrsh: + case MOP_wldrh: { + return (newAmount == 0) || (newAmount == k1BitSize); + } + case MOP_wstr: + case MOP_sstr: + case MOP_wldr: + case MOP_sldr: + case MOP_xldrsw: { + return (newAmount == 0) || (newAmount == k2BitSize); + } + case MOP_qstr: + case MOP_qldr: { + return (newAmount == 0) || (newAmount == k4BitSize); + } + default: { + return (newAmount == 0) || (newAmount == k3ByteSize); + } + } +} + +bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) +{ + AArch64CGFunc &a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc.IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = newMemOpnd->ShiftAmount(); + if (!CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned) +{ + MemOperand *newMemOpnd = nullptr; + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (propMode == kPropShift) || ((propMode == kPropSignedExtend) && isSigned) || + ((propMode == kPropUnsignedExtend) && !isSigned); + if (propMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, base, + *newOffset, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, base, + *newOffset, amount, isSigned); + } else { + return nullptr; + } + return newMemOpnd; +} + +MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal) +{ + if (propMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd 
is null"); + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, replace, nullptr, + newOfstImm, nullptr); +} + +/* + * limit to adjacent bb to avoid ra spill. + */ +bool AArch64StoreLoadOpt::IsAdjacentBB(Insn &defInsn, Insn &curInsn) const +{ + if (defInsn.GetBB() == curInsn.GetBB()) { + return true; + } + for (auto *bb : defInsn.GetBB()->GetSuccs()) { + if (bb == curInsn.GetBB()) { + return true; + } + if (bb->IsSoloGoto()) { + BB *tragetBB = CGCFG::GetTargetSuc(*bb); + if (tragetBB == curInsn.GetBB()) { + return true; + } + } + } + return false; +} + +/* + * currAddrMode | defMop | propMode | replaceAddrMode + * ============================================================================= + * boi | addrri | base | boi, update imm(offset) + * | addrrr | base | imm(offset) == 0(nullptr) ? borx : NA + * | subrri | base | boi, update imm(offset) + * | subrrr | base | NA + * | adrpl12 | base | imm(offset) == 0(nullptr) ? literal : NA + * | movrr | base | boi + * | movri | base | NA + * | extend/lsl | base | NA + * ============================================================================= + * borx | addrri | offset | NA + * (noextend) | addrrr | offset | NA + * | subrri | offset | NA + * | subrrr | offset | NA + * | adrpl12 | offset | NA + * | movrr | offset | borx + * | movri | offset | bori + * | extend/lsl | offset | borx(with extend) + * ============================================================================= + * borx | addrri | extend | NA + * (extend) | addrrr | extend | NA + * | subrri | extend | NA + * | subrrr | extend | NA + * | adrpl12 | extend | NA + * | movrr | extend | borx + * | movri | extend | NA + * | extend/lsl | extend | borx(with extend) + * ============================================================================= + */ +MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset) +{ + MemOperand *newMemOpnd = nullptr; + MOperator opCode = defInsn.GetMachineOpcode(); + RegOperand *replace = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + if (!IsAdjacentBB(defInsn, curInsn)) { + break; + } + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, newOffset, nullptr, nullptr); + } + break; + } + case MOP_xadrpl12: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + StImmOperand *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + 
CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = + &static_cast(cgFunc).CreateOfstOpnd(static_cast(val), k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeLo12Li, k64BitSize, *replace, nullptr, newOfsetOpnd, addr); + } + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + if (propMode == kPropBase) { + OfstOperand *offsetTmp = static_cast(offset); + CHECK_FATAL(offsetTmp != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, *replace, nullptr, offsetTmp, nullptr); + } else if (propMode == kPropOffset) { /* if newOffset is SP, swap base and newOffset */ + if (cgFunc.IsSPOrFP(*replace)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, &base, nullptr, nullptr); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, replace, nullptr, nullptr); + } + } else if (propMode == kPropSignedExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount, true); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOrX, + k64BitSize, base, *replace, amount); + } + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + if (propMode == kPropOffset) { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, + base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + uint32 shift = static_cast(imm->GetValue()); + if (propMode == kPropOffset) { + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } else if (propMode == kPropShift) { + shift += amount; + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, true); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, false); + break; + } + default: + break; + } + return newMemOpnd; +} + +bool AArch64StoreLoadOpt::ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset) +{ + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + CHECK_FATAL((a64RD != nullptr), "check a64RD!"); + InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, regNo, true); + if (regDefSet.size() != k1BitSize) { + return false; + } + Insn *regDefInsn = *regDefSet.begin(); + if (!CheckDefInsn(*regDefInsn, insn)) { + return false; + } + MemOperand *newMemOpnd = SelectReplaceMem(*regDefInsn, insn, base, 
offset); + if (newMemOpnd == nullptr) { + return false; + } + + /* check new memOpnd */ + if (newMemOpnd->GetBaseRegister() != nullptr) { + InsnSet regDefSetForNewBase = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetBaseRegister()->GetRegisterNumber(), true); + if (regDefSetForNewBase.size() != k1BitSize) { + return false; + } + } + if (newMemOpnd->GetIndexRegister() != nullptr) { + InsnSet regDefSetForNewIndex = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetIndexRegister()->GetRegisterNumber(), true); + if (regDefSetForNewIndex.size() != k1BitSize) { + return false; + } + } + + uint32 opndIdx; + if (insn.IsLoadPair() || insn.IsStorePair()) { + if (newMemOpnd->GetOffsetImmediate() == nullptr) { + return false; + } + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + if (!CheckNewMemOffset(insn, newMemOpnd, opndIdx)) { + return false; + } + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "replace insn:" << std::endl; + insn.Dump(); + } + insn.SetOperand(opndIdx, *newMemOpnd); + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "new insn:" << std::endl; + insn.Dump(); + } + if (removeDefInsn) { + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "remove insn:" << std::endl; + regDefInsn->Dump(); + } + regDefInsn->GetBB()->RemoveInsn(*regDefInsn); + } + cgFunc.GetRD()->InitGenUse(*regDefInsn->GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); + return true; +} + +bool AArch64StoreLoadOpt::CanDoMemProp(const Insn *insn) +{ + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + if (!insn->IsMachineInstruction()) { + return false; + } + if (insn->GetMachineOpcode() == MOP_qstr) { + return false; + } + + if (insn->IsLoad() || insn->IsStore()) { + if (insn->IsAtomic()) { + return false; + } + // It is not desired to propagate on 128bit reg with immediate offset + // which may cause linker to issue misalignment error + if (insn->IsAtomic() || insn->GetOperand(0).GetSize() == k128BitSize) { + return false; + } + MemOperand *currMemOpnd = static_cast(insn->GetMemOpnd()); + return currMemOpnd != nullptr; + } + return false; +} + +void AArch64StoreLoadOpt::SelectPropMode(const MemOperand &currMemOpnd) +{ + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + propMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + propMode = kPropOffset; + amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + propMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + propMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + propMode = kPropUnsignedExtend; + } + break; + } + default: + propMode = kUndef; + } +} + +/* + * Optimize: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_LIVE / OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, x100 + * OPT_VERSION_STP_DIE / OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. if x100 dead here. + * mov x200, x9000 + * + * Note: x100 may be wzr/xzr registers. 
+ */
+void AArch64StoreLoadOpt::DoStoreLoadOpt()
+{
+    AArch64CGFunc &a64CgFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    if (a64CgFunc.IsIntrnCallForC()) {
+        return;
+    }
+    FOR_ALL_BB(bb, &a64CgFunc) {
+        FOR_BB_INSNS_SAFE(insn, bb, next) {
+            MOperator mOp = insn->GetMachineOpcode();
+            if (CanDoMemProp(insn)) {
+                MemProp(*insn);
+            }
+            if (a64CgFunc.GetMirModule().IsCModule() && cgFunc.GetRD()->OnlyAnalysisReg()) {
+                continue;
+            }
+            if (!insn->IsMachineInstruction() || !insn->IsStore() || !CheckStoreOpCode(mOp) ||
+                (a64CgFunc.GetMirModule().IsCModule() && !a64CgFunc.IsAfterRegAlloc()) ||
+                (!a64CgFunc.GetMirModule().IsCModule() && a64CgFunc.IsAfterRegAlloc())) {
+                continue;
+            }
+            if (insn->IsStorePair()) {
+                ProcessStrPair(*insn);
+                continue;
+            }
+            ProcessStr(*insn);
+        }
+    }
+}
+
+/*
+ * PropBase:
+ *   add/sub x1, x2, #immVal1
+ *   ...(no def of x2)
+ *   ldr/str x0, [x1, #immVal2]
+ *   ======>
+ *   add/sub x1, x2, #immVal1
+ *   ...
+ *   ldr/str x0, [x2, #(immVal1 + immVal2)/#(-immVal1 + immVal2)]
+ *
+ * PropOffset:
+ *   sxtw x2, w2
+ *   lsl x1, x2, #1~3
+ *   ...(no def of x2)
+ *   ldr/str x0, [x0, x1]
+ *   ======>
+ *   sxtw x2, w2
+ *   lsl x1, x2, #1~3
+ *   ...
+ *   ldr/str x0, [x0, w2, sxtw 1~3]
+ */
+void AArch64StoreLoadOpt::MemProp(Insn &insn)
+{
+    MemPropInit();
+    MemOperand *currMemOpnd = static_cast<MemOperand *>(insn.GetMemOpnd());
+    SelectPropMode(*currMemOpnd);
+    RegOperand *base = currMemOpnd->GetBaseRegister();
+    Operand *offset = currMemOpnd->GetOffset();
+    bool memReplaced = false;
+
+    if (propMode == kUndef) {
+        return;
+    } else if (propMode == kPropBase) {
+        ImmOperand *immOffset = static_cast<ImmOperand *>(offset);
+        CHECK_FATAL(immOffset != nullptr, "immOffset is nullptr!");
+        regno_t baseRegNo = base->GetRegisterNumber();
+        memReplaced = ReplaceMemOpnd(insn, baseRegNo, *base, immOffset);
+    } else {
+        RegOperand *regOffset = static_cast<RegOperand *>(offset);
+        if (regOffset == nullptr) {
+            return;
+        }
+        regno_t offsetRegNo = regOffset->GetRegisterNumber();
+        memReplaced = ReplaceMemOpnd(insn, offsetRegNo, *base, regOffset);
+    }
+
+    /* if the prop succeeded, look for further prop chances */
+    if (memReplaced) {
+        MemProp(insn);
+    }
+}
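+
+/*
+ * For kPropBase the rewrite is pure immediate arithmetic: if the base was
+ * produced by "add/sub x1, x2, #immVal1", the access [x1, #immVal2] becomes
+ * [x2, #(immVal2 +/- immVal1)], provided x2 is unchanged in between. A minimal
+ * sketch of the offset folding that HandleArithImmDef performs on operands
+ * (simplified free function, for illustration only):
+ *
+ *   int64_t FoldOffset(bool isSub, int64_t defImm, int64_t memOfst)
+ *   {
+ *       // new offset relative to x2; the sub form contributes a negated immediate
+ *       return (isSub ? -defImm : defImm) + memOfst;
+ *   }
+ */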
+ *
+ * PostIndex:
+ * ldr/str x0, [x1]
+ * ...(no def/use of x1)
+ * add/sub x1, x1, #immVal1
+ * ======>
+ * ldr/str x0, [x1], #immVal1
+ */
+void AArch64StoreLoadOpt::StrLdrIndexModeOpt(Insn &currInsn)
+{
+    auto *curMemopnd = static_cast<MemOperand *>(currInsn.GetMemOpnd());
+    DEBUG_ASSERT(curMemopnd != nullptr, "get memopnd failed");
+    /* one instruction cannot define one register twice */
+    if (!CanDoIndexOpt(*curMemopnd) || currInsn.IsRegDefined(curMemopnd->GetBaseRegister()->GetRegisterNumber())) {
+        return;
+    }
+    MemOperand *newMemopnd = SelectIndexOptMode(currInsn, *curMemopnd);
+    if (newMemopnd != nullptr) {
+        currInsn.SetMemOpnd(newMemopnd);
+    }
+}
+
+bool AArch64StoreLoadOpt::CanDoIndexOpt(const MemOperand &MemOpnd)
+{
+    if (MemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !MemOpnd.IsIntactIndexed()) {
+        return false;
+    }
+    DEBUG_ASSERT(MemOpnd.GetOffsetImmediate() != nullptr, "kAddrModeBOi memopnd has no offset imm");
+    if (!MemOpnd.GetOffsetImmediate()->IsImmOffset()) {
+        return false;
+    }
+    if (cgFunc.IsSPOrFP(*MemOpnd.GetBaseRegister())) {
+        return false;
+    }
+    OfstOperand *a64Ofst = MemOpnd.GetOffsetImmediate();
+    if (a64Ofst == nullptr) {
+        return false;
+    }
+    return a64Ofst->GetValue() == 0;
+}
+
+int64 AArch64StoreLoadOpt::GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize)
+{
+    bool subMode = defInsn.GetMachineOpcode() == MOP_wsubrri12 || defInsn.GetMachineOpcode() == MOP_xsubrri12;
+    bool addMode = defInsn.GetMachineOpcode() == MOP_waddrri12 || defInsn.GetMachineOpcode() == MOP_xaddrri12;
+    if (addMode || subMode) {
+        DEBUG_ASSERT(static_cast<RegOperand &>(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == baseRegNO,
+                     "check def opnd");
+        auto &srcOpnd = static_cast<RegOperand &>(defInsn.GetOperand(kInsnSecondOpnd));
+        if (srcOpnd.GetRegisterNumber() == baseRegNO && defInsn.GetBB() == insn.GetBB()) {
+            int64 offsetVal = static_cast<ImmOperand &>(defInsn.GetOperand(kInsnThirdOpnd)).GetValue();
+            if (!MemOperand::IsSIMMOffsetOutOfRange(offsetVal, memOpndSize == k64BitSize, insn.IsLoadStorePair())) {
+                return subMode ? -offsetVal : offsetVal;
+            }
+        }
+    }
+    return kMaxPimm8; /* simm max value cannot exceed pimm max value */
+}
+
+MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd)
+{
+    AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition *>(cgFunc.GetRD());
+    DEBUG_ASSERT((a64RD != nullptr), "check a64RD!");
+    regno_t baseRegisterNO = curMemOpnd.GetBaseRegister()->GetRegisterNumber();
+    auto &a64cgFunc = static_cast<AArch64CGFunc &>(cgFunc);
+    /* pre index */
+    InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, baseRegisterNO, true);
+    if (regDefSet.size() == k1BitSize) {
+        Insn *defInsn = *regDefSet.begin();
+        int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+        if (defOffset < kMaxPimm8) {
+            InsnSet tempCheck;
+            (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, defInsn->GetNext(), insn.GetPrev(), tempCheck);
+            if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+                auto &newMem = a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+                DEBUG_ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+                newMem.SetIndexOpt(MemOperand::kPreIndex);
+                insn.GetBB()->RemoveInsn(*defInsn);
+                return &newMem;
+            }
+        }
+    }
+    /* post index */
+    std::vector<Insn *> refDefVec =
+        a64RD->FindRegDefBetweenInsn(baseRegisterNO, &insn, insn.GetBB()->GetLastInsn(), true);
+    if (!refDefVec.empty()) {
+        Insn *defInsn = refDefVec.back();
+        int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+        if (defOffset < kMaxPimm8) {
+            InsnSet tempCheck;
+            (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, insn.GetNext(), defInsn->GetPrev(), tempCheck);
+            if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+                auto &newMem = a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+                DEBUG_ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+                newMem.SetIndexOpt(MemOperand::kPostIndex);
+                insn.GetBB()->RemoveInsn(*defInsn);
+                return &newMem;
+            }
+        }
+    }
+    return nullptr;
+}
+
+void AArch64StoreLoadOpt::ProcessStrPair(Insn &insn)
+{
+    const short memIndex = 2;
+    short regIndex = 0;
+    Operand &opnd = insn.GetOperand(memIndex);
+    auto &memOpnd = static_cast<MemOperand &>(opnd);
+    RegOperand *base = memOpnd.GetBaseRegister();
+    if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
+        return;
+    }
+    if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) {
+        return;
+    }
+    DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index");
+    InsnSet memUseInsnSet;
+    for (int i = 0; i != kMaxMovNum; ++i) {
+        memUseInsnSet.clear();
+        if (i == 0) {
+            regIndex = 0;
+            memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex);
+        } else {
+            regIndex = 1;
+            memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex, true);
+        }
+        if (memUseInsnSet.empty()) {
+            return;
+        }
+        auto &regOpnd = static_cast<RegOperand &>(insn.GetOperand(static_cast<uint32>(regIndex)));
+        if (regOpnd.GetRegisterNumber() == RZR) {
+            DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet);
+        } else {
+            DoLoadToMoveTransfer(insn, regIndex, i, memUseInsnSet);
+        }
+    }
+}
+
+void AArch64StoreLoadOpt::ProcessStr(Insn &insn)
+{
+    /* str x100, [mem], mem index is 1, x100 index is 0; */
+    const short memIndex = 1;
+    const short regIndex = 0;
+    Operand &opnd = insn.GetOperand(memIndex);
+    auto &memOpnd = static_cast<MemOperand &>(opnd);
+    RegOperand *base = memOpnd.GetBaseRegister();
+    if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base)))
{ + return; + } + + if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) { + return; + } + DEBUG_ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index"); + + InsnSet memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (memUseInsnSet.empty()) { + return; + } + + auto *regOpnd = static_cast(&insn.GetOperand(regIndex)); + CHECK_NULL_FATAL(regOpnd); + if (regOpnd->GetRegisterNumber() == RZR) { + DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet); + } else { + DoLoadToMoveTransfer(insn, regIndex, 0, memUseInsnSet); + } + if (cgFunc.IsAfterRegAlloc() && insn.IsSpillInsn()) { + InsnSet newmemUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (newmemUseInsnSet.empty()) { + insn.GetBB()->RemoveInsn(insn); + } + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_utils.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dbc618a2e76b3dc3bb939e2c8e3fd4261399fd3b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_utils.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_utils.h" +#include "cg_option.h" + +namespace maplebe { + +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, const Insn &loadIns, MOperator newLoadMop) +{ + MemPool &memPool = *cgFunc.GetMemoryPool(); + auto *memOp = static_cast(loadIns.GetMemOpnd()); + MOperator loadMop = loadIns.GetMachineOpcode(); + + DEBUG_ASSERT(loadIns.IsLoad() && AArch64CG::kMd[newLoadMop].IsLoad(), "ins and Mop must be load"); + + MemOperand *newMemOp = memOp; + + uint32 memSize = AArch64CG::kMd[loadMop].GetOperandSize(); + uint32 newMemSize = AArch64CG::kMd[newLoadMop].GetOperandSize(); + + if (newMemSize == memSize) { + // if sizes are the same just return old memory operand + return newMemOp; + } + + newMemOp = memOp->Clone(memPool); + newMemOp->SetSize(newMemSize); + + if (!CGOptions::IsBigEndian()) { + return newMemOp; + } + + // for big-endian it's necessary to adjust offset if it's present + if (memOp->GetAddrMode() != MemOperand::kAddrModeBOi || newMemSize > memSize) { + // currently, it's possible to adjust an offset only for immediate offset + // operand if new size is less than the original one + return nullptr; + } + + auto *newOffOp = static_cast(memOp->GetOffsetImmediate()->Clone(memPool)); + + newOffOp->AdjustOffset(static_cast((memSize - newMemSize) >> kLog2BitsPerByte)); + newMemOp->SetOffsetOperand(*newOffOp); + + DEBUG_ASSERT(memOp->IsOffsetMisaligned(memSize) || !newMemOp->IsOffsetMisaligned(newMemSize), + "New offset value is misaligned!"); + + return newMemOp; +} + +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3a0dc89aa0712ab138107f15c82dba2d5716556b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_validbit_opt.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + case MOP_wandrri12: + case MOP_xandrri13: { + Optimize(bb, insn); + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Optimize(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + Optimize(bb, insn); + break; + } + case MOP_bge: + case MOP_blt: { + Optimize(bb, insn); + break; + } + default: + break; + } +} + +void AArch64ValidBitOpt::SetValidBits(Insn &insn) +{ + MOperator mop = insn.GetMachineOpcode(); + switch (mop) { + case MOP_wcsetrc: + case MOP_xcsetrc: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(k1BitSize); + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(srcOpnd.IsIntImmediate(), "must be ImmOperand"); + auto &immOpnd = static_cast(srcOpnd); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize())); + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (!srcOpnd.IsVirtualRegister()) { + break; + } + if (srcOpnd.GetRegisterNumber() == RZR) { + srcOpnd.SetValidBitsNum(k1BitSize); + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!(dstOpnd.GetSize() == k64BitSize && srcOpnd.GetSize() == k32BitSize) && + !(dstOpnd.GetSize() == k32BitSize && srcOpnd.GetSize() == k64BitSize)) { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum()); + } + break; + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: + case MOP_wasrrri5: + case MOP_xasrrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + uint32 shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { + dstOpnd.SetValidBitsNum(k1BitSize); + } else { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + } + break; + } + case MOP_wlslrri5: + case MOP_xlslrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + uint32 shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 newVB = ((srcOpnd.GetValidBitsNum() + shiftBits) > srcOpnd.GetSize()) + ? srcOpnd.GetSize() + : (srcOpnd.GetValidBitsNum() + shiftBits); + dstOpnd.SetValidBitsNum(newVB); + } + case MOP_xuxtb32: + case MOP_xuxth32: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 srcVB = srcOpnd.GetValidBitsNum(); + uint32 newVB = dstOpnd.GetValidBitsNum(); + newVB = (mop == MOP_xuxtb32) ? ((srcVB < k8BitSize) ? srcVB : k8BitSize) : newVB; + newVB = (mop == MOP_xuxth32) ? ((srcVB < k16BitSize) ? srcVB : k16BitSize) : newVB; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wldrb: + case MOP_wldrh: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 newVB = (mop == MOP_wldrb) ? 
k8BitSize : k16BitSize; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrrr: + case MOP_xandrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrri12: + case MOP_xandrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrrr: + case MOP_xiorrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrri12: + case MOP_xiorrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + default: + break; + } +} + +bool AArch64ValidBitOpt::SetPhiValidBits(Insn &insn) +{ + Operand &defOpnd = insn.GetOperand(kInsnFirstOpnd); + DEBUG_ASSERT(defOpnd.IsRegister(), "expect register"); + auto &defRegOpnd = static_cast(defOpnd); + Operand &phiOpnd = insn.GetOperand(kInsnSecondOpnd); + DEBUG_ASSERT(phiOpnd.IsPhi(), "expect phiList"); + auto &phiList = static_cast(phiOpnd); + int32 maxVB = -1; + for (auto phiOpndIt : phiList.GetOperands()) { + if (phiOpndIt.second != nullptr) { + maxVB = (maxVB < static_cast(phiOpndIt.second->GetValidBitsNum())) + ? 
static_cast(phiOpndIt.second->GetValidBitsNum()) + : maxVB; + } + } + if (maxVB >= static_cast(k0BitSize) && static_cast(maxVB) != defRegOpnd.GetValidBitsNum()) { + defRegOpnd.SetValidBitsNum(static_cast(maxVB)); + return true; + } + return false; +} + +static bool IsZeroRegister(const Operand &opnd) +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AndValidBitPattern::CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const +{ + if ((__builtin_ffs(static_cast(andImm)) - 1 == shiftImm) && + ((andImm >> shiftImm) == ((1 << (andImmVB - shiftImm)) - 1))) { + return true; + } + return false; +} + +bool AndValidBitPattern::CheckCondition(Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_wandrri12) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xandrri13) { + newMop = MOP_xmovrr; + } + if (newMop == MOP_undef) { + return false; + } + CHECK_FATAL(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsImmediate(), "must be imm!"); + desReg = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + srcReg = static_cast(&insn.GetOperand(kInsnSecondOpnd)); + auto &andImm = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 immVal = andImm.GetValue(); + uint32 validBit = srcReg->GetValidBitsNum(); + if (validBit == k8BitSize && immVal == 0xFF) { + return true; + } else if (validBit == k16BitSize && immVal == 0xFFFF) { + return true; + } + /* and R287[32], R286[64], #255 */ + if ((desReg->GetSize() < srcReg->GetSize()) && (srcReg->GetValidBitsNum() > desReg->GetSize())) { + return false; + } + InsnSet useInsns = GetAllUseInsn(*desReg); + if (useInsns.size() == 1) { + Insn *useInsn = *useInsns.begin(); + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop != MOP_wasrrri5 && useMop != MOP_xasrrri6 && useMop != MOP_wlsrrri5 && useMop != MOP_xlsrrri6) { + return false; + } + Operand &shiftOpnd = useInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(shiftOpnd.IsImmediate(), "must be immediate"); + int64 shiftImm = static_cast(shiftOpnd).GetValue(); + uint32 andImmVB = ValidBitOpt::GetImmValidBit(andImm.GetValue(), desReg->GetSize()); + if ((srcReg->GetValidBitsNum() == andImmVB) && CheckImmValidBit(andImm.GetValue(), andImmVB, shiftImm)) { + return true; + } + } + return false; +} + +void AndValidBitPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *desReg, *srcReg); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + if (desReg->GetSize() < srcReg->GetSize()) { + ssaInfo->InsertSafePropInsn(newInsn.GetId()); + } + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ExtValidBitPattern::CheckCondition(Insn &insn) +{ + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: { + CHECK_FATAL(dstOpnd.IsRegister(), "must be register"); + CHECK_FATAL(srcOpnd.IsRegister(), "must be register"); + if (static_cast(dstOpnd).GetValidBitsNum() != + static_cast(srcOpnd).GetValidBitsNum()) { + return false; + } + newMop = MOP_wmovrr; + break; 
+ } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Operand &immOpnd1 = insn.GetOperand(kInsnThirdOpnd); + Operand &immOpnd2 = insn.GetOperand(kInsnFourthOpnd); + CHECK_FATAL(immOpnd1.IsImmediate(), "must be immediate"); + CHECK_FATAL(immOpnd2.IsImmediate(), "must be immediate"); + int64 lsb = static_cast(immOpnd1).GetValue(); + int64 width = static_cast(immOpnd2).GetValue(); + if (lsb != 0 || static_cast(srcOpnd).GetValidBitsNum() > width) { + return false; + } + if ((mOp == MOP_wsbfxrri5i5 || mOp == MOP_xsbfxrri6i6) && + width != static_cast(srcOpnd).GetSize()) { + return false; + } + if (mOp == MOP_wubfxrri5i5 || mOp == MOP_wsbfxrri5i5) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xubfxrri6i6 || mOp == MOP_xsbfxrri6i6) { + newMop = MOP_xmovrr; + } + break; + } + default: + return false; + } + newDstOpnd = &static_cast(dstOpnd); + newSrcOpnd = &static_cast(srcOpnd); + return true; +} + +void ExtValidBitPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: { + insn.SetMOP(AArch64CG::kMd[newMop]); + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *newDstOpnd, *newSrcOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } + } + default: + return; + } +} + +bool CmpCsetVBPattern::IsContinuousCmpCset(const Insn &curInsn) +{ + auto &csetDstReg = static_cast(curInsn.GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(csetDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *dstVersion = ssaInfo->FindSSAVersion(csetDstReg.GetRegisterNumber()); + DEBUG_ASSERT(dstVersion != nullptr, "find vRegVersion failed"); + for (auto useDUInfoIt : dstVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = useDUInfoIt.second->GetInsn(); + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop == MOP_wcmpri || useMop == MOP_xcmpri) { + auto &ccDstReg = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(ccDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *ccDstVersion = ssaInfo->FindSSAVersion(ccDstReg.GetRegisterNumber()); + DEBUG_ASSERT(ccDstVersion != nullptr, "find vRegVersion failed"); + for (auto ccUseDUInfoIt : ccDstVersion->GetAllUseInsns()) { + if (ccUseDUInfoIt.second == nullptr) { + continue; + } + Insn *ccUseInsn = ccUseDUInfoIt.second->GetInsn(); + if (ccUseInsn == nullptr) { + continue; + } + MOperator ccUseMop = ccUseInsn->GetMachineOpcode(); + if (ccUseMop == MOP_wcsetrc || ccUseMop == MOP_xcsetrc) { + return true; + } + } + } + } + return false; +} + +bool CmpCsetVBPattern::OpndDefByOneValidBit(const Insn &defInsn) +{ + if (defInsn.IsPhi()) { + return (static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k1BitSize) || + (static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k0BitSize); + } + MOperator defMop = defInsn.GetMachineOpcode(); + switch (defMop) { + case MOP_wcsetrc: + case MOP_xcsetrc: + return true; + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd); + 
DEBUG_ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + return (defConstValue == 0 || defConstValue == 1); + } + case MOP_xmovrr: + case MOP_wmovrr: + return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd)); + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); + DEBUG_ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); + auto &opndImm = static_cast(opnd2); + int64 shiftBits = opndImm.GetValue(); + return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) || + (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1))); + } + default: + return false; + } +} + +bool CmpCsetVBPattern::CheckCondition(Insn &csetInsn) +{ + MOperator curMop = csetInsn.GetMachineOpcode(); + if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { + return false; + } + /* combine [continuous cmp & cset] first, to eliminate more insns */ + if (IsContinuousCmpCset(csetInsn)) { + return false; + } + RegOperand &ccReg = static_cast(csetInsn.GetOperand(kInsnThirdOpnd)); + regno_t ccRegNo = ccReg.GetRegisterNumber(); + cmpInsn = GetDefInsn(ccReg); + CHECK_NULL_FATAL(cmpInsn); + MOperator mop = cmpInsn->GetMachineOpcode(); + if ((mop != MOP_wcmpri) && (mop != MOP_xcmpri)) { + return false; + } + VRegVersion *ccRegVersion = ssaInfo->FindSSAVersion(ccRegNo); + if (ccRegVersion->GetAllUseInsns().size() > k1BitSize) { + return false; + } + Operand &cmpSecondOpnd = cmpInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &cmpConst = static_cast(cmpSecondOpnd); + cmpConstVal = cmpConst.GetValue(); + /* get ImmOperand, must be 0 or 1 */ + if ((cmpConstVal != 0) && (cmpConstVal != k1BitSize)) { + return false; + } + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(cmpFirstOpnd.IsRegister(), "cmpFirstOpnd must be register!"); + RegOperand &cmpReg = static_cast(cmpFirstOpnd); + Insn *defInsn = GetDefInsn(cmpReg); + if (defInsn == nullptr) { + return false; + } + if (defInsn->GetMachineOpcode() == MOP_wmovrr || defInsn->GetMachineOpcode() == MOP_xmovrr) { + auto &srcOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (!srcOpnd.IsVirtualRegister()) { + return false; + } + } + return ((cmpReg.GetValidBitsNum() == k1BitSize) || (cmpReg.GetValidBitsNum() == k0BitSize) || + OpndDefByOneValidBit(*defInsn)); +} + +void CmpCsetVBPattern::Run(BB &bb, Insn &csetInsn) +{ + if (!CheckCondition(csetInsn)) { + return; + } + Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd); + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + auto &cond = static_cast(csetInsn.GetOperand(kInsnSecondOpnd)); + Insn *newInsn = nullptr; + + /* cmpFirstOpnd == 1 */ + if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) { + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xmovrr : MOP_wmovrr; + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd); + } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) { + /* cmpFirstOpnd == 0 */ + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? 
MOP_xeorrri13 : MOP_weorrri12; + ImmOperand &one = static_cast(cgFunc)->CreateImmOperand(1, k8BitSize, false); + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd, one); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(csetInsn, *newInsn); + ssaInfo->ReplaceInsn(csetInsn, *newInsn); + if (CG_VALIDBIT_OPT_DUMP && (newInsn != nullptr)) { + std::vector prevInsns; + prevInsns.emplace_back(cmpInsn); + prevInsns.emplace_back(&csetInsn); + DumpAfterPattern(prevInsns, newInsn, nullptr); + } +} + +void CmpBranchesPattern::SelectNewMop(MOperator mop) +{ + switch (mop) { + case MOP_bge: { + newMop = is64Bit ? MOP_xtbnz : MOP_wtbnz; + break; + } + case MOP_blt: { + newMop = is64Bit ? MOP_xtbz : MOP_wtbz; + break; + } + default: + break; + } +} + +bool CmpBranchesPattern::CheckCondition(Insn &insn) +{ + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_bge && curMop != MOP_blt) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevCmpInsn = GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator cmpMop = prevCmpInsn->GetMachineOpcode(); + if (cmpMop != MOP_wcmpri && cmpMop != MOP_xcmpri) { + return false; + } + is64Bit = (cmpMop == MOP_xcmpri); + auto &cmpUseOpnd = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + auto &cmpImmOpnd = static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)); + int64 cmpImmVal = cmpImmOpnd.GetValue(); + newImmVal = ValidBitOpt::GetLogValueAtBase2(cmpImmVal); + if (newImmVal < 0 || cmpUseOpnd.GetValidBitsNum() != (newImmVal + 1)) { + return false; + } + SelectNewMop(curMop); + if (newMop == MOP_undef) { + return false; + } + return true; +} + +void CmpBranchesPattern::Run(BB &bb, Insn &insn) +{ + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &newImmOpnd = aarFunc->CreateImmOperand(newImmVal, k8BitSize, false); + Insn &newInsn = + cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevCmpInsn->GetOperand(kInsnSecondOpnd), newImmOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..146d171fd000ce6c7194fd346f9e7e160b1da173 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "aarch64_yieldpoint.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +void AArch64YieldPointInsertion::Run() +{ + InsertYieldPoint(); +} + +void AArch64YieldPointInsertion::InsertYieldPoint() +{ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + std::string refQueueName = + "Ljava_2Flang_2Fref_2FReference_3B_7C_3Cinit_3E_7C_" + "28Ljava_2Flang_2FObject_3BLjava_2Flang_2Fref_2FReferenceQueue_3B_29V"; + if (!CGOptions::IsGCOnly() && (aarchCGFunc->GetName() == refQueueName)) { + /* skip insert yieldpoint in reference constructor, avoid rc verify issue */ + DEBUG_ASSERT(aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted"); + aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn()); + return; + } + + /* + * do not insert yieldpoint in function that not saved X30 into stack, + * because X30 will be changed after yieldpoint is taken. + */ + if (!aarchCGFunc->GetHasProEpilogue()) { + DEBUG_ASSERT(aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted"); + aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn()); + return; + } + /* skip if no GetFirstbb(). */ + if (aarchCGFunc->GetFirstBB() == nullptr) { + return; + } + /* + * The yield point in the entry of the GetFunction() is inserted just after the initialization + * of localrefvars in HandleRCCall. + * for BBs after firstbb. + */ + for (BB *bb = aarchCGFunc->GetFirstBB()->GetNext(); bb != nullptr; bb = bb->GetNext()) { + /* insert a yieldpoint at beginning if BB is BackEdgeDest. */ + if (bb->IsBackEdgeDest()) { + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->GenerateYieldpoint(*aarchCGFunc->GetDummyBB()); + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/mpl_atomic.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/mpl_atomic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83e975632295f582b3a4f721788aea9f3e2e41eb --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/aarch64/mpl_atomic.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mpl_atomic.h" +#include +#include "mpl_logging.h" + +namespace maple { +namespace { +constexpr int32 kMaxSizeOfTab = 6; +}; +MemOrd MemOrdFromU32(uint32 val) +{ + /* 6 is the size of tab below. 2 is memory_order_consume, it is Disabled. */ + CHECK_FATAL(val <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", val); + CHECK_FATAL(val != 2, "Illegal number for MemOrd: %u", val); + static std::array tab = { + MemOrd::kNotAtomic, + MemOrd::memory_order_relaxed, + /* + * memory_order_consume Disabled. Its semantics is debatable. + * We don't support it now, but reserve the number. Use memory_order_acquire instead. 
+         */
+        MemOrd::memory_order_acquire, /* padding entry */
+        MemOrd::memory_order_acquire,
+        MemOrd::memory_order_release,
+        MemOrd::memory_order_acq_rel,
+        MemOrd::memory_order_seq_cst,
+    };
+    return tab[val];
+}
+
+bool MemOrdIsAcquire(MemOrd ord)
+{
+    static std::array<bool, kMaxSizeOfTab + 1> tab = {
+        false, /* kNotAtomic */
+        false, /* memory_order_relaxed */
+        true,  /* memory_order_consume */
+        true,  /* memory_order_acquire */
+        false, /* memory_order_release */
+        true,  /* memory_order_acq_rel */
+        true,  /* memory_order_seq_cst */
+    };
+    uint32 tabIndex = static_cast<uint32>(ord);
+    CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex);
+    return tab[tabIndex];
+}
+
+bool MemOrdIsRelease(MemOrd ord)
+{
+    static std::array<bool, kMaxSizeOfTab + 1> tab = {
+        false, /* kNotAtomic */
+        false, /* memory_order_relaxed */
+        false, /* memory_order_consume */
+        false, /* memory_order_acquire */
+        true,  /* memory_order_release */
+        true,  /* memory_order_acq_rel */
+        true,  /* memory_order_seq_cst */
+    };
+    uint32 tabIndex = static_cast<uint32>(ord);
+    CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex);
+    return tab[tabIndex];
+}
+} /* namespace maple */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/alignment.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/alignment.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e5326c6209b3591e360fcdbf358782c1d27b37ad
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/alignment.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "alignment.h" +#include "optimize_common.h" +#include "cgfunc.h" +#include "cg.h" +#include "cg_option.h" + +namespace maplebe { +#define ALIGN_ANALYZE_DUMP_NEWPW CG_DEBUG_FUNC(func) + +void AlignAnalysis::AnalysisAlignment() +{ + FindLoopHeader(); + FindJumpTarget(); + ComputeLoopAlign(); + ComputeJumpAlign(); + if (CGOptions::DoCondBrAlign()) { + ComputeCondBranchAlign(); + } +} + +void AlignAnalysis::Dump() +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n********* alignment for " << funcSt->GetName() << " *********\n"; + LogInfo::MapleLogger() << "------ jumpTargetBBs: " << jumpTargetBBs.size() << " total ------\n"; + for (auto *jumpLabel : jumpTargetBBs) { + LogInfo::MapleLogger() << " === BB_" << jumpLabel->GetId() << " (" << std::hex << jumpLabel << ")" << std::dec + << " <" << jumpLabel->GetKindName(); + if (jumpLabel->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << jumpLabel->GetLabIdx() << "]> ===\n"; + } + if (!jumpLabel->GetPreds().empty()) { + LogInfo::MapleLogger() << "\tpreds: [ "; + for (auto *pred : jumpLabel->GetPreds()) { + LogInfo::MapleLogger() << "BB_" << pred->GetId(); + if (pred->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (jumpLabel->GetPrev() != nullptr) { + LogInfo::MapleLogger() << "\tprev: [ "; + LogInfo::MapleLogger() << "BB_" << jumpLabel->GetPrev()->GetId(); + if (jumpLabel->GetPrev()->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << jumpLabel->GetPrev() << ") " << std::dec << " "; + LogInfo::MapleLogger() << "]\n"; + } + FOR_BB_INSNS_CONST(insn, jumpLabel) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ loopHeaderBBs: " << loopHeaderBBs.size() << " total ------\n"; + for (auto *loopHeader : loopHeaderBBs) { + LogInfo::MapleLogger() << " === BB_" << loopHeader->GetId() << " (" << std::hex << loopHeader << ")" << std::dec + << " <" << loopHeader->GetKindName(); + if (loopHeader->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << loopHeader->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\tLoop Level: " << loopHeader->GetLoop()->GetLoopLevel() << "\n"; + FOR_BB_INSNS_CONST(insn, loopHeader) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ alignInfos: " << alignInfos.size() << " total ------\n"; + MapleUnorderedMap::iterator iter; + for (iter = alignInfos.begin(); iter != alignInfos.end(); ++iter) { + BB *bb = iter->first; + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ")" << std::dec << " <" + << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\talignPower: " << iter->second << "\n"; + } +} + +bool CgAlignAnalysis::PhaseRun(maplebe::CGFunc &func) +{ + if (ALIGN_ANALYZE_DUMP_NEWPW) { + DotGenerator::GenerateDot("alignanalysis", func, func.GetMirModule(), true, func.GetName()); + } + MemPool *alignMemPool = GetPhaseMemPool(); + AlignAnalysis *alignAnalysis = func.GetCG()->CreateAlignAnalysis(*alignMemPool, 
func); + + CHECK_FATAL(alignAnalysis != nullptr, "AlignAnalysis instance create failure"); + alignAnalysis->AnalysisAlignment(); + if (ALIGN_ANALYZE_DUMP_NEWPW) { + alignAnalysis->Dump(); + } + return true; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/args.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ffe81841b2c1c1a58b6f65897e6c8b775750a357 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/args.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "args.h" +#include "cg.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; +bool CgMoveRegArgs::PhaseRun(maplebe::CGFunc &f) +{ + MemPool *memPool = GetPhaseMemPool(); + MoveRegArgs *movRegArgs = nullptr; + movRegArgs = f.GetCG()->CreateMoveRegArgs(*memPool, f); + movRegArgs->Run(); + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfgo.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9eae5cb1d1f056a455a829a48041cd8ad8635327 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfgo.cpp @@ -0,0 +1,857 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cfgo.h" +#include "cgbb.h" +#include "cg.h" +#include "mpl_logging.h" + +/* + * This phase traverses all basic block of cgFunc and finds special + * basic block patterns, like continuous fallthrough basic block, continuous + * uncondition jump basic block, unreachable basic block and empty basic block, + * then do basic mergering, basic block placement transformations, + * unnecessary jumps elimination, and remove unreachable or empty basic block. + * This optimization is done on control flow graph basis. 
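+ *
+ * The pattern classes below (ChainingPattern, SequentialJumpPattern,
+ * FlipBRPattern, EmptyBBPattern, UnreachBBPattern and DuplicateBBPattern)
+ * each implement one of these transformations, applied one BB at a time.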
+ */ +namespace maplebe { +using namespace maple; + +#define CFGO_DUMP_NEWPM CG_DEBUG_FUNC(f) + +/* return true if to is put after from and there is no real insns between from and to, */ +bool ChainingPattern::NoInsnBetween(const BB &from, const BB &to) const +{ + const BB *bb = nullptr; + for (bb = from.GetNext(); bb != nullptr && bb != &to && bb != cgFunc->GetLastBB(); bb = bb->GetNext()) { + if (!bb->IsEmptyOrCommentOnly() || bb->IsUnreachable() || bb->GetKind() != BB::kBBFallthru) { + return false; + } + } + return (bb == &to); +} + +/* return true if insns in bb1 and bb2 are the same except the last goto insn. */ +bool ChainingPattern::DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const +{ + const Insn *insn1 = bb1.GetFirstInsn(); + const Insn *insn2 = bb2.GetFirstInsn(); + while (insn1 != nullptr && insn1 != last1.GetNext() && insn2 != nullptr && insn2 != last2.GetNext()) { + if (!insn1->IsMachineInstruction()) { + insn1 = insn1->GetNext(); + continue; + } + if (!insn2->IsMachineInstruction()) { + insn2 = insn2->GetNext(); + continue; + } + if (insn1->GetMachineOpcode() != insn2->GetMachineOpcode()) { + return false; + } + uint32 opndNum = insn1->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &op1 = insn1->GetOperand(i); + Operand &op2 = insn2->GetOperand(i); + if (&op1 == &op2) { + continue; + } + if (!op1.Equals(op2)) { + return false; + } + } + insn1 = insn1->GetNext(); + insn2 = insn2->GetNext(); + } + return (insn1 == last1.GetNext() && insn2 == last2.GetNext()); +} + +/* + * BB2 can be merged into BB1, if + * 1. BB1's kind is fallthrough; + * 2. BB2 has only one predecessor which is BB1 and BB2 is not the lastbb + * 3. BB2 is neither catch BB nor switch case BB + */ +bool ChainingPattern::MergeFallthuBB(BB &curBB) +{ + BB *sucBB = curBB.GetNext(); + if (sucBB == nullptr || IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) || + !cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (sucBB == cgFunc->GetLastBB()) { + cgFunc->SetLastBB(curBB); + } + cgFunc->GetTheCFG()->MergeBB(curBB, *sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MergeGotoBB(BB &curBB, BB &sucBB) +{ + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->MergeBB(curBB, sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB) +{ + /* + * without the judge below, there is + * Assembler Error: CFI state restore without previous remember + */ + if (sucBB.GetHasCfi() || (sucBB.GetFirstInsn() != nullptr && sucBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + /* put sucBB as curBB's next. 
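+     * sucBB is first unlinked from its old position, then spliced in after
+     * curBB; the now-redundant goto at the end of curBB is removed and
+     * curBB becomes a fallthrough BB.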
*/ + DEBUG_ASSERT(sucBB.GetPrev() != nullptr, "the target of current goto BB will not be the first bb"); + sucBB.GetPrev()->SetNext(sucBB.GetNext()); + if (sucBB.GetNext() != nullptr) { + sucBB.GetNext()->SetPrev(sucBB.GetPrev()); + } + sucBB.SetNext(curBB.GetNext()); + DEBUG_ASSERT(curBB.GetNext() != nullptr, "current goto BB will not be the last bb"); + curBB.GetNext()->SetPrev(&sucBB); + sucBB.SetPrev(&curBB); + curBB.SetNext(&sucBB); + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::RemoveGotoInsn(BB &curBB, BB &sucBB) +{ + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (&sucBB != curBB.GetNext()) { + DEBUG_ASSERT(curBB.GetNext() != nullptr, "nullptr check"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*curBB.GetNext()); + curBB.GetNext()->PushBackPreds(curBB); + sucBB.RemovePreds(curBB); + } + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB) +{ + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Insn *brInsn = nullptr; + for (brInsn = curBB.GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()) { + break; + } + } + DEBUG_ASSERT(brInsn != nullptr, "goto BB has no branch"); + BB *newTarget = sucBB.GetPrev(); + DEBUG_ASSERT(newTarget != nullptr, "get prev bb failed in ChainingPattern::ClearCurBBAndResetTargetBB"); + Insn *last1 = newTarget->GetLastInsn(); + if (newTarget->GetKind() == BB::kBBGoto) { + Insn *br = nullptr; + for (br = newTarget->GetLastInsn(); br != newTarget->GetFirstInsn()->GetPrev(); br = br->GetPrev()) { + if (br->IsUnCondBranch()) { + break; + } + } + DEBUG_ASSERT(br != nullptr, "goto BB has no branch"); + last1 = br->GetPrev(); + } + if (last1 == nullptr || !DoSameThing(*newTarget, *last1, curBB, *brInsn->GetPrev())) { + return false; + } + + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + + LabelIdx tgtLabIdx = newTarget->GetLabIdx(); + if (newTarget->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + newTarget->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + brInsn->SetOperand(0, brTarget); + curBB.RemoveInsnSequence(*curBB.GetFirstInsn(), *brInsn->GetPrev()); + + curBB.RemoveFromSuccessorList(sucBB); + curBB.PushBackSuccs(*newTarget); + sucBB.RemoveFromPredecessorList(curBB); + newTarget->PushBackPreds(curBB); + + sucBB.GetPrev()->SetUnreachable(false); + keepPosition = true; + return true; +} + +/* + * Following optimizations are performed: + * 1. Basic block merging + * 2. unnecessary jumps elimination + * 3. Remove duplicates Basic block. + */ +bool ChainingPattern::Optimize(BB &curBB) +{ + if (curBB.GetKind() == BB::kBBFallthru) { + return MergeFallthuBB(curBB); + } + + if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { + Insn *last = curBB.GetLastInsn(); + if (last->IsTailCall()) { + return false; + } + + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + /* + * BB2 can be merged into BB1, if + * 1. BB1 ends with a goto; + * 2. BB2 has only one predecessor which is BB1 + * 3. BB2 is of goto kind. Otherwise, the original fall through will be broken + * 4. 
BB2 is neither catch BB nor switch case BB + */ + if (sucBB == nullptr || curBB.GetEhSuccs().size() != sucBB->GetEhSuccs().size()) { + return false; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != sucBB->GetEhSuccs().front())) { + return false; + } + if (sucBB->GetKind() == BB::kBBGoto && !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && + cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return MergeGotoBB(curBB, *sucBB); + } else if (sucBB != &curBB && curBB.GetNext() != sucBB && sucBB != cgFunc->GetLastBB() && + !sucBB->IsPredecessor(*sucBB->GetPrev()) && + !(sucBB->GetNext() != nullptr && sucBB->GetNext()->IsPredecessor(*sucBB)) && + !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && sucBB->GetEhSuccs().empty() && + sucBB->GetKind() != BB::kBBThrow) { + return MoveSuccBBAsCurBBNext(curBB, *sucBB); + } + /* + * Last goto instruction can be removed, if: + * 1. The goto target is physically the next one to current BB. + */ + else if (sucBB == curBB.GetNext() || + (NoInsnBetween(curBB, *sucBB) && !IsLabelInLSDAOrSwitchTable(curBB.GetNext()->GetLabIdx()))) { + return RemoveGotoInsn(curBB, *sucBB); + } + /* + * Clear curBB and target it to sucBB->GetPrev() + * if sucBB->GetPrev() and curBB's insns are the same. + * + * curBB: curBB: + * insn_x0 b prevbb + * b sucBB ... + * ... ==> prevbb: + * prevbb: insn_x0 + * insn_x0 sucBB: + * sucBB: + */ + else if (sucBB != curBB.GetNext() && !curBB.IsSoloGoto() && !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) && + sucBB->GetKind() == BB::kBBReturn && sucBB->GetPreds().size() > 1 && sucBB->GetPrev() != nullptr && + sucBB->IsPredecessor(*sucBB->GetPrev()) && + (sucBB->GetPrev()->GetKind() == BB::kBBFallthru || sucBB->GetPrev()->GetKind() == BB::kBBGoto)) { + return ClearCurBBAndResetTargetBB(curBB, *sucBB); + } + } + return false; +} + +/* + * curBB: curBB: + * insn_x0 insn_x0 + * b targetBB b BB + * ... ==> ... + * targetBB: targetBB: + * b BB b BB + * ... ... + * BB: BB: + * *------------------------------ + * curBB: curBB: + * insn_x0 insn_x0 + * cond_br brBB cond_br BB + * ... ... + * brBB: ==> brBB: + * b BB b BB + * ... ... + * BB: BB: + * + * conditions: + * 1. 
only goto and comment in brBB;
+ */
+bool SequentialJumpPattern::Optimize(BB &curBB)
+{
+    if (curBB.IsUnreachable()) {
+        return false;
+    }
+    if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) {
+        BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB);
+        CHECK_FATAL(sucBB != nullptr, "sucBB is null in SequentialJumpPattern::Optimize");
+        BB *targetBB = CGCFG::GetTargetSuc(*sucBB);
+        if ((sucBB != &curBB) && sucBB->IsSoloGoto() && targetBB != nullptr && targetBB != sucBB) {
+            Log(curBB.GetId());
+            if (checkOnly) {
+                return false;
+            }
+            cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB);
+            SkipSucBB(curBB, *sucBB);
+            return true;
+        }
+    } else if (curBB.GetKind() == BB::kBBIf) {
+        for (BB *sucBB : curBB.GetSuccs()) {
+            BB *targetBB = CGCFG::GetTargetSuc(*sucBB);
+            if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && targetBB != nullptr && targetBB != sucBB) {
+                Log(curBB.GetId());
+                if (checkOnly) {
+                    return false;
+                }
+                cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB);
+                SkipSucBB(curBB, *sucBB);
+                return true;
+            }
+        }
+    } else if (curBB.GetKind() == BB::kBBRangeGoto) {
+        bool changed = false;
+        for (BB *sucBB : curBB.GetSuccs()) {
+            if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() &&
+                cgFunc->GetTheCFG()->GetTargetSuc(*sucBB) != nullptr) {
+                Log(curBB.GetId());
+                if (checkOnly) {
+                    return false;
+                }
+                UpdateSwitchSucc(curBB, *sucBB);
+                cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*sucBB, *cgFunc);
+                changed = true;
+            }
+        }
+        return changed;
+    }
+    return false;
+}
+
+void SequentialJumpPattern::UpdateSwitchSucc(BB &curBB, BB &sucBB)
+{
+    BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB);
+    CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::UpdateSwitchSucc");
+    const MapleVector<LabelIdx> &labelVec = curBB.GetRangeGotoLabelVec();
+    bool isPred = false;
+    for (auto label : labelVec) {
+        if (label == gotoTarget->GetLabIdx()) {
+            isPred = true;
+            break;
+        }
+    }
+    for (size_t i = 0; i < labelVec.size(); ++i) {
+        if (labelVec[i] == sucBB.GetLabIdx()) {
+            curBB.SetRangeGotoLabel(i, gotoTarget->GetLabIdx());
+        }
+    }
+    cgFunc->UpdateEmitSt(curBB, sucBB.GetLabIdx(), gotoTarget->GetLabIdx());
+
+    /* connect curBB, gotoTarget */
+    for (auto it = gotoTarget->GetPredsBegin(); it != gotoTarget->GetPredsEnd(); ++it) {
+        if (*it == &sucBB) {
+            auto origIt = it;
+            if (isPred) {
+                break;
+            }
+            if (origIt != gotoTarget->GetPredsBegin()) {
+                --origIt;
+                gotoTarget->InsertPred(origIt, curBB);
+            } else {
+                gotoTarget->PushFrontPreds(curBB);
+            }
+            break;
+        }
+    }
+    for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) {
+        if (*it == &sucBB) {
+            auto origIt = it;
+            curBB.EraseSuccs(it);
+            if (isPred) {
+                break;
+            }
+            if (origIt != curBB.GetSuccsBegin()) {
+                --origIt;
+                curBB.InsertSucc(origIt, *gotoTarget);
+            } else {
+                curBB.PushFrontSuccs(*gotoTarget);
+            }
+            break;
+        }
+    }
+    /* cut curBB -> sucBB */
+    for (auto it = sucBB.GetPredsBegin(); it != sucBB.GetPredsEnd(); ++it) {
+        if (*it == &curBB) {
+            sucBB.ErasePreds(it);
+        }
+    }
+    for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) {
+        if (*it == &sucBB) {
+            curBB.EraseSuccs(it);
+        }
+    }
+}
+
+/*
+ * preCond:
+ * sucBB is one of curBB's successors.
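+ * (callers only pass a sucBB that is a solo-goto block, so skipping it
+ * drops no real instructions)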
+ * + * Change curBB's successor to sucBB's successor + */ +void SequentialJumpPattern::SkipSucBB(BB &curBB, BB &sucBB) +{ + BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB); + CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::SkipSucBB"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*gotoTarget); + sucBB.RemovePreds(curBB); + gotoTarget->PushBackPreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(sucBB, *cgFunc); +} + +/* + * Found pattern + * curBB: curBB: + * ... ==> ... + * cond_br brBB cond1_br ftBB + * ftBB: brBB: + * bl throwfunc ... + * brBB: retBB: + * ... ... + * retBB: ftBB: + * ... bl throwfunc + */ +void FlipBRPattern::RelocateThrowBB(BB &curBB) +{ + BB *ftBB = curBB.GetNext(); + CHECK_FATAL(ftBB != nullptr, "ifBB has a fall through BB"); + CGCFG *theCFG = cgFunc->GetTheCFG(); + CHECK_FATAL(theCFG != nullptr, "nullptr check"); + BB *retBB = theCFG->FindLastRetBB(); + CHECK_FATAL(retBB != nullptr, "must have a return BB"); + if (ftBB->GetKind() != BB::kBBThrow || !ftBB->GetEhSuccs().empty() || + IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) || !retBB->GetEhSuccs().empty()) { + return; + } + BB *brBB = theCFG->GetTargetSuc(curBB); + if (brBB != ftBB->GetNext()) { + return; + } + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + const MapleVector &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callsiteTable.size(); ++i) { + LSDACallSite *lsdaCallsite = callsiteTable[i]; + BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx()); + BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx()); + if (retBB->GetId() >= startTry->GetId() && retBB->GetId() <= endTry->GetId()) { + if (retBB->GetNext()->GetId() < startTry->GetId() || retBB->GetNext()->GetId() > endTry->GetId() || + curBB.GetId() < startTry->GetId() || curBB.GetId() > endTry->GetId()) { + return; + } + } else { + if ((retBB->GetNext()->GetId() >= startTry->GetId() && retBB->GetNext()->GetId() <= endTry->GetId()) || + (curBB.GetId() >= startTry->GetId() && curBB.GetId() <= endTry->GetId())) { + return; + } + } + } + } + /* get branch insn of curBB */ + Insn *curBBBranchInsn = theCFG->FindLastCondBrInsn(curBB); + CHECK_FATAL(curBBBranchInsn != nullptr, "curBB(it is a kBBif) has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(*ftBB); + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + + /* move ftBB after retBB */ + curBB.SetNext(brBB); + brBB->SetPrev(&curBB); + + retBB->GetNext()->SetPrev(ftBB); + ftBB->SetNext(retBB->GetNext()); + ftBB->SetPrev(retBB); + retBB->SetNext(ftBB); +} + +/* + * 1. relocate goto BB + * Found pattern (1) ftBB->GetPreds().size() == 1 + * curBB: curBB: cond1_br target + * ... ==> brBB: + * cond_br brBB ... + * ftBB: targetBB: (ftBB,targetBB) + * goto target (2) ftBB->GetPreds().size() > 1 + * brBB: curBB : cond1_br ftBB + * ... brBB: + * targetBB ... + * ftBB + * targetBB + * + * 2. 
relocate throw BB in RelocateThrowBB() + */ +bool FlipBRPattern::Optimize(BB &curBB) +{ + if (curBB.GetKind() == BB::kBBIf && !curBB.IsEmpty()) { + BB *ftBB = curBB.GetNext(); + DEBUG_ASSERT(ftBB != nullptr, "ftBB is null in FlipBRPattern::Optimize"); + BB *brBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + DEBUG_ASSERT(brBB != nullptr, "brBB is null in FlipBRPattern::Optimize"); + /* Check if it can be optimized */ + if (ftBB->GetKind() == BB::kBBGoto && ftBB->GetNext() == brBB) { + if (!ftBB->GetEhSuccs().empty()) { + return false; + } + Insn *curBBBranchInsn = nullptr; + for (curBBBranchInsn = curBB.GetLastInsn(); curBBBranchInsn != nullptr; + curBBBranchInsn = curBBBranchInsn->GetPrev()) { + if (curBBBranchInsn->IsBranch()) { + break; + } + } + DEBUG_ASSERT(curBBBranchInsn != nullptr, "FlipBRPattern: curBB has no branch"); + Insn *brInsn = nullptr; + for (brInsn = ftBB->GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()) { + break; + } + } + DEBUG_ASSERT(brInsn != nullptr, "FlipBRPattern: ftBB has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + if (mOp == 0) { + return false; + } + auto it = ftBB->GetSuccsBegin(); + BB *tgtBB = *it; + if (ftBB->GetPreds().size() == 1 && + (ftBB->IsSoloGoto() || + (!IsLabelInLSDAOrSwitchTable(tgtBB->GetLabIdx()) && cgFunc->GetTheCFG()->CanMerge(*ftBB, *tgtBB)))) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + Operand &brTarget = brInsn->GetOperand(GetJumpTargetIdx(*brInsn)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + /* Insert ftBB's insn at the beginning of tgtBB. */ + if (!ftBB->IsSoloGoto()) { + ftBB->RemoveInsn(*brInsn); + tgtBB->InsertAtBeginning(*ftBB); + } + /* Patch pred and succ lists */ + ftBB->EraseSuccs(it); + ftBB->PushBackSuccs(*brBB); + it = curBB.GetSuccsBegin(); + CHECK_FATAL(*it != nullptr, "nullptr check"); + if (*it == brBB) { + curBB.EraseSuccs(it); + curBB.PushBackSuccs(*tgtBB); + } else { + ++it; + curBB.EraseSuccs(it); + curBB.PushFrontSuccs(*tgtBB); + } + for (it = tgtBB->GetPredsBegin(); it != tgtBB->GetPredsEnd(); ++it) { + if (*it == ftBB) { + tgtBB->ErasePreds(it); + break; + } + } + tgtBB->PushBackPreds(curBB); + for (it = brBB->GetPredsBegin(); it != brBB->GetPredsEnd(); ++it) { + if (*it == &curBB) { + brBB->ErasePreds(it); + break; + } + } + brBB->PushFrontPreds(*ftBB); + /* Remove instructions from ftBB so curBB falls thru to brBB */ + ftBB->SetFirstInsn(nullptr); + ftBB->SetLastInsn(nullptr); + ftBB->SetKind(BB::kBBFallthru); + } else if (!IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) && !tgtBB->IsPredecessor(*tgtBB->GetPrev())) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + LabelIdx tgtLabIdx = ftBB->GetLabIdx(); + if (ftBB->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + ftBB->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + curBB.SetNext(brBB); + brBB->SetPrev(&curBB); + ftBB->SetPrev(tgtBB->GetPrev()); + tgtBB->GetPrev()->SetNext(ftBB); + ftBB->SetNext(tgtBB); + tgtBB->SetPrev(ftBB); + + ftBB->RemoveInsn(*brInsn); + ftBB->SetKind(BB::kBBFallthru); + } + } else { + RelocateThrowBB(curBB); + } + } + return false; +} + +/* remove a basic block that contains nothing */ +bool EmptyBBPattern::Optimize(BB &curBB) +{ + if (curBB.IsUnreachable()) { + return false; + } + /* 
Empty bb but do not have cleanup label. */
+    if (curBB.GetPrev() != nullptr && curBB.GetFirstStmt() != cgFunc->GetCleanupLabel() &&
+        curBB.GetFirstInsn() == nullptr && curBB.GetLastInsn() == nullptr && &curBB != cgFunc->GetLastBB() &&
+        curBB.GetKind() != BB::kBBReturn && !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx())) {
+        Log(curBB.GetId());
+        if (checkOnly) {
+            return false;
+        }
+
+        BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB);
+        if (sucBB == nullptr || sucBB->GetFirstStmt() == cgFunc->GetCleanupLabel()) {
+            return false;
+        }
+        cgFunc->GetTheCFG()->RemoveBB(curBB);
+        /* RemoveBB may do nothing; there is no need to repeat, so always return false here. */
+        return false;
+    }
+    return false;
+}
+
+/*
+ * remove unreachable BB
+ * condition:
+ *   1. an unreachable BB can't have a cfi instruction when running postcfgo.
+ */
+bool UnreachBBPattern::Optimize(BB &curBB)
+{
+    if (curBB.IsUnreachable()) {
+        Log(curBB.GetId());
+        if (checkOnly) {
+            return false;
+        }
+        /* if curBB is in the exit BB vector, return false. */
+        if (cgFunc->IsExitBB(curBB)) {
+            curBB.SetUnreachable(false);
+            return false;
+        }
+
+        if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) {
+            return false;
+        }
+
+        EHFunc *ehFunc = cgFunc->GetEHFunc();
+        /* if curBB is in the LSDA, replace curBB's label with nextReachableBB's before removing it. */
+        if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && cgFunc->GetTheCFG()->InLSDA(curBB.GetLabIdx(), *ehFunc)) {
+            /* find nextReachableBB */
+            BB *nextReachableBB = nullptr;
+            for (BB *bb = &curBB; bb != nullptr; bb = bb->GetNext()) {
+                if (!bb->IsUnreachable()) {
+                    nextReachableBB = bb;
+                    break;
+                }
+            }
+            CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB must not be nullptr");
+            if (nextReachableBB->GetLabIdx() == 0) {
+                LabelIdx labIdx = cgFunc->CreateLabel();
+                nextReachableBB->AddLabel(labIdx);
+                cgFunc->SetLab2BBMap(labIdx, *nextReachableBB);
+            }
+
+            ehFunc->GetLSDACallSiteTable()->UpdateCallSite(curBB, *nextReachableBB);
+        }
+
+        if (curBB.GetSuccs().empty() && curBB.GetEhSuccs().empty()) {
+            return false;
+        }
+
+        if (curBB.GetPrev() != nullptr) {
+            curBB.GetPrev()->SetNext(curBB.GetNext());
+        }
+        if (curBB.GetNext() != nullptr) {
+            curBB.GetNext()->SetPrev(curBB.GetPrev());
+        }
+
+        /* flush the unreachable status after removal */
+        for (BB *bb : curBB.GetSuccs()) {
+            bb->RemovePreds(curBB);
+            cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc);
+        }
+        for (BB *bb : curBB.GetEhSuccs()) {
+            bb->RemoveEhPreds(curBB);
+            cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc);
+        }
+        curBB.ClearSuccs();
+        curBB.ClearEhSuccs();
+        return true;
+    }
+    return false;
+}
+
+/* BB_pred1:           BB_pred1:
+ *   b curBB             insn_x0
+ *   ...                 b BB2
+ * BB_pred2:      ==>    ...
+ *   b curBB           BB_pred2:
+ *   ...                 insn_x0
+ * curBB:                b BB2
+ *   insn_x0             ...
+ *   b BB2             curBB:
+ *                       insn_x0
+ *                       b BB2
+ * condition:
+ *   1. The number of instructions in curBB is less than THRESHOLD;
+ *   2. curBB can't have a cfi instruction when running postcfgo.
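+ *
+ * For illustration only, a minimal sketch of the duplication step on a
+ * simplified stand-in IR (the Node type below is hypothetical, not the real
+ * Insn/BB classes; it only models "a list of insns ending in a goto"):
+ *
+ *     #include <vector>
+ *     struct Node { std::vector<int> insns; Node *target = nullptr; };
+ *     void DuplicateInto(Node &pred, const Node &curBB) {
+ *         pred.insns.pop_back();  // drop pred's trailing "b curBB"
+ *         // copy curBB's body (insn_x0, b BB2) into pred
+ *         pred.insns.insert(pred.insns.end(), curBB.insns.begin(), curBB.insns.end());
+ *         pred.target = curBB.target;  // pred now branches where curBB branched
+ *     }
+ *
+ * The real pass additionally clones via CloneInsn, checks EH successor lists
+ * for compatibility, and re-links the pred/succ edges, as the code below shows.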
+ */ +bool DuplicateBBPattern::Optimize(BB &curBB) +{ + if (curBB.IsUnreachable()) { + return false; + } + if (CGOptions::IsNoDupBB() || CGOptions::OptimizeForSize()) { + return false; + } + + /* curBB can't be in try block */ + if (curBB.GetKind() != BB::kBBGoto || IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) || + !curBB.GetEhSuccs().empty()) { + return false; + } + +#if TARGARM32 + FOR_BB_INSNS(insn, (&curBB)) { + if (insn->IsPCLoad() || insn->IsClinit()) { + return false; + } + } +#endif + /* It is possible curBB jump to itself */ + uint32 numPreds = curBB.NumPreds(); + for (BB *bb : curBB.GetPreds()) { + if (bb == &curBB) { + numPreds--; + } + } + + if (numPreds > 1 && cgFunc->GetTheCFG()->GetTargetSuc(curBB) != nullptr && + cgFunc->GetTheCFG()->GetTargetSuc(curBB)->NumPreds() > 1) { + std::vector candidates; + for (BB *bb : curBB.GetPreds()) { + if (bb->GetKind() == BB::kBBGoto && bb->GetNext() != &curBB && bb != &curBB && !bb->IsEmpty()) { + candidates.emplace_back(bb); + } + } + if (candidates.empty()) { + return false; + } + if (curBB.NumInsn() <= kThreshold) { + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + bool changed = false; + for (BB *bb : candidates) { + if (curBB.GetEhSuccs().size() != bb->GetEhSuccs().size()) { + continue; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != bb->GetEhSuccs().front())) { + continue; + } + bb->RemoveInsn(*bb->GetLastInsn()); + FOR_BB_INSNS(insn, (&curBB)) { + Insn *clonedInsn = cgFunc->GetTheCFG()->CloneInsn(*insn); + clonedInsn->SetPrev(nullptr); + clonedInsn->SetNext(nullptr); + clonedInsn->SetBB(nullptr); + bb->AppendInsn(*clonedInsn); + } + bb->RemoveSuccs(curBB); + for (BB *item : curBB.GetSuccs()) { + bb->PushBackSuccs(*item); + item->PushBackPreds(*bb); + } + curBB.RemovePreds(*bb); + changed = true; + } + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(curBB, *cgFunc); + return changed; + } + } + return false; +} + +/* === new pm === */ +bool CgCfgo::PhaseRun(maplebe::CGFunc &f) +{ + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-cfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("after-cfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCfgo, cfgo) + +bool CgPostCfgo::PhaseRun(maplebe::CGFunc &f) +{ + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-postcfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("after-postcfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostCfgo, postcfgo) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfi.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfi.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..450d6b996ef71d4213493cb6517b5cd558ff844f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cfi.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cfi.h" +#include "emit.h" + +namespace cfi { +using maplebe::CG; +using maplebe::Emitter; +using maplebe::MOperator; +using maplebe::Operand; +using maplebe::OpndDesc; + +struct CfiDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store cfi instruction's operand type */ + std::array opndTypes; +}; + +static CfiDescr cfiDescrTable[kOpCfiLast + 1] = { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) {".cfi_" #k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}}, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \ + {"." #k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}}, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + {".cfi_undef", 0, {Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef}}}; + +void CfiInsn::Dump() const +{ + MOperator mOp = GetMachineOpcode(); + CfiDescr &cfiDescr = cfiDescrTable[mOp]; + LogInfo::MapleLogger() << "CFI " << cfiDescr.name; + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + LogInfo::MapleLogger() << (i == 0 ? 
" : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void CfiInsn::Check() const +{ + CfiDescr &cfiDescr = cfiDescrTable[GetMachineOpcode()]; + /* cfi instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != cfiDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in cfi insn"); + } + } +} +#endif + +void RegOperand::Dump() const +{ + LogInfo::MapleLogger() << "reg: " << regNO << "[ size: " << GetSize() << "] "; +} + +void ImmOperand::Dump() const +{ + LogInfo::MapleLogger() << "imm: " << val << "[ size: " << GetSize() << "] "; +} + +void StrOperand::Dump() const +{ + LogInfo::MapleLogger() << str; +} + +void LabelOperand::Dump() const +{ + LogInfo::MapleLogger() << "label:" << labelIndex; +} +void CFIOpndEmitVisitor::Visit(RegOperand *v) +{ + emitter.Emit(v->GetRegisterNO()); +} +void CFIOpndEmitVisitor::Visit(ImmOperand *v) +{ + emitter.Emit(v->GetValue()); +} +void CFIOpndEmitVisitor::Visit(SymbolOperand *v) +{ + CHECK_FATAL(false, "NIY"); +} +void CFIOpndEmitVisitor::Visit(StrOperand *v) +{ + emitter.Emit(v->GetStr()); +} +void CFIOpndEmitVisitor::Visit(LabelOperand *v) +{ + if (emitter.GetCG()->GetMIRModule()->IsCModule()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const char *idx = strdup(std::to_string(pIdx).c_str()); + emitter.Emit(".label.").Emit(idx).Emit("__").Emit(v->GetIabelIdx()); + } else { + emitter.Emit(".label.").Emit(v->GetParentFunc()).Emit(v->GetIabelIdx()); + } +} +} /* namespace cfi */ + +namespace maplebe { +bool CgGenCfi::PhaseRun(maplebe::CGFunc &f) +{ + f.GenerateCfiPrologEpilog(); + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenCfi, gencfi) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9a8904ba85a00ad97f4d08ac0d8b29df0090d573 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg.cpp @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "emit.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (mirModule->IsJavaModule()) + +void Globals::SetTarget(CG &target) +{ + cg = ⌖ +} +const CG *Globals::GetTarget() const +{ + DEBUG_ASSERT(cg, " set target info please "); + return cg; +} + +CGFunc *CG::currentCGFunction = nullptr; +std::map> CG::funcWrapLabels; + +CG::~CG() +{ + if (emitter != nullptr) { + emitter->CloseOutput(); + } + delete memPool; + memPool = nullptr; + mirModule = nullptr; + emitter = nullptr; + currentCGFunction = nullptr; + instrumentationFunction = nullptr; + dbgTraceEnter = nullptr; + dbgTraceExit = nullptr; + dbgFuncProfile = nullptr; +} +/* This function intends to be a more general form of GenFieldOffsetmap. */ +void CG::GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName) +{ + const std::string &cMacroDefSuffix = ".macros.def"; + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + std::vector classesToGenerate; + + if (classListFileName.empty()) { + /* + * Class list not specified. Visit all classes. + */ + std::set visited; + + for (const auto &tyId : mirModule->GetClassList()) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyId); + if ((mirType->GetKind() != kTypeClass) && (mirType->GetKind() != kTypeClassIncomplete)) { + continue; /* Skip non-class. Too paranoid. We just enumerated classlist_! */ + } + MIRClassType *classType = static_cast(mirType); + const std::string &name = classType->GetName(); + + if (visited.find(name) != visited.end()) { + continue; /* Skip duplicated class definitions. */ + } + + (void)visited.insert(name); + classesToGenerate.emplace_back(classType); + } + } else { + /* Visit listed classes. */ + std::ifstream inFile(classListFileName); + CHECK_FATAL(inFile.is_open(), "Failed to open file: %s", classListFileName.c_str()); + std::string str; + + /* check each class name first and expose all unknown classes */ + while (inFile >> str) { + MIRType *type = GlobalTables::GetTypeTable().GetOrCreateClassType(str, *mirModule); + MIRClassType *classType = static_cast(type); + if (classType == nullptr) { + LogInfo::MapleLogger() << " >>>>>>>> unknown class: " << str.c_str() << "\n"; + return; + } + + classesToGenerate.emplace_back(classType); + } + } + + if (cgOption.GenDef()) { + const std::string &outputFileName = outputBaseName + cMacroDefSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenExtraTypeMetadata"); + } + + for (auto classType : classesToGenerate) { + beCommon->GenObjSize(*classType, *outputFile); + beCommon->GenFieldOffsetMap(*classType, *outputFile); + } + fclose(outputFile); + } + + if (cgOption.GenGctib()) { + maple::LogInfo::MapleLogger(kLlErr) << "--gen-gctib-file option not implemented"; + } +} + +void CG::GenPrimordialObjectList(const std::string &outputBaseName) +{ + const std::string &kPrimorListSuffix = ".primordials.txt"; + if (!cgOption.GenPrimorList()) { + return; + } + + const std::string &outputFileName = outputBaseName + kPrimorListSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenPrimordialObjectList"); + } + + for (StIdx stIdx : mirModule->GetSymbolSet()) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + DEBUG_ASSERT(symbol != nullptr, "get symbol from st idx failed"); + if (symbol->IsPrimordialObject()) { + const 
std::string &name = symbol->GetName(); + fprintf(outputFile, "%s\n", name.c_str()); + } + } + + fclose(outputFile); +} + +void CG::AddStackGuardvar() +{ + MIRSymbol *chkGuard = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + chkGuard->SetNameStrIdx(std::string("__stack_chk_guard")); + chkGuard->SetStorageClass(kScExtern); + chkGuard->SetSKind(kStVar); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > PTY_u64, "out of vector range"); + chkGuard->SetTyIdx(GlobalTables::GetTypeTable().GetTypeTable()[PTY_u64]->GetTypeIndex()); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkGuard); + + MIRSymbol *chkFunc = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + chkFunc->SetNameStrIdx(std::string("__stack_chk_fail")); + chkFunc->SetStorageClass(kScText); + chkFunc->SetSKind(kStFunc); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkFunc); +} + +void CG::SetInstrumentationFunction(const std::string &name) +{ + instrumentationFunction = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + instrumentationFunction->SetNameStrIdx(std::string("__").append(name).append("__")); + instrumentationFunction->SetStorageClass(kScText); + instrumentationFunction->SetSKind(kStFunc); +} + +#define DBG_TRACE_ENTER MplDtEnter +#define DBG_TRACE_EXIT MplDtExit +#define XSTR(s) str(s) +#define str(s) #s + +void CG::DefineDebugTraceFunctions() +{ + dbgTraceEnter = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceEnter->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_ENTER) "__")); + dbgTraceEnter->SetStorageClass(kScText); + dbgTraceEnter->SetSKind(kStFunc); + + dbgTraceExit = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceExit->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_EXIT) "__")); + dbgTraceExit->SetStorageClass(kScText); + dbgTraceExit->SetSKind(kStFunc); + + dbgFuncProfile = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgFuncProfile->SetNameStrIdx(std::string("__" XSTR(MplFuncProfile) "__")); + dbgFuncProfile->SetStorageClass(kScText); + dbgFuncProfile->SetSKind(kStFunc); +} + +/* + * Add the fields of curStructType to the result. Used to handle recursive + * structures. + */ +static void AppendReferenceOffsets64(const BECommon &beCommon, MIRStructType &curStructType, int64 &curOffset, + std::vector &result) +{ + /* + * We are going to reimplement BECommon::GetFieldOffset so that we can do + * this in one pass through all fields. + * + * The tricky part is to make sure the object layout described here is + * compatible with the rest of the system. This implies that we need + * something like a "Maple ABI" documented for each platform. 
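+ *
+ * As a worked illustration of the rounding performed below (assuming RoundUp
+ * is the usual align-up idiom; the helper name here is ours, not the API's):
+ *
+ *     #include <cstdint>
+ *     // curOffset = 12, field size = 8, alignment = 8:
+ *     //   myOffset   = AlignUp(12, 8) = 16  (4 bytes of padding)
+ *     //   nextOffset = 16 + 8         = 24
+ *     inline int64_t AlignUp(int64_t off, int64_t align) {
+ *         return (off + align - 1) & ~(align - 1);  // align must be a power of two
+ *     }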
+ */ + if (curStructType.GetKind() == kTypeClass) { + MIRClassType &curClassTy = static_cast(curStructType); + auto maybeParent = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curClassTy.GetParentTyIdx()); + if (maybeParent != nullptr) { + if (maybeParent->GetKind() == kTypeClass) { + auto parentClassType = static_cast(maybeParent); + AppendReferenceOffsets64(beCommon, *parentClassType, curOffset, result); + } else { + LogInfo::MapleLogger() << "WARNING:: generating objmap for incomplete class\n"; + } + } + } + + for (const auto &fieldPair : curStructType.GetFields()) { + auto fieldNameIdx = fieldPair.first; + auto fieldTypeIdx = fieldPair.second.first; + + auto &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldNameIdx); + auto fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx); + auto &fieldTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldType->GetNameStrIdx()); + auto fieldTypeKind = fieldType->GetKind(); + + auto fieldSize = beCommon.GetTypeSize(fieldTypeIdx); + auto fieldAlign = beCommon.GetTypeAlign(fieldTypeIdx); + int64 myOffset = static_cast(RoundUp(curOffset, fieldAlign)); + int64 nextOffset = myOffset + fieldSize; + + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " field: " << fieldName << "\n"; + LogInfo::MapleLogger() << " type: " << fieldTypeIdx << ": " << fieldTypeName << "\n"; + LogInfo::MapleLogger() << " type kind: " << fieldTypeKind << "\n"; + LogInfo::MapleLogger() << " size: " << fieldSize << "\n"; /* int64 */ + LogInfo::MapleLogger() << " align: " << static_cast(fieldAlign) << "\n"; /* int8_t */ + LogInfo::MapleLogger() << " field offset:" << myOffset << "\n"; /* int64 */ + } + + if (fieldTypeKind == kTypePointer) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** Is a pointer field.\n"; + } + result.emplace_back(myOffset); + } + + if ((fieldTypeKind == kTypeArray) || (fieldTypeKind == kTypeStruct) || (fieldTypeKind == kTypeClass) || + (fieldTypeKind == kTypeInterface)) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** ERROR: We are not expecting nested aggregate type. "; + LogInfo::MapleLogger() << "All Java classes are flat -- no nested structs. "; + LogInfo::MapleLogger() << "Please extend me if we are going to work with non-java languages.\n"; + } + } + + curOffset = nextOffset; + } +} + +/* Return a list of offsets of reference fields. */ +std::vector CG::GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType) +{ + std::vector result; + /* java class layout has already been done in previous phase. 
*/ + if (structType.GetKind() == kTypeClass) { + for (auto fieldInfo : beCommon.GetJClassLayout(static_cast(structType))) { + if (fieldInfo.IsRef()) { + result.emplace_back(static_cast(fieldInfo.GetOffset())); + } + } + } else if (structType.GetKind() != kTypeInterface) { /* interface doesn't have reference fields */ + int64 curOffset = 0; + AppendReferenceOffsets64(beCommon, structType, curOffset, result); + } + + return result; +} + +const std::string CG::ExtractFuncName(const std::string &str) +{ + /* 3: length of "_7C" */ + size_t offset = 3; + size_t pos1 = str.find("_7C"); + if (pos1 == std::string::npos) { + return str; + } + size_t pos2 = str.find("_7C", pos1 + offset); + if (pos2 == std::string::npos) { + return str; + } + std::string funcName = str.substr(pos1 + offset, pos2 - pos1 - offset); + /* avoid funcName like __LINE__ and __FILE__ which will be resolved by assembler */ + if (funcName.find("__") != std::string::npos) { + return str; + } + if (funcName == "_3Cinit_3E") { + return "init"; + } + if (funcName == "_3Cclinit_3E") { + return "clinit"; + } + return funcName; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_cfg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_cfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..af4a899207bc16c5600080c64ec0d88ef15e6a63 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_cfg.cpp @@ -0,0 +1,937 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "cg_cfg.h"
+#if TARGAARCH64
+#include "aarch64_insn.h"
+#elif TARGRISCV64
+#include "riscv64_insn.h"
+#endif
+#if TARGARM32
+#include "arm32_insn.h"
+#endif
+#include "cg_option.h"
+#include "mpl_logging.h"
+#if TARGX86_64
+#include "x64_cgfunc.h"
+#include "cg.h"
+#endif
+#include
+
+namespace {
+using namespace maplebe;
+bool CanBBThrow(const BB &bb)
+{
+    FOR_BB_INSNS_CONST(insn, &bb) {
+        if (insn->IsTargetInsn() && insn->CanThrow()) {
+            return true;
+        }
+    }
+    return false;
+}
+}  // namespace
+
+namespace maplebe {
+void CGCFG::BuildCFG()
+{
+    /*
+     * Second Pass:
+     * Link preds/succs in the BBs
+     */
+    BB *firstBB = cgFunc->GetFirstBB();
+    for (BB *curBB = firstBB; curBB != nullptr; curBB = curBB->GetNext()) {
+        BB::BBKind kind = curBB->GetKind();
+        switch (kind) {
+            case BB::kBBIntrinsic:
+                /*
+                 * An intrinsic BB appends a MOP_wcbnz instruction at the end; check
+                 * AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode *intrinsiccallNode) for details
+                 */
+                if (!curBB->GetLastInsn()->IsBranch()) {
+                    break;
+                }
+                /* else fall through */
+                [[clang::fallthrough]];
+            case BB::kBBIf: {
+                BB *fallthruBB = curBB->GetNext();
+                curBB->PushBackSuccs(*fallthruBB);
+                fallthruBB->PushBackPreds(*curBB);
+                Insn *branchInsn = curBB->GetLastMachineInsn();
+                CHECK_FATAL(branchInsn != nullptr, "a machine instruction must exist in ifBB");
+                DEBUG_ASSERT(branchInsn->IsCondBranch(), "must be a conditional branch generated from an intrinsic");
+                /* Assume the last non-null operand is the branch target */
+                int lastOpndIndex = curBB->GetLastInsn()->GetOperandSize() - 1;
+                DEBUG_ASSERT(lastOpndIndex > -1, "lastOpndIndex must be greater than -1");
+                Operand &lastOpnd = branchInsn->GetOperand(static_cast<uint32>(lastOpndIndex));
+                DEBUG_ASSERT(lastOpnd.IsLabelOpnd(), "a label operand must exist in the branch insn");
+                auto &labelOpnd = static_cast<LabelOperand &>(lastOpnd);
+                BB *brToBB = cgFunc->GetBBFromLab2BBMap(labelOpnd.GetLabelIndex());
+                if (fallthruBB->GetId() != brToBB->GetId()) {
+                    curBB->PushBackSuccs(*brToBB);
+                    brToBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBGoto: {
+                Insn *insn = curBB->GetLastMachineInsn();
+                if (insn == nullptr) {
+                    curBB->SetKind(BB::kBBFallthru);
+                    continue;
+                }
+                CHECK_FATAL(insn != nullptr, "a machine insn must exist in gotoBB");
+                DEBUG_ASSERT(insn->IsUnCondBranch(), "insn must be an unconditional branch insn");
+                LabelIdx labelIdx = static_cast<LabelOperand &>(insn->GetOperand(0)).GetLabelIndex();
+                BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx);
+                CHECK_FATAL(gotoBB != nullptr, "gotoBB is null");
+                curBB->PushBackSuccs(*gotoBB);
+                gotoBB->PushBackPreds(*curBB);
+                break;
+            }
+            case BB::kBBIgoto: {
+                for (auto lidx :
+                     CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetLabelTab()->GetAddrTakenLabels()) {
+                    BB *igotobb = cgFunc->GetBBFromLab2BBMap(lidx);
+                    CHECK_FATAL(igotobb, "igotobb is null");
+                    curBB->PushBackSuccs(*igotobb);
+                    igotobb->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBRangeGoto: {
+                std::set<BB *> bbs;
+                for (auto labelIdx : curBB->GetRangeGotoLabelVec()) {
+                    BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx);
+                    bbs.insert(gotoBB);
+                }
+                for (auto gotoBB : bbs) {
+                    curBB->PushBackSuccs(*gotoBB);
+                    gotoBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            case BB::kBBThrow:
+                break;
+            case BB::kBBFallthru: {
+                BB *fallthruBB = curBB->GetNext();
+                if (fallthruBB != nullptr) {
+                    curBB->PushBackSuccs(*fallthruBB);
+                    fallthruBB->PushBackPreds(*curBB);
+                }
+                break;
+            }
+            default:
+                break;
+        } /* end switch */
+
+        EHFunc *ehFunc = cgFunc->GetEHFunc();
+        /* Check exception table.
If curBB is in a try block, add catch BB to its succs */ + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + /* Determine if insn in bb can actually except */ + if (CanBBThrow(*curBB)) { + const MapleVector &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callsiteTable.size(); ++i) { + LSDACallSite *lsdaCallsite = callsiteTable[i]; + BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx()); + BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx()); + if (curBB->GetId() >= startTry->GetId() && curBB->GetId() <= endTry->GetId() && + lsdaCallsite->csLandingPad.GetEndOffset() != nullptr) { + BB *landingPad = + cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLandingPad.GetEndOffset()->GetLabelIdx()); + curBB->PushBackEhSuccs(*landingPad); + landingPad->PushBackEhPreds(*curBB); + } + } + } + } + } +} + +void CGCFG::CheckCFG() +{ + FOR_ALL_BB(bb, cgFunc) { + for (BB *sucBB : bb->GetSuccs()) { + bool found = false; + for (BB *sucPred : sucBB->GetPreds()) { + if (sucPred == bb) { + if (found == false) { + found = true; + } else { + LogInfo::MapleLogger() + << "dup pred " << sucPred->GetId() << " for sucBB " << sucBB->GetId() << "\n"; + } + } + } + if (found == false) { + LogInfo::MapleLogger() << "non pred for sucBB " << sucBB->GetId() << " for BB " << bb->GetId() << "\n"; + } + } + } + FOR_ALL_BB(bb, cgFunc) { + for (BB *predBB : bb->GetPreds()) { + bool found = false; + for (BB *predSucc : predBB->GetSuccs()) { + if (predSucc == bb) { + if (found == false) { + found = true; + } else { + LogInfo::MapleLogger() + << "dup succ " << predSucc->GetId() << " for predBB " << predBB->GetId() << "\n"; + } + } + } + if (found == false) { + LogInfo::MapleLogger() << "non succ for predBB " << predBB->GetId() << " for BB " << bb->GetId() + << "\n"; + } + } + } +} + +void CGCFG::CheckCFGFreq() +{ + auto verifyBBFreq = [this](const BB *bb, uint32 succFreq) { + uint32 res = bb->GetFrequency(); + if ((res != 0 && abs(static_cast(res - succFreq)) / res > 1.0) || (res == 0 && res != succFreq)) { + // Not included + if (bb->GetSuccs().size() > 1 && bb->GetPreds().size() > 1) { + return; + } + LogInfo::MapleLogger() << cgFunc->GetName() << " curBB: " << bb->GetId() << " freq: " << bb->GetFrequency() + << std::endl; + CHECK_FATAL(false, "Verifyfreq failure BB frequency!"); + } + }; + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsUnreachable() || bb->IsCleanup()) { + continue; + } + uint32 res = 0; + if (bb->GetSuccs().size() > 1) { + for (auto *succBB : bb->GetSuccs()) { + res += succBB->GetFrequency(); + if (succBB->GetPreds().size() > 1) { + LogInfo::MapleLogger() + << cgFunc->GetName() << " critical edges: curBB: " << bb->GetId() << std::endl; + CHECK_FATAL(false, "The CFG has critical edges!"); + } + } + verifyBBFreq(bb, res); + } else if (bb->GetSuccs().size() == 1) { + auto *succBB = bb->GetSuccs().front(); + if (succBB->GetPreds().size() == 1) { + verifyBBFreq(bb, succBB->GetFrequency()); + } else if (succBB->GetPreds().size() > 1) { + for (auto *pred : succBB->GetPreds()) { + res += pred->GetFrequency(); + } + verifyBBFreq(succBB, res); + } + } + } +} + +InsnVisitor *CGCFG::insnVisitor; + +void CGCFG::InitInsnVisitor(CGFunc &func) +{ + insnVisitor = func.NewInsnModifier(); +} + +Insn *CGCFG::CloneInsn(Insn &originalInsn) +{ + cgFunc->IncTotalNumberOfInstructions(); + return insnVisitor->CloneInsn(originalInsn); +} + +RegOperand *CGCFG::CreateVregFromReg(const RegOperand &pReg) +{ + 
return insnVisitor->CreateVregFromReg(pReg);
+}
+
+/*
+ * return true if:
+ *   mergee has only one predecessor, which is merger, or
+ *   all of mergee's other predecessors are comment-only BBs and merger is a solo goto;
+ *   mergee can't have a cfi instruction when running postcfgo.
+ */
+bool CGCFG::BBJudge(const BB &first, const BB &second) const
+{
+    if (first.GetKind() == BB::kBBReturn || second.GetKind() == BB::kBBReturn) {
+        return false;
+    }
+    if (&first == &second) {
+        return false;
+    }
+    if (second.GetPreds().size() == 1 && second.GetPreds().front() == &first) {
+        return true;
+    }
+    for (BB *bb : second.GetPreds()) {
+        if (bb != &first && !AreCommentAllPreds(*bb)) {
+            return false;
+        }
+    }
+    return first.IsSoloGoto();
+}
+
+/*
+ * Check if a given BB mergee can be merged into BB merger.
+ * Returns true if:
+ *   1. mergee has only one predecessor, which is merger, or all of mergee's
+ *      other predecessors are comment-only BBs.
+ *   2. merger has only one successor, which is mergee.
+ *   3. mergee can't have a cfi instruction when running postcfgo.
+ */
+bool CGCFG::CanMerge(const BB &merger, const BB &mergee) const
+{
+    if (!BBJudge(merger, mergee)) {
+        return false;
+    }
+    if (mergee.GetFirstInsn() != nullptr && mergee.GetFirstInsn()->IsCfiInsn()) {
+        return false;
+    }
+    return (merger.GetSuccs().size() == 1) && (merger.GetSuccs().front() == &mergee);
+}
+
+/* Check if the given BB contains only comments and all of its predecessors are comment-only BBs */
+bool CGCFG::AreCommentAllPreds(const BB &bb)
+{
+    if (!bb.IsCommentBB()) {
+        return false;
+    }
+    for (BB *pred : bb.GetPreds()) {
+        if (!AreCommentAllPreds(*pred)) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* Merge mergee into merger. */
+void CGCFG::MergeBB(BB &merger, BB &mergee, CGFunc &func)
+{
+    MergeBB(merger, mergee);
+    if (mergee.GetKind() == BB::kBBReturn) {
+        for (size_t i = 0; i < func.ExitBBsVecSize(); ++i) {
+            if (func.GetExitBB(i) == &mergee) {
+                func.EraseExitBBsVec(func.GetExitBBsVec().begin() + i);
+            }
+        }
+        func.PushBackExitBBsVec(merger);
+    }
+    if (mergee.GetKind() == BB::kBBRangeGoto) {
+        func.AddEmitSt(merger.GetId(), *func.GetEmitSt(mergee.GetId()));
+        func.DeleteEmitSt(mergee.GetId());
+    }
+}
+
+void CGCFG::MergeBB(BB &merger, BB &mergee)
+{
+    if (merger.GetKind() == BB::kBBGoto) {
+        if (!merger.GetLastInsn()->IsBranch()) {
+            CHECK_FATAL(false, "unexpected insn kind");
+        }
+        merger.RemoveInsn(*merger.GetLastInsn());
+    }
+    merger.AppendBBInsns(mergee);
+    if (mergee.GetPrev() != nullptr) {
+        mergee.GetPrev()->SetNext(mergee.GetNext());
+    }
+    if (mergee.GetNext() != nullptr) {
+        mergee.GetNext()->SetPrev(mergee.GetPrev());
+    }
+    merger.RemoveSuccs(mergee);
+    if (!merger.GetEhSuccs().empty()) {
+#if DEBUG
+        for (BB *bb : merger.GetEhSuccs()) {
+            DEBUG_ASSERT((bb != &mergee), "CGCFG::MergeBB: merging an EH bb");
+        }
+#endif
+    }
+    if (!mergee.GetEhSuccs().empty()) {
+        for (BB *bb : mergee.GetEhSuccs()) {
+            bb->RemoveEhPreds(mergee);
+            bb->PushBackEhPreds(merger);
+            merger.PushBackEhSuccs(*bb);
+        }
+    }
+    for (BB *bb : mergee.GetSuccs()) {
+        bb->RemovePreds(mergee);
+        bb->PushBackPreds(merger);
+        merger.PushBackSuccs(*bb);
+    }
+    merger.SetKind(mergee.GetKind());
+    mergee.SetNext(nullptr);
+    mergee.SetPrev(nullptr);
+    mergee.ClearPreds();
+    mergee.ClearSuccs();
+    mergee.ClearEhPreds();
+    mergee.ClearEhSuccs();
+    mergee.SetFirstInsn(nullptr);
+    mergee.SetLastInsn(nullptr);
+}
+
+/*
+ * Find all reachable BBs by dfs in cgfunc and mark their unreachable field false;
+ * all remaining bbs are then unreachable.
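+ *
+ * The shape of the pass, as a self-contained sketch over a plain
+ * adjacency-list CFG (simplified: the real version below also seeds the
+ * cleanup label, switch-table targets and the last BB, and walks EH
+ * successors too):
+ *
+ *     #include <stack>
+ *     #include <vector>
+ *     std::vector<bool> MarkUnreachable(const std::vector<std::vector<int>> &succs, int entry) {
+ *         std::vector<bool> unreachable(succs.size(), true);  // assume all unreachable
+ *         std::stack<int> work;
+ *         work.push(entry);
+ *         while (!work.empty()) {  // dfs from the entry
+ *             int bb = work.top();
+ *             work.pop();
+ *             if (!unreachable[bb]) {
+ *                 continue;  // already reached
+ *             }
+ *             unreachable[bb] = false;  // reached, so clear the flag
+ *             for (int s : succs[bb]) {
+ *                 work.push(s);
+ *             }
+ *         }
+ *         return unreachable;
+ *     }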
+ */ +void CGCFG::FindAndMarkUnreachable(CGFunc &func) +{ + BB *firstBB = func.GetFirstBB(); + std::stack toBeAnalyzedBBs; + toBeAnalyzedBBs.push(firstBB); + std::unordered_set instackBBs; + + BB *bb = firstBB; + /* set all bb's unreacable to true */ + while (bb != nullptr) { + /* Check if bb is the first or the last BB of the function */ + if (bb->GetFirstStmt() == func.GetCleanupLabel() || InSwitchTable(bb->GetLabIdx(), func) || + bb == func.GetFirstBB() || bb == func.GetLastBB()) { + toBeAnalyzedBBs.push(bb); + } else if (bb->IsLabelTaken() == false) { + bb->SetUnreachable(true); + } + bb = bb->GetNext(); + } + + /* do a dfs to see which bbs are reachable */ + while (!toBeAnalyzedBBs.empty()) { + bb = toBeAnalyzedBBs.top(); + toBeAnalyzedBBs.pop(); + (void)instackBBs.insert(bb->GetId()); + + bb->SetUnreachable(false); + + for (BB *succBB : bb->GetSuccs()) { + if (instackBBs.count(succBB->GetId()) == 0) { + toBeAnalyzedBBs.push(succBB); + (void)instackBBs.insert(succBB->GetId()); + } + } + for (BB *succBB : bb->GetEhSuccs()) { + if (instackBBs.count(succBB->GetId()) == 0) { + toBeAnalyzedBBs.push(succBB); + (void)instackBBs.insert(succBB->GetId()); + } + } + } +} + +/* + * Theoretically, every time you remove from a bb's preds, you should consider invoking this method. + * + * @param bb + * @param func + */ +void CGCFG::FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const +{ + /* Check if bb is the first or the last BB of the function */ + bool isFirstBBInfunc = (&bb == func.GetFirstBB()); + bool isLastBBInfunc = (&bb == func.GetLastBB()); + if (bb.GetFirstStmt() == func.GetCleanupLabel() || InSwitchTable(bb.GetLabIdx(), func) || isFirstBBInfunc || + isLastBBInfunc) { + return; + } + std::stack toBeAnalyzedBBs; + toBeAnalyzedBBs.push(&bb); + std::set instackBBs; + BB *it = nullptr; + while (!toBeAnalyzedBBs.empty()) { + it = toBeAnalyzedBBs.top(); + (void)instackBBs.insert(it->GetId()); + toBeAnalyzedBBs.pop(); + /* Check if bb is the first or the last BB of the function */ + isFirstBBInfunc = (it == func.GetFirstBB()); + isLastBBInfunc = (it == func.GetLastBB()); + bool needFlush = !isFirstBBInfunc && !isLastBBInfunc && it->GetFirstStmt() != func.GetCleanupLabel() && + (it->GetPreds().empty() || (it->GetPreds().size() == 1 && it->GetEhPreds().front() == it)) && + it->GetEhPreds().empty() && !InSwitchTable(it->GetLabIdx(), *cgFunc) && + !cgFunc->IsExitBB(*it) && (it->IsLabelTaken() == false); + if (!needFlush) { + continue; + } + it->SetUnreachable(true); + it->SetFirstInsn(nullptr); + it->SetLastInsn(nullptr); + for (BB *succ : it->GetSuccs()) { + if (instackBBs.count(succ->GetId()) == 0) { + toBeAnalyzedBBs.push(succ); + (void)instackBBs.insert(succ->GetId()); + } + succ->RemovePreds(*it); + succ->RemoveEhPreds(*it); + } + it->ClearSuccs(); + for (BB *succ : it->GetEhSuccs()) { + if (instackBBs.count(succ->GetId()) == 0) { + toBeAnalyzedBBs.push(succ); + (void)instackBBs.insert(succ->GetId()); + } + succ->RemoveEhPreds(*it); + succ->RemovePreds(*it); + } + it->ClearEhSuccs(); + } +} + +void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) +{ + BB *sucBB = CGCFG::GetTargetSuc(curBB, false, isGotoIf); + if (sucBB != nullptr) { + sucBB->RemovePreds(curBB); + } + BB *fallthruSuc = nullptr; + if (isGotoIf) { + for (BB *succ : curBB.GetSuccs()) { + if (succ == sucBB) { + continue; + } + fallthruSuc = succ; + break; + } + + DEBUG_ASSERT(fallthruSuc == curBB.GetNext(), "fallthru succ should be its next bb."); + if (fallthruSuc != nullptr) { + fallthruSuc->RemovePreds(curBB); + } 
+ } + + for (BB *preBB : curBB.GetPreds()) { + if (preBB->GetKind() == BB::kBBIgoto) { + return; + } + /* + * If curBB is the target of its predecessor, change + * the jump target. + */ + if (&curBB == GetTargetSuc(*preBB, true, isGotoIf)) { + LabelIdx targetLabel; + if (curBB.GetNext()->GetLabIdx() == 0) { + targetLabel = insnVisitor->GetCGFunc()->CreateLabel(); + curBB.GetNext()->SetLabIdx(targetLabel); + } else { + targetLabel = curBB.GetNext()->GetLabIdx(); + } + insnVisitor->ModifyJumpTarget(targetLabel, *preBB); + } + if (fallthruSuc != nullptr && !fallthruSuc->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*fallthruSuc); + fallthruSuc->PushBackPreds(*preBB); + } + if (sucBB != nullptr && !sucBB->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*sucBB); + sucBB->PushBackPreds(*preBB); + } + preBB->RemoveSuccs(curBB); + } + for (BB *ehSucc : curBB.GetEhSuccs()) { + ehSucc->RemoveEhPreds(curBB); + } + for (BB *ehPred : curBB.GetEhPreds()) { + ehPred->RemoveEhSuccs(curBB); + } + curBB.GetNext()->RemovePreds(curBB); + curBB.GetPrev()->SetNext(curBB.GetNext()); + curBB.GetNext()->SetPrev(curBB.GetPrev()); + cgFunc->ClearBBInVec(curBB.GetId()); + /* remove callsite */ + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* only java try has ehFunc->GetLSDACallSiteTable */ + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + ehFunc->GetLSDACallSiteTable()->RemoveCallSite(curBB); + } +} + +void CGCFG::RetargetJump(BB &srcBB, BB &targetBB) +{ + insnVisitor->ModifyJumpTarget(srcBB, targetBB); +} + +BB *CGCFG::GetTargetSuc(BB &curBB, bool branchOnly, bool isGotoIf) +{ + switch (curBB.GetKind()) { + case BB::kBBGoto: + case BB::kBBIntrinsic: + case BB::kBBIf: { + const Insn *origLastInsn = curBB.GetLastMachineInsn(); + if (isGotoIf && (curBB.GetPrev() != nullptr) && + (curBB.GetKind() == BB::kBBGoto || curBB.GetKind() == BB::kBBIf) && + (curBB.GetPrev()->GetKind() == BB::kBBGoto || curBB.GetPrev()->GetKind() == BB::kBBIf)) { + origLastInsn = curBB.GetPrev()->GetLastMachineInsn(); + } + LabelIdx label = insnVisitor->GetJumpLabel(*origLastInsn); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == label) { + return bb; + } + } + break; + } + case BB::kBBIgoto: { + for (Insn *insn = curBB.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { +#if TARGAARCH64 + if (insn->GetMachineOpcode() == MOP_adrp_label) { + int64 label = static_cast(insn->GetOperand(1)).GetValue(); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == static_cast(label)) { + return bb; + } + } + } +#endif + } + /* can also be a MOP_xbr. */ + return nullptr; + } + case BB::kBBFallthru: { + return (branchOnly ? 
nullptr : curBB.GetNext()); + } + case BB::kBBThrow: + return nullptr; + default: + return nullptr; + } + return nullptr; +} + +bool CGCFG::InLSDA(LabelIdx label, const EHFunc &ehFunc) +{ + if (!label || ehFunc.GetLSDACallSiteTable() == nullptr) { + return false; + } + if (label == ehFunc.GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx() || + label == ehFunc.GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()) { + return true; + } + return ehFunc.GetLSDACallSiteTable()->InCallSiteTable(label); +} + +bool CGCFG::InSwitchTable(LabelIdx label, const CGFunc &func) +{ + if (!label) { + return false; + } + return func.InSwitchTable(label); +} + +bool CGCFG::IsCompareAndBranchInsn(const Insn &insn) const +{ + return insnVisitor->IsCompareAndBranchInsn(insn); +} + +bool CGCFG::IsAddOrSubInsn(const Insn &insn) const +{ + return insnVisitor->IsAddOrSubInsn(insn); +} + +Insn *CGCFG::FindLastCondBrInsn(BB &bb) const +{ + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (insn->IsBranch()) { + return insn; + } + } + return nullptr; +} + +void CGCFG::MarkLabelTakenBB() +{ + if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return; + } + for (BB *bb = cgFunc->GetFirstBB(); bb != nullptr; bb = bb->GetNext()) { + if (cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().find(bb->GetLabIdx()) != + cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().end()) { + cgFunc->SetHasTakenLabel(); + bb->SetLabelTaken(); + } + } +} + +/* + * analyse the CFG to find the BBs that are not reachable from function entries + * and delete them + */ +void CGCFG::UnreachCodeAnalysis() +{ + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && + (cgFunc->HasTakenLabel() || (cgFunc->GetEHFunc() && cgFunc->GetEHFunc()->GetLSDAHeader()))) { + return; + } + /* + * Find all reachable BBs by dfs in cgfunc and mark their field false, + * then all other bbs should be unreachable. + */ + BB *firstBB = cgFunc->GetFirstBB(); + std::forward_list toBeAnalyzedBBs; + toBeAnalyzedBBs.push_front(firstBB); + std::set unreachBBs; + + BB *bb = firstBB; + /* set all bb's unreacable to true */ + while (bb != nullptr) { + /* Check if bb is the first or the last BB of the function */ + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel() || InSwitchTable(bb->GetLabIdx(), *cgFunc) || + bb == cgFunc->GetFirstBB() || bb == cgFunc->GetLastBB() || bb->GetKind() == BB::kBBReturn) { + toBeAnalyzedBBs.push_front(bb); + } else { + (void)unreachBBs.insert(bb); + } + if (bb->IsLabelTaken() == false) { + bb->SetUnreachable(true); + } + bb = bb->GetNext(); + } + + /* do a dfs to see which bbs are reachable */ + while (!toBeAnalyzedBBs.empty()) { + bb = toBeAnalyzedBBs.front(); + toBeAnalyzedBBs.pop_front(); + if (!bb->IsUnreachable()) { + continue; + } + bb->SetUnreachable(false); + for (BB *succBB : bb->GetSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + for (BB *succBB : bb->GetEhSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + } + /* Don't remove unreach code if withDwarf is enabled. 
*/ + if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) { + return; + } + /* remove unreachable bb */ + std::set::iterator it; + for (it = unreachBBs.begin(); it != unreachBBs.end(); it++) { + BB *unreachBB = *it; + DEBUG_ASSERT(unreachBB != nullptr, "unreachBB must not be nullptr"); + if (cgFunc->IsExitBB(*unreachBB)) { + unreachBB->SetUnreachable(false); + } + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* if unreachBB InLSDA ,replace unreachBB's label with nextReachableBB before remove it. */ + if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && + cgFunc->GetTheCFG()->InLSDA(unreachBB->GetLabIdx(), *ehFunc)) { + /* find next reachable BB */ + BB *nextReachableBB = nullptr; + for (BB *curBB = unreachBB; curBB != nullptr; curBB = curBB->GetNext()) { + if (!curBB->IsUnreachable()) { + nextReachableBB = curBB; + break; + } + } + CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB not be nullptr"); + if (nextReachableBB->GetLabIdx() == 0) { + LabelIdx labelIdx = cgFunc->CreateLabel(); + nextReachableBB->AddLabel(labelIdx); + cgFunc->SetLab2BBMap(labelIdx, *nextReachableBB); + } + + ehFunc->GetLSDACallSiteTable()->UpdateCallSite(*unreachBB, *nextReachableBB); + } + + unreachBB->GetPrev()->SetNext(unreachBB->GetNext()); + unreachBB->GetNext()->SetPrev(unreachBB->GetPrev()); + + for (BB *sucBB : unreachBB->GetSuccs()) { + sucBB->RemovePreds(*unreachBB); + } + for (BB *ehSucBB : unreachBB->GetEhSuccs()) { + ehSucBB->RemoveEhPreds(*unreachBB); + } + + unreachBB->ClearSuccs(); + unreachBB->ClearEhSuccs(); + + /* Clear insns in GOT Map. */ + cgFunc->ClearUnreachableGotInfos(*unreachBB); + cgFunc->ClearUnreachableConstInfos(*unreachBB); + } +} + +void CGCFG::FindWillExitBBs(BB *bb, std::set *visitedBBs) +{ + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + for (BB *predbb : bb->GetPreds()) { + FindWillExitBBs(predbb, visitedBBs); + } +} + +/* + * analyse the CFG to find the BBs that will not reach any function exit; these + * are BBs inside infinite loops; mark their wontExit flag and create + * artificial edges from them to commonExitBB + */ +void CGCFG::WontExitAnalysis() +{ + std::set visitedBBs; + FindWillExitBBs(cgFunc->GetCommonExitBB(), &visitedBBs); + BB *bb = cgFunc->GetFirstBB(); + while (bb != nullptr) { + if (visitedBBs.count(bb) == 0) { + bb->SetWontExit(true); + if (bb->GetKind() == BB::kBBGoto || bb->GetKind() == BB::kBBThrow) { + // make this bb a predecessor of commonExitBB + cgFunc->GetCommonExitBB()->PushBackPreds(*bb); + } + } + bb = bb->GetNext(); + } +} + +BB *CGCFG::FindLastRetBB() +{ + FOR_ALL_BB_REV(bb, cgFunc) { + if (bb->GetKind() == BB::kBBReturn) { + return bb; + } + } + return nullptr; +} + +void CGCFG::UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB) +{ + /* connext newBB -> succ */ + for (auto it = succ.GetPredsBegin(); it != succ.GetPredsEnd(); ++it) { + if (*it == &pred) { + auto origIt = it; + succ.ErasePreds(it); + if (origIt != succ.GetPredsBegin()) { + --origIt; + succ.InsertPred(origIt, newBB); + } else { + succ.PushFrontPreds(newBB); + } + break; + } + } + newBB.PushBackSuccs(succ); + + /* connext pred -> newBB */ + for (auto it = pred.GetSuccsBegin(); it != pred.GetSuccsEnd(); ++it) { + if (*it == &succ) { + auto origIt = it; + pred.EraseSuccs(it); + if (origIt != succ.GetSuccsBegin()) { + --origIt; + pred.InsertSucc(origIt, newBB); + } else { + pred.PushFrontSuccs(newBB); + } + break; + } + } + newBB.PushBackPreds(pred); + + /* maintain eh info */ + for (auto it = pred.GetEhSuccs().begin(); it != pred.GetEhSuccs().end(); 
++it) { + newBB.PushBackEhSuccs(**it); + } + for (auto it = pred.GetEhPredsBegin(); it != pred.GetEhPredsEnd(); ++it) { + newBB.PushBackEhPreds(**it); + } + + /* update phi */ + for (auto phiInsnIt : succ.GetPhiInsns()) { + auto &phiList = static_cast(phiInsnIt.second->GetOperand(kInsnSecondOpnd)); + for (auto phiOpndIt : phiList.GetOperands()) { + uint32 fBBId = phiOpndIt.first; + DEBUG_ASSERT(fBBId != 0, "GetFromBBID = 0"); + BB *predBB = cgFunc->GetBBFromID(fBBId); + if (predBB == &pred) { + phiList.UpdateOpnd(fBBId, newBB.GetId(), *phiOpndIt.second); + break; + } + } + } +} + +#if TARGAARCH64 +void CGCFG::BreakCriticalEdge(BB &pred, BB &succ) +{ + LabelIdx newLblIdx = cgFunc->CreateLabel(); + BB *newBB = cgFunc->CreateNewBB(newLblIdx, false, BB::kBBGoto, pred.GetFrequency()); + newBB->SetCritical(true); + bool isFallThru = pred.GetNext() == ≻ + /* set prev, next */ + if (isFallThru) { + BB *origNext = pred.GetNext(); + origNext->SetPrev(newBB); + newBB->SetNext(origNext); + pred.SetNext(newBB); + newBB->SetPrev(&pred); + newBB->SetKind(BB::kBBFallthru); + } else { + BB *exitBB = cgFunc->GetExitBBsVec().size() == 0 ? nullptr : cgFunc->GetExitBB(0); + if (exitBB == nullptr) { + cgFunc->GetLastBB()->AppendBB(*newBB); + cgFunc->SetLastBB(*newBB); + } else { + exitBB->AppendBB(*newBB); + } + newBB->AppendInsn( + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xuncond, cgFunc->GetOrCreateLabelOperand(succ.GetLabIdx()))); + } + + /* update offset if succ is goto target */ + if (pred.GetKind() == BB::kBBIf) { + Insn *brInsn = FindLastCondBrInsn(pred); + DEBUG_ASSERT(brInsn != nullptr, "null ptr check"); + LabelOperand &brTarget = static_cast(brInsn->GetOperand(AArch64isa::GetJumpTargetIdx(*brInsn))); + if (brTarget.GetLabelIndex() == succ.GetLabIdx()) { + brInsn->SetOperand(AArch64isa::GetJumpTargetIdx(*brInsn), cgFunc->GetOrCreateLabelOperand(newLblIdx)); + } + } else if (pred.GetKind() == BB::kBBRangeGoto) { + const MapleVector &labelVec = pred.GetRangeGotoLabelVec(); + for (size_t i = 0; i < labelVec.size(); ++i) { + if (labelVec[i] == succ.GetLabIdx()) { + /* single edge for multi jump target, so have to replace all. */ + pred.SetRangeGotoLabel(i, newLblIdx); + } + } + cgFunc->UpdateEmitSt(pred, succ.GetLabIdx(), newLblIdx); + } else { + DEBUG_ASSERT(0, "unexpeced bb kind in BreakCriticalEdge"); + } + + /* update pred, succ */ + UpdatePredsSuccsAfterSplit(pred, succ, *newBB); +} +#endif + +bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) +{ + CGCFG *cfg = f.GetMemoryPool()->New(f); + f.SetTheCFG(cfg); + /* build control flow graph */ + f.GetTheCFG()->BuildCFG(); + /* analysis unreachable code */ + f.GetTheCFG()->UnreachCodeAnalysis(); + f.EraseUnreachableStackMapInsns(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgHandleCFG, handlecfg) + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_critical_edge.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_critical_edge.cpp new file mode 100644 index 0000000000000000000000000000000000000000..907ff844236b9fdbbafb95f1cbd0420b04d11e3e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_critical_edge.cpp @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cg.h" +#include "cg_critical_edge.h" +#include "cg_ssa.h" + +namespace maplebe { +void CriticalEdge::SplitCriticalEdges() +{ + for (auto it = criticalEdges.begin(); it != criticalEdges.end(); ++it) { + cgFunc->GetTheCFG()->BreakCriticalEdge(*((*it).first), *((*it).second)); + } +} + +void CriticalEdge::CollectCriticalEdges() +{ + constexpr int multiPredsNum = 2; + FOR_ALL_BB(bb, cgFunc) { + const auto &preds = bb->GetPreds(); + if (preds.size() < multiPredsNum) { + continue; + } + // current BB is a merge + for (BB *pred : preds) { + if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) { + continue; + } + if (pred->GetSuccs().size() > 1) { + // pred has more than one succ + criticalEdges.push_back(std::make_pair(pred, bb)); + } + } + } +} + +bool CgCriticalEdge::PhaseRun(maplebe::CGFunc &f) +{ + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && f.NumBBs() < kBBLimit) { + MemPool *memPool = GetPhaseMemPool(); + CriticalEdge *split = memPool->New(f, *memPool); + f.GetTheCFG()->InitInsnVisitor(f); + split->CollectCriticalEdges(); + split->SplitCriticalEdges(); + } + return false; +} + +void CgCriticalEdge::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCriticalEdge, cgsplitcriticaledge) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dce.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7c00637322475a513e128b77c6998aea1618c011 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dce.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cg_dce.h" +#include "cg.h" +namespace maplebe { +void CGDce::DoDce() +{ + bool tryDceAgain = false; + do { + tryDceAgain = false; + for (auto &ssaIt : GetSSAInfo()->GetAllSSAOperands()) { + if (ssaIt.second != nullptr && !ssaIt.second->IsDeleted()) { + if (RemoveUnuseDef(*ssaIt.second)) { + tryDceAgain = true; + } + } + } + } while (tryDceAgain); +} + +bool CgDce::PhaseRun(maplebe::CGFunc &f) +{ + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CGDce *cgDce = f.GetCG()->CreateCGDce(*GetPhaseMemPool(), f, *ssaInfo); + cgDce->DoDce(); + return false; +} + +void CgDce::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgDce, cgdeadcodeelimination) +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dominance.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dominance.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9f2f6b77892b9625e15e4d2c6d044a35ebfbb3d9 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_dominance.cpp @@ -0,0 +1,514 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cg_dominance.h" +#include +#include "cg_option.h" +#include "cgfunc.h" + +/* + * This phase build dominance + */ +namespace maplebe { +constexpr uint32 kBBVectorInitialSize = 2; +void DomAnalysis::PostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap) +{ + DEBUG_ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PostOrderWalk"); + if (visitedMap[bb.GetId()]) { + return; + } + visitedMap[bb.GetId()] = true; + for (const BB *suc : bb.GetSuccs()) { + PostOrderWalk(*suc, pid, visitedMap); + } + DEBUG_ASSERT(bb.GetId() < postOrderIDVec.size(), "index out of range in Dominance::PostOrderWalk"); + postOrderIDVec[bb.GetId()] = pid++; +} + +void DomAnalysis::GenPostOrderID() +{ + DEBUG_ASSERT(!bbVec.empty(), "size to be allocated is 0"); + MapleVector visitedMap(bbVec.size() + 1, false, cgFunc.GetFuncScopeAllocator()->Adapter()); + int32 postOrderID = 0; + PostOrderWalk(commonEntryBB, postOrderID, visitedMap); + // initialize reversePostOrder + int32 maxPostOrderID = postOrderID - 1; + reversePostOrder.resize(static_cast(maxPostOrderID + 1)); + for (size_t i = 0; i < postOrderIDVec.size(); ++i) { + int32 postOrderNo = postOrderIDVec[i]; + if (postOrderNo == -1) { + continue; + } + reversePostOrder[static_cast(maxPostOrderID - postOrderNo)] = bbVec[i]; + } +} + +BB *DomAnalysis::Intersect(BB &bb1, const BB &bb2) +{ + auto *ptrBB1 = &bb1; + auto *ptrBB2 = &bb2; + while (ptrBB1 != ptrBB2) { + while (postOrderIDVec[ptrBB1->GetId()] < postOrderIDVec[ptrBB2->GetId()]) { + ptrBB1 = GetDom(ptrBB1->GetId()); + } + while (postOrderIDVec[ptrBB2->GetId()] < postOrderIDVec[ptrBB1->GetId()]) { + ptrBB2 = GetDom(ptrBB2->GetId()); + } + } + return ptrBB1; +} + +bool DominanceBase::CommonEntryBBIsPred(const BB &bb) const +{ + 
for (const BB *suc : commonEntryBB.GetSuccs()) { + if (suc == &bb) { + return true; + } + } + return false; +} + +// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void DomAnalysis::ComputeDominance() +{ + SetDom(commonEntryBB.GetId(), &commonEntryBB); + bool changed; + do { + changed = false; + for (size_t i = 1; i < reversePostOrder.size(); ++i) { + BB *bb = reversePostOrder[i]; + if (bb == nullptr) { + continue; + } + BB *pre = nullptr; + auto it = bb->GetPredsBegin(); + if (CommonEntryBBIsPred(*bb) || bb->GetPreds().empty()) { + pre = &commonEntryBB; + } else { + pre = *it; + } + ++it; + while ((GetDom(pre->GetId()) == nullptr || pre == bb) && it != bb->GetPredsEnd()) { + pre = *it; + ++it; + } + BB *newIDom = pre; + for (; it != bb->GetPredsEnd(); ++it) { + pre = *it; + if (GetDom(pre->GetId()) != nullptr && pre != bb) { + newIDom = Intersect(*pre, *newIDom); + } + } + if (GetDom(bb->GetId()) != newIDom) { + SetDom(bb->GetId(), newIDom); + changed = true; + } + } + } while (changed); +} + +// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void DomAnalysis::ComputeDomFrontiers() +{ + for (const BB *bb : bbVec) { + if (bb == nullptr || bb == &commonExitBB) { + continue; + } + if (bb->GetPreds().size() < kBBVectorInitialSize) { + continue; + } + for (BB *pre : bb->GetPreds()) { + BB *runner = pre; + while (runner != nullptr && runner != GetDom(bb->GetId()) && runner != &commonEntryBB) { + if (!HasDomFrontier(runner->GetId(), bb->GetId())) { + domFrontier[runner->GetId()].push_back(bb->GetId()); + } + runner = GetDom(runner->GetId()); + } + } + } + // check entry bb's predBB, such as : + // bb1 is commonEntryBB, bb2 is entryBB, bb2 is domFrontier of bb3 and bb7. + // 1 + // | + // 2 <- + // / | + // 3 | + // / \ | + // 4 7--- + // / \ ^ + // | | | + // 5-->6-- + for (BB *succ : commonEntryBB.GetSuccs()) { + if (succ->GetPreds().size() != 1) { // Only deal with one pred bb. 
+            continue;
+        }
+        for (BB *pre : succ->GetPreds()) {
+            BB *runner = pre;
+            while (runner != GetDom(succ->GetId()) && runner != &commonEntryBB && runner != succ) {
+                if (!HasDomFrontier(runner->GetId(), succ->GetId())) {
+                    domFrontier[runner->GetId()].push_back(succ->GetId());
+                }
+                runner = GetDom(runner->GetId());
+            }
+        }
+    }
+}
+
+void DomAnalysis::ComputeDomChildren()
+{
+    for (auto *bb : reversePostOrder) {
+        if (bb == nullptr || GetDom(bb->GetId()) == nullptr) {
+            continue;
+        }
+        BB *parent = GetDom(bb->GetId());
+        if (parent == bb) {
+            continue;
+        }
+        domChildren[parent->GetId()].push_back(bb->GetId());
+    }
+}
+
+// bbidMarker indicates that the iterDomFrontier results for bbid < bbidMarker
+// have been computed
+void DomAnalysis::GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker,
+                                     std::vector<bool> &visitedMap)
+{
+    if (visitedMap[bb->GetId()]) {
+        return;
+    }
+    visitedMap[bb->GetId()] = true;
+    for (uint32 frontierbbid : domFrontier[bb->GetId()]) {
+        (void)dfset->insert(frontierbbid);
+        if (frontierbbid < bbidMarker) {  // union with its computed result
+            dfset->insert(iterDomFrontier[frontierbbid].begin(), iterDomFrontier[frontierbbid].end());
+        } else {  // recursive call
+            BB *frontierbb = bbVec[frontierbbid];
+            GetIterDomFrontier(frontierbb, dfset, bbidMarker, visitedMap);
+        }
+    }
+}
+
+void DomAnalysis::ComputeIterDomFrontiers()
+{
+    for (BB *bb : bbVec) {
+        if (bb == nullptr || bb == &commonExitBB) {
+            continue;
+        }
+        std::vector<bool> visitedMap(bbVec.size(), false);
+        GetIterDomFrontier(bb, &iterDomFrontier[bb->GetId()], bb->GetId(), visitedMap);
+    }
+}
+
+uint32 DomAnalysis::ComputeDtPreorder(const BB &bb, uint32 &num)
+{
+    DEBUG_ASSERT(num < dtPreOrder.size(), "index out of range in Dominance::ComputeDtPreorder");
+    dtPreOrder[num] = bb.GetId();
+    dtDfn[bb.GetId()] = num;
+    uint32 maxDtDfnOut = num;
+    ++num;
+
+    for (uint32 k : domChildren[bb.GetId()]) {
+        maxDtDfnOut = ComputeDtPreorder(*bbVec[k], num);
+    }
+
+    dtDfnOut[bb.GetId()] = maxDtDfnOut;
+    return maxDtDfnOut;
+}
+
+// true if b1 dominates b2
+bool DomAnalysis::Dominate(const BB &bb1, const BB &bb2)
+{
+    return dtDfn[bb1.GetId()] <= dtDfn[bb2.GetId()] && dtDfnOut[bb1.GetId()] >= dtDfnOut[bb2.GetId()];
+}
+
+void DomAnalysis::Compute()
+{
+    GenPostOrderID();
+    ComputeDominance();
+    ComputeDomFrontiers();
+    ComputeDomChildren();
+    ComputeIterDomFrontiers();
+    uint32 num = 0;
+    (void)ComputeDtPreorder(*cgFunc.GetFirstBB(), num);
+    GetDtPreOrder().resize(num);
+}
+
+void DomAnalysis::Dump()
+{
+    for (BB *bb : reversePostOrder) {
+        LogInfo::MapleLogger() << "postorder no " << postOrderIDVec[bb->GetId()];
+        LogInfo::MapleLogger() << " is bb:" << bb->GetId();
+        LogInfo::MapleLogger() << " im_dom is bb:" << GetDom(bb->GetId())->GetId();
+        LogInfo::MapleLogger() << " domfrontier: [";
+        for (uint32 id : domFrontier[bb->GetId()]) {
+            LogInfo::MapleLogger() << id << " ";
+        }
+        LogInfo::MapleLogger() << "] domchildren: [";
+        for (uint32 id : domChildren[bb->GetId()]) {
+            LogInfo::MapleLogger() << id << " ";
+        }
+        LogInfo::MapleLogger() << "]\n";
+    }
+    LogInfo::MapleLogger() << "\npreorder traversal of dominator tree:";
+    for (uint32 id : dtPreOrder) {
+        LogInfo::MapleLogger() << id << " ";
+    }
+    LogInfo::MapleLogger() << "\n\n";
+}
+
+/* ================= for PostDominance ================= */
+void PostDomAnalysis::PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector<bool> &visitedMap)
+{
+    DEBUG_ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PdomPostOrderWalk");
+    if (bbVec[bb.GetId()] == nullptr) {
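+        // A null slot in bbVec marks a block that was removed from the CFG; the
+        // reverse-CFG walk must not assign it a postorder number.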
+        return;
+    }
+    if (visitedMap[bb.GetId()]) {
+        return;
+    }
+    visitedMap[bb.GetId()] = true;
+    for (BB *pre : bb.GetPreds()) {
+        PdomPostOrderWalk(*pre, pid, visitedMap);
+    }
+    DEBUG_ASSERT(bb.GetId() < pdomPostOrderIDVec.size(), "index out of range in Dominance::PdomPostOrderWalk");
+    pdomPostOrderIDVec[bb.GetId()] = pid++;
+}
+
+void PostDomAnalysis::PdomGenPostOrderID()
+{
+    DEBUG_ASSERT(!bbVec.empty(), "call calloc failed in Dominance::PdomGenPostOrderID");
+    MapleVector<bool> visitedMap(bbVec.size(), false, cgFunc.GetFuncScopeAllocator()->Adapter());
+    int32 postOrderID = 0;
+    PdomPostOrderWalk(commonExitBB, postOrderID, visitedMap);
+    // initialize pdomReversePostOrder
+    int32 maxPostOrderID = postOrderID - 1;
+    pdomReversePostOrder.resize(static_cast<uint32>(maxPostOrderID + 1));
+    for (size_t i = 0; i < pdomPostOrderIDVec.size(); ++i) {
+        int32 postOrderNo = pdomPostOrderIDVec[i];
+        if (postOrderNo == -1) {
+            continue;
+        }
+        pdomReversePostOrder[static_cast<uint32>(maxPostOrderID - postOrderNo)] = bbVec[i];
+    }
+}
+
+BB *PostDomAnalysis::PdomIntersect(BB &bb1, const BB &bb2)
+{
+    auto *ptrBB1 = &bb1;
+    auto *ptrBB2 = &bb2;
+    while (ptrBB1 != ptrBB2) {
+        while (pdomPostOrderIDVec[ptrBB1->GetId()] < pdomPostOrderIDVec[ptrBB2->GetId()]) {
+            ptrBB1 = GetPdom(ptrBB1->GetId());
+        }
+        while (pdomPostOrderIDVec[ptrBB2->GetId()] < pdomPostOrderIDVec[ptrBB1->GetId()]) {
+            ptrBB2 = GetPdom(ptrBB2->GetId());
+        }
+    }
+    return ptrBB1;
+}
+
+// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al.
+void PostDomAnalysis::ComputePostDominance()
+{
+    SetPdom(commonExitBB.GetId(), &commonExitBB);
+    bool changed = false;
+    do {
+        changed = false;
+        for (size_t i = 1; i < pdomReversePostOrder.size(); ++i) {
+            BB *bb = pdomReversePostOrder[i];
+            BB *suc = nullptr;
+            auto it = bb->GetSuccsBegin();
+            if (cgFunc.IsExitBB(*bb) || bb->GetSuccs().empty() || (bb->IsWontExit() && bb->GetKind() == BB::kBBGoto)) {
+                suc = &commonExitBB;
+            } else {
+                suc = *it;
+            }
+            ++it;
+            while ((GetPdom(suc->GetId()) == nullptr || suc == bb) && it != bb->GetSuccsEnd()) {
+                suc = *it;
+                ++it;
+            }
+            if (GetPdom(suc->GetId()) == nullptr) {
+                suc = &commonExitBB;
+            }
+            BB *newIDom = suc;
+            for (; it != bb->GetSuccsEnd(); ++it) {
+                suc = *it;
+                if (GetPdom(suc->GetId()) != nullptr && suc != bb) {
+                    newIDom = PdomIntersect(*suc, *newIDom);
+                }
+            }
+            if (GetPdom(bb->GetId()) != newIDom) {
+                SetPdom(bb->GetId(), newIDom);
+                DEBUG_ASSERT(GetPdom(newIDom->GetId()) != nullptr, "null ptr check");
+                changed = true;
+            }
+        }
+    } while (changed);
+}
+
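+// Illustrative trace (not part of the patch): on a diamond 1->{2,3}->4, the reverse-CFG
+// walk ranks 4 highest, and PdomIntersect(2, 3) climbs the current ipdom chain of
+// whichever finger has the smaller postorder number until both meet at 4, so one
+// pass already yields ipdom(2) == ipdom(3) == 4.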
+// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al.
+void PostDomAnalysis::ComputePdomFrontiers()
+{
+    for (const BB *bb : bbVec) {
+        if (bb == nullptr || bb == &commonEntryBB) {
+            continue;
+        }
+        if (bb->GetSuccs().size() < kBBVectorInitialSize) {
+            continue;
+        }
+        for (BB *suc : bb->GetSuccs()) {
+            BB *runner = suc;
+            while (runner != GetPdom(bb->GetId()) && runner != &commonEntryBB) {
+                if (!HasPdomFrontier(runner->GetId(), bb->GetId())) {
+                    pdomFrontier[runner->GetId()].push_back(bb->GetId());
+                }
+                DEBUG_ASSERT(GetPdom(runner->GetId()) != nullptr, "ComputePdomFrontiers: pdoms[] is nullptr");
+                runner = GetPdom(runner->GetId());
+            }
+        }
+    }
+}
+
+void PostDomAnalysis::ComputePdomChildren()
+{
+    for (const BB *bb : bbVec) {
+        if (bb == nullptr || GetPdom(bb->GetId()) == nullptr) {
+            continue;
+        }
+        const BB *parent = GetPdom(bb->GetId());
+        if (parent == bb) {
+            continue;
+        }
+        pdomChildren[parent->GetId()].push_back(bb->GetId());
+    }
+}
+
+// bbidMarker indicates that the iterPdomFrontier results for bbid < bbidMarker
+// have been computed
+void PostDomAnalysis::GetIterPdomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker,
+                                          std::vector<bool> &visitedMap)
+{
+    if (visitedMap[bb->GetId()]) {
+        return;
+    }
+    visitedMap[bb->GetId()] = true;
+    for (uint32 frontierbbid : pdomFrontier[bb->GetId()]) {
+        (void)dfset->insert(frontierbbid);
+        if (frontierbbid < bbidMarker) {  // union with its computed result
+            dfset->insert(iterPdomFrontier[frontierbbid].begin(), iterPdomFrontier[frontierbbid].end());
+        } else {  // recursive call
+            BB *frontierbb = bbVec[frontierbbid];
+            GetIterPdomFrontier(frontierbb, dfset, bbidMarker, visitedMap);
+        }
+    }
+}
+
+void PostDomAnalysis::ComputeIterPdomFrontiers()
+{
+    for (BB *bb : bbVec) {
+        if (bb == nullptr || bb == &commonEntryBB) {
+            continue;
+        }
+        std::vector<bool> visitedMap(bbVec.size(), false);
+        GetIterPdomFrontier(bb, &iterPdomFrontier[bb->GetId()], bb->GetId(), visitedMap);
+    }
+}
+
+uint32 PostDomAnalysis::ComputePdtPreorder(const BB &bb, uint32 &num)
+{
+    DEBUG_ASSERT(num < pdtPreOrder.size(), "index out of range in Dominance::ComputePdtPreOrder");
+    pdtPreOrder[num] = bb.GetId();
+    pdtDfn[bb.GetId()] = num;
+    uint32 maxDtDfnOut = num;
+    ++num;
+
+    for (uint32 k : pdomChildren[bb.GetId()]) {
+        maxDtDfnOut = ComputePdtPreorder(*bbVec[k], num);
+    }
+
+    pdtDfnOut[bb.GetId()] = maxDtDfnOut;
+    return maxDtDfnOut;
+}
+
+// true if b1 postdominates b2
+bool PostDomAnalysis::PostDominate(const BB &bb1, const BB &bb2)
+{
+    return pdtDfn[bb1.GetId()] <= pdtDfn[bb2.GetId()] && pdtDfnOut[bb1.GetId()] >= pdtDfnOut[bb2.GetId()];
+}
+
+void PostDomAnalysis::Dump()
+{
+    for (BB *bb : pdomReversePostOrder) {
+        LogInfo::MapleLogger() << "pdom_postorder no " << pdomPostOrderIDVec[bb->GetId()];
+        LogInfo::MapleLogger() << " is bb:" << bb->GetId();
+        LogInfo::MapleLogger() << " im_pdom is bb:" << GetPdom(bb->GetId())->GetId();
+        LogInfo::MapleLogger() << " pdomfrontier: [";
+        for (uint32 id : pdomFrontier[bb->GetId()]) {
+            LogInfo::MapleLogger() << id << " ";
+        }
+        LogInfo::MapleLogger() << "] pdomchildren: [";
+        for (uint32 id : pdomChildren[bb->GetId()]) {
+            LogInfo::MapleLogger() << id << " ";
+        }
+        LogInfo::MapleLogger() << "]\n";
+    }
+    LogInfo::MapleLogger() << "\n";
+    LogInfo::MapleLogger() << "preorder traversal of post-dominator tree:";
+    for (uint32 id : pdtPreOrder) {
+        LogInfo::MapleLogger() << id << " ";
+    }
+    LogInfo::MapleLogger() << "\n\n";
+}
+
+void PostDomAnalysis::Compute()
+{
+    PdomGenPostOrderID();
+    ComputePostDominance();
+    ComputePdomFrontiers();
+    ComputePdomChildren();
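+    // Same pipeline as DomAnalysis::Compute, just on the reverse CFG: frontiers,
+    // tree children, iterated frontiers, then a preorder numbering for O(1) queries.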
+    ComputeIterPdomFrontiers();
+    uint32 num = 0;
+    (void)ComputePdtPreorder(GetCommonExitBB(), num);
+    ResizePdtPreOrder(num);
+}
+
+bool CgDomAnalysis::PhaseRun(maplebe::CGFunc &f)
+{
+    MemPool *domMemPool = GetPhaseMemPool();
+    domAnalysis = domMemPool->New<DomAnalysis>(f, *domMemPool, *domMemPool, f.GetAllBBs(), *f.GetFirstBB(),
+                                               *f.GetCommonExitBB());
+    domAnalysis->Compute();
+    if (CG_DEBUG_FUNC(f)) {
+        domAnalysis->Dump();
+    }
+    return false;
+}
+MAPLE_ANALYSIS_PHASE_REGISTER(CgDomAnalysis, domanalysis)
+
+bool CgPostDomAnalysis::PhaseRun(maplebe::CGFunc &f)
+{
+    MemPool *pdomMemPool = GetPhaseMemPool();
+    pdomAnalysis = pdomMemPool->New<PostDomAnalysis>(f, *pdomMemPool, *pdomMemPool, f.GetAllBBs(), *f.GetFirstBB(),
+                                                     *f.GetCommonExitBB());
+    pdomAnalysis->Compute();
+    if (CG_DEBUG_FUNC(f)) {
+        pdomAnalysis->Dump();
+    }
+    return false;
+}
+MAPLE_ANALYSIS_PHASE_REGISTER(CgPostDomAnalysis, pdomanalysis)
+}  /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_irbuilder.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_irbuilder.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..324a33d679807ca0c58cb05224e1d2ef065c87d6
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_irbuilder.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
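+// InsnBuilder and OperandBuilder below funnel all Insn/Operand construction through
+// a MemPool (or the builder's own allocator when no pool is passed), so callers
+// never pair raw news with pool-managed objects.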
+
+#include "cg_irbuilder.h"
+#include "isa.h"
+#include "cg.h"
+#include "cfi.h"
+#include "dbg.h"
+
+namespace maplebe {
+Insn &InsnBuilder::BuildInsn(MOperator opCode, const InsnDesc &idesc)
+{
+    auto *newInsn = mp->New<Insn>(*mp, opCode);
+    newInsn->SetInsnDescrption(idesc);
+    IncreaseInsnNum();
+    return *newInsn;
+}
+
+Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    return BuildInsn(opCode, tMd).AddOpndChain(o0);
+}
+Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1);
+}
+Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2);
+}
+
+Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3);
+}
+
+Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    Insn &nI = BuildInsn(opCode, tMd);
+    return nI.AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3).AddOpndChain(o4);
+}
+
+Insn &InsnBuilder::BuildInsn(MOperator opCode, std::vector<Operand *> &opnds)
+{
+    const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode);
+    Insn &nI = BuildInsn(opCode, tMd);
+    for (auto *opnd : opnds) {
+        nI.AddOperand(*opnd);
+    }
+    return nI;
+}
+
+Insn &InsnBuilder::BuildCfiInsn(MOperator opCode)
+{
+    auto *nI = mp->New<cfi::CfiInsn>(*mp, opCode);
+    IncreaseInsnNum();
+    return *nI;
+}
+Insn &InsnBuilder::BuildDbgInsn(MOperator opCode)
+{
+    auto *nI = mp->New<mpldbg::DbgInsn>(*mp, opCode);
+    IncreaseInsnNum();
+    return *nI;
+}
+
+VectorInsn &InsnBuilder::BuildVectorInsn(MOperator opCode, const InsnDesc &idesc)
+{
+    auto *newInsn = mp->New<VectorInsn>(*mp, opCode);
+    newInsn->SetInsnDescrption(idesc);
+    IncreaseInsnNum();
+    return *newInsn;
+}
+
+ImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, MemPool *mp)
+{
+    return mp ? *mp->New<ImmOperand>(value, size, false) : *alloc.New<ImmOperand>(value, size, false);
+}
+
+ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp)
+{
+    return mp ? *mp->New<ImmOperand>(symbol, offset, relocs, false)
+              : *alloc.New<ImmOperand>(symbol, offset, relocs, false);
+}
+
+MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp)
+{
+    return mp ? *mp->New<MemOperand>(size) : *alloc.New<MemOperand>(size);
+}
+
+MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size)
+{
+    MemOperand *memOprand = &CreateMem(size);
+    memOprand->SetBaseRegister(baseOpnd);
+    memOprand->SetOffsetOperand(CreateImm(baseOpnd.GetSize(), offset));
+    return *memOprand;
+}
+
+RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp)
+{
+    virtualRegNum++;
+    regno_t vRegNO = baseVirtualRegNO + virtualRegNum;
+    return mp ? *mp->New<RegOperand>(vRegNO, size, type) : *alloc.New<RegOperand>(vRegNO, size, type);
+}
+
+RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp)
+{
+    return mp ? *mp->New<RegOperand>(vRegNO, size, type) : *alloc.New<RegOperand>(vRegNO, size, type);
+}
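+// Usage sketch (the mop and operand names are hypothetical, for illustration only):
+//   RegOperand &dst = opndBuilder->CreateVReg(k64BitSize, kRegTyInt);
+//   ImmOperand &one = opndBuilder->CreateImm(k64BitSize, 1);
+//   Insn &add = insnBuilder->BuildInsn(MOP_xaddrri12, dst, src, one);
+// Every overload resolves the target's InsnDesc once, then chains operands in order.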
+
+RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp)
+{
+    return mp ? *mp->New<RegOperand>(pRegNO, size, type) : *alloc.New<RegOperand>(pRegNO, size, type);
+}
+
+ListOperand &OperandBuilder::CreateList(MemPool *mp)
+{
+    return mp ? *mp->New<ListOperand>(alloc) : *alloc.New<ListOperand>(alloc);
+}
+
+FuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp)
+{
+    return mp ? *mp->New<FuncNameOperand>(symbol) : *alloc.New<FuncNameOperand>(symbol);
+}
+
+LabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp)
+{
+    return mp ? *mp->New<LabelOperand>(parent, idx) : *alloc.New<LabelOperand>(parent, idx);
+}
+
+CommentOperand &OperandBuilder::CreateComment(const std::string &s, MemPool *mp)
+{
+    return mp ? *mp->New<CommentOperand>(s, *mp) : *alloc.New<CommentOperand>(s, *mp);
+}
+
+CommentOperand &OperandBuilder::CreateComment(const MapleString &s, MemPool *mp)
+{
+    return mp ? *mp->New<CommentOperand>(s.c_str(), *mp) : *alloc.New<CommentOperand>(s.c_str(), *mp);
+}
+
+}  // namespace maplebe
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_occur.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_occur.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5e43157d8bdee23e6a312c247df2c0152a79aa17
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_occur.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cg_occur.h"
+#include "cg_pre.h"
+
+/* The methods associated with the data structures that represent occurrences and work candidates for PRE */
+namespace maplebe {
+/* return true if this occurrence dominates occ */
+bool CgOccur::IsDominate(DomAnalysis &dom, CgOccur &occ)
+{
+    return dom.Dominate(*GetBB(), *occ.GetBB());
+}
+
+/* compute bucket index for the work candidate in workCandHashTable */
+uint32 PreWorkCandHashTable::ComputeWorkCandHashIndex(const Operand &opnd)
+{
+    uint32 hashIdx = static_cast<uint32>(reinterpret_cast<uintptr_t>(&opnd) >> k4ByteSize);
+    return hashIdx % workCandHashLength;
+}
+
+uint32 PreWorkCandHashTable::ComputeStmtWorkCandHashIndex(const Insn &insn)
+{
+    uint32 hIdx = (static_cast<uint32>(insn.GetMachineOpcode())) << k3ByteSize;
+    return hIdx % workCandHashLength;
+}
+}  // namespace maplebe
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_option.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_option.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8bdf5b200660d4d0b77de35943c2a34ecc782317
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_option.cpp
@@ -0,0 +1,1221 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cg_option.h"
+#include <fstream>
+#include <string>
+#include "cg_options.h"
+#include "driver_options.h"
+#include "mpl_logging.h"
+#include "parser_opt.h"
+#include "mir_parser.h"
+#include "string_utils.h"
+#include "triple.h"
+
+namespace maplebe {
+using namespace maple;
+
+const std::string kMplcgVersion = "";
+
+bool CGOptions::timePhases = false;
+std::string CGOptions::targetArch = "";
+std::unordered_set<std::string> CGOptions::dumpPhases = {};
+std::unordered_set<std::string> CGOptions::skipPhases = {};
+std::unordered_map<std::string, std::vector<std::string>> CGOptions::cyclePatternMap = {};
+std::string CGOptions::skipFrom = "";
+std::string CGOptions::skipAfter = "";
+std::string CGOptions::dumpFunc = "*";
+std::string CGOptions::globalVarProfile = "";
+std::string CGOptions::profileData = "";
+std::string CGOptions::profileFuncData = "";
+std::string CGOptions::profileClassData = "";
+#ifdef TARGARM32
+std::string CGOptions::duplicateAsmFile = "";
+#else
+std::string CGOptions::duplicateAsmFile = "maple/mrt/codetricks/arch/arm64/duplicateFunc.s";
+#endif
+Range CGOptions::range = Range();
+std::string CGOptions::fastFuncsAsmFile = "";
+Range CGOptions::spillRanges = Range();
+uint8 CGOptions::fastAllocMode = 0; /* 0: fast, 1: spill all */
+bool CGOptions::fastAlloc = false;
+uint64 CGOptions::lsraBBOptSize = 150000;
+uint64 CGOptions::lsraInsnOptSize = 200000;
+uint64 CGOptions::overlapNum = 28;
+uint8 CGOptions::rematLevel = 2;
+bool CGOptions::optForSize = false;
+bool CGOptions::enableHotColdSplit = false;
+uint32 CGOptions::alignMinBBSize = 16;
+uint32 CGOptions::alignMaxBBSize = 96;
+uint32 CGOptions::loopAlignPow = 4;
+uint32 CGOptions::jumpAlignPow = 5;
+uint32 CGOptions::funcAlignPow = 5;
+#if TARGAARCH64 || TARGRISCV64
+bool CGOptions::useBarriersForVolatile = false;
+#else
+bool CGOptions::useBarriersForVolatile = true;
+#endif
+bool CGOptions::exclusiveEH = false;
+bool CGOptions::doEBO = false;
+bool CGOptions::doCGSSA = false;
+bool CGOptions::doIPARA = true;
+bool CGOptions::doCFGO = false;
+bool CGOptions::doICO = false;
+bool CGOptions::doStoreLoadOpt = false;
+bool CGOptions::doGlobalOpt = false;
+bool CGOptions::doVregRename = false;
+bool CGOptions::doMultiPassColorRA = true;
+bool CGOptions::doPrePeephole = false;
+bool CGOptions::doPeephole = false;
+bool CGOptions::doRetMerge = false;
+bool CGOptions::doSchedule = false;
+bool CGOptions::doWriteRefFieldOpt = false;
+bool CGOptions::dumpOptimizeCommonLog = false;
+bool CGOptions::checkArrayStore = false;
+bool CGOptions::doPIC = false;
+bool CGOptions::noDupBB = false;
+bool CGOptions::noCalleeCFI = true;
+bool CGOptions::emitCyclePattern = false;
+bool CGOptions::insertYieldPoint = false;
+bool CGOptions::mapleLinker = false;
+bool CGOptions::printFunction = false;
+bool CGOptions::nativeOpt = false;
+bool CGOptions::lazyBinding = false;
+bool CGOptions::hotFix = false;
+bool CGOptions::debugSched = false;
+bool CGOptions::bruteForceSched = false;
+bool CGOptions::simulateSched = false;
+CGOptions::ABIType CGOptions::abiType = kABIHard;
+CGOptions::EmitFileType CGOptions::emitFileType = kAsm;
+bool CGOptions::genLongCalls = false;
+bool 
CGOptions::functionSections = false; +bool CGOptions::useFramePointer = false; +bool CGOptions::gcOnly = false; +bool CGOptions::quiet = false; +bool CGOptions::doPatchLongBranch = false; +bool CGOptions::doPreSchedule = false; +bool CGOptions::emitBlockMarker = true; +bool CGOptions::inRange = false; +bool CGOptions::doPreLSRAOpt = false; +bool CGOptions::doLocalRefSpill = false; +bool CGOptions::doCalleeToSpill = false; +bool CGOptions::doRegSavesOpt = false; +bool CGOptions::useSsaPreSave = false; +bool CGOptions::useSsuPreRestore = false; +bool CGOptions::replaceASM = false; +bool CGOptions::generalRegOnly = false; +bool CGOptions::fastMath = false; +bool CGOptions::doAlignAnalysis = false; +bool CGOptions::doCondBrAlign = false; +bool CGOptions::cgBigEndian = false; +bool CGOptions::arm64ilp32 = false; +bool CGOptions::noCommon = false; + +CGOptions &CGOptions::GetInstance() +{ + static CGOptions instance; + return instance; +} + +void CGOptions::DecideMplcgRealLevel(bool isDebug) +{ + if (opts::cg::o0) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: O0\n"; + } + EnableO0(); + } + + if (opts::cg::o1) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: O1\n"; + } + EnableO1(); + } + + if (opts::cg::o2 || opts::cg::os) { + if (opts::cg::os) { + optForSize = true; + } + if (isDebug) { + std::string oLog = (opts::cg::os == true) ? "Os" : "O2"; + LogInfo::MapleLogger() << "Real Mplcg level: " << oLog << "\n"; + } + EnableO2(); + } + if (opts::cg::olitecg) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: LiteCG\n"; + } + EnableLiteCG(); + } +} + +bool CGOptions::SolveOptions(bool isDebug) +{ + DecideMplcgRealLevel(isDebug); + + for (const auto &opt : cgCategory.GetEnabledOptions()) { + std::string printOpt; + if (isDebug) { + for (const auto &val : opt->GetRawValues()) { + printOpt += opt->GetName() + " " + val + " "; + } + LogInfo::MapleLogger() << "cg options: " << printOpt << '\n'; + } + } + + if (opts::cg::quiet.IsEnabledByUser()) { + SetQuiet(true); + } + + if (opts::verbose.IsEnabledByUser()) { + SetQuiet(false); + } + + if (opts::cg::pie.IsEnabledByUser()) { + opts::cg::pie ? SetOption(CGOptions::kGenPie) : ClearOption(CGOptions::kGenPie); + } + + if (opts::cg::fpic.IsEnabledByUser()) { + if (opts::cg::fpic) { + EnablePIC(); + SetOption(CGOptions::kGenPic); + } else { + DisablePIC(); + ClearOption(CGOptions::kGenPic); + } + } + + if (opts::cg::verboseAsm.IsEnabledByUser()) { + opts::cg::verboseAsm ? SetOption(CGOptions::kVerboseAsm) : ClearOption(CGOptions::kVerboseAsm); + } + + if (opts::cg::verboseCg.IsEnabledByUser()) { + opts::cg::verboseCg ? SetOption(CGOptions::kVerboseCG) : ClearOption(CGOptions::kVerboseCG); + } + + if (opts::cg::maplelinker.IsEnabledByUser()) { + opts::cg::maplelinker ? EnableMapleLinker() : DisableMapleLinker(); + } + + if (opts::cg::fastAlloc.IsEnabledByUser()) { + EnableFastAlloc(); + SetFastAllocMode(opts::cg::fastAlloc); + } + + if (opts::cg::useBarriersForVolatile.IsEnabledByUser()) { + opts::cg::useBarriersForVolatile ? EnableBarriersForVolatile() : DisableBarriersForVolatile(); + } + + if (opts::cg::spillRange.IsEnabledByUser()) { + SetRange(opts::cg::spillRange, "--pill-range", GetSpillRanges()); + } + + if (opts::cg::range.IsEnabledByUser()) { + SetRange(opts::cg::range, "--range", GetRange()); + } + + if (opts::cg::timePhases.IsEnabledByUser()) { + opts::cg::timePhases ? 
EnableTimePhases() : DisableTimePhases(); + } + + if (opts::cg::dumpFunc.IsEnabledByUser()) { + SetDumpFunc(opts::cg::dumpFunc); + } + + if (opts::cg::duplicateAsmList.IsEnabledByUser()) { + SetDuplicateAsmFile(opts::cg::duplicateAsmList); + } + + if (opts::cg::duplicateAsmList2.IsEnabledByUser()) { + SetFastFuncsAsmFile(opts::cg::duplicateAsmList2); + } + + if (opts::cg::insertCall.IsEnabledByUser()) { + SetOption(kGenInsertCall); + SetInstrumentationFunction(opts::cg::insertCall); + SetInsertCall(true); + } + + if (opts::cg::stackProtectorStrong.IsEnabledByUser()) { + SetOption(kUseStackProtectorStrong); + } + + if (opts::cg::stackProtectorAll.IsEnabledByUser()) { + SetOption(kUseStackProtectorAll); + } + + if (opts::cg::debug.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gdwarf.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithDwarf); + SetParserOption(kWithDbgInfo); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + ClearOption(kWithMpl); + } + + if (opts::cg::gmixedsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + } + + if (opts::cg::gmixedasm.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + SetOption(kWithAsm); + } + + if (opts::cg::profile.IsEnabledByUser()) { + SetOption(kWithProfileCode); + SetParserOption(kWithProfileInfo); + } + + if (opts::cg::withRaLinearScan.IsEnabledByUser()) { + SetOption(kDoLinearScanRegAlloc); + ClearOption(kDoColorRegAlloc); + } + + if (opts::cg::withRaGraphColor.IsEnabledByUser()) { + SetOption(kDoColorRegAlloc); + ClearOption(kDoLinearScanRegAlloc); + } + + if (opts::cg::printFunc.IsEnabledByUser()) { + opts::cg::printFunc ? EnablePrintFunction() : DisablePrintFunction(); + } + + if (opts::cg::addDebugTrace.IsEnabledByUser()) { + SetOption(kAddDebugTrace); + } + + if (opts::cg::addFuncProfile.IsEnabledByUser()) { + SetOption(kAddFuncProfile); + } + + if (opts::cg::suppressFileinfo.IsEnabledByUser()) { + SetOption(kSuppressFileInfo); + } + + if (opts::cg::patchLongBranch.IsEnabledByUser()) { + SetOption(kPatchLongBranch); + } + + if (opts::cg::constFold.IsEnabledByUser()) { + opts::cg::constFold ? 
SetOption(kConstFold) : ClearOption(kConstFold); + } + + if (opts::cg::dumpCfg.IsEnabledByUser()) { + SetOption(kDumpCFG); + } + + if (opts::cg::classListFile.IsEnabledByUser()) { + SetClassListFile(opts::cg::classListFile); + } + + if (opts::cg::genCMacroDef.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kCMacroDef, opts::cg::genCMacroDef); + } + + if (opts::cg::genGctibFile.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGctib, opts::cg::genGctibFile); + } + + if (opts::cg::yieldpoint.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenYieldPoint, opts::cg::yieldpoint); + } + + if (opts::cg::localRc.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenLocalRc, opts::cg::localRc); + } + + if (opts::cg::ehExclusiveList.IsEnabledByUser()) { + SetEHExclusiveFile(opts::cg::ehExclusiveList); + EnableExclusiveEH(); + ParseExclusiveFunc(opts::cg::ehExclusiveList); + } + + if (opts::cg::cyclePatternList.IsEnabledByUser()) { + SetCyclePatternFile(opts::cg::cyclePatternList); + EnableEmitCyclePattern(); + ParseCyclePattern(opts::cg::cyclePatternList); + } + + if (opts::cg::cg.IsEnabledByUser()) { + SetRunCGFlag(opts::cg::cg); + opts::cg::cg ? SetOption(CGOptions::kDoCg) : ClearOption(CGOptions::kDoCg); + } + + if (opts::cg::objmap.IsEnabledByUser()) { + SetGenerateObjectMap(opts::cg::objmap); + } + + if (opts::cg::replaceAsm.IsEnabledByUser()) { + opts::cg::replaceAsm ? EnableReplaceASM() : DisableReplaceASM(); + } + + if (opts::cg::generalRegOnly.IsEnabledByUser()) { + opts::cg::generalRegOnly ? EnableGeneralRegOnly() : DisableGeneralRegOnly(); + } + + if (opts::cg::lazyBinding.IsEnabledByUser()) { + opts::cg::lazyBinding ? EnableLazyBinding() : DisableLazyBinding(); + } + + if (opts::cg::hotFix.IsEnabledByUser()) { + opts::cg::hotFix ? EnableHotFix() : DisableHotFix(); + } + + if (opts::cg::soeCheck.IsEnabledByUser()) { + SetOption(CGOptions::kSoeCheckInsert); + } + + if (opts::cg::checkArraystore.IsEnabledByUser()) { + opts::cg::checkArraystore ? EnableCheckArrayStore() : DisableCheckArrayStore(); + } + + if (opts::cg::ebo.IsEnabledByUser()) { + opts::cg::ebo ? EnableEBO() : DisableEBO(); + } + + if (opts::cg::cfgo.IsEnabledByUser()) { + opts::cg::cfgo ? EnableCFGO() : DisableCFGO(); + } + + if (opts::cg::ico.IsEnabledByUser()) { + opts::cg::ico ? EnableICO() : DisableICO(); + } + + if (opts::cg::storeloadopt.IsEnabledByUser()) { + opts::cg::storeloadopt ? EnableStoreLoadOpt() : DisableStoreLoadOpt(); + } + + if (opts::cg::globalopt.IsEnabledByUser()) { + opts::cg::globalopt ? EnableGlobalOpt() : DisableGlobalOpt(); + } + + if (opts::cg::prelsra.IsEnabledByUser()) { + opts::cg::prelsra ? EnablePreLSRAOpt() : DisablePreLSRAOpt(); + } + + if (opts::cg::lsraLvarspill.IsEnabledByUser()) { + opts::cg::lsraLvarspill ? EnableLocalRefSpill() : DisableLocalRefSpill(); + } + + if (opts::cg::lsraOptcallee.IsEnabledByUser()) { + opts::cg::lsraOptcallee ? EnableCalleeToSpill() : DisableCalleeToSpill(); + } + + if (opts::cg::prepeep.IsEnabledByUser()) { + opts::cg::prepeep ? EnablePrePeephole() : DisablePrePeephole(); + } + + if (opts::cg::peep.IsEnabledByUser()) { + opts::cg::peep ? EnablePeephole() : DisablePeephole(); + } + + if (opts::cg::retMerge.IsEnabledByUser()) { + opts::cg::retMerge ? EnableRetMerge() : DisableRetMerge(); + } + + if (opts::cg::preschedule.IsEnabledByUser()) { + opts::cg::preschedule ? EnablePreSchedule() : DisablePreSchedule(); + } + + if (opts::cg::schedule.IsEnabledByUser()) { + opts::cg::schedule ? 
EnableSchedule() : DisableSchedule(); + } + + if (opts::cg::vregRename.IsEnabledByUser()) { + opts::cg::vregRename ? EnableVregRename() : DisableVregRename(); + } + + if (opts::cg::fullcolor.IsEnabledByUser()) { + opts::cg::fullcolor ? EnableMultiPassColorRA() : DisableMultiPassColorRA(); + } + + if (opts::cg::writefieldopt.IsEnabledByUser()) { + opts::cg::writefieldopt ? EnableWriteRefFieldOpt() : DisableWriteRefFieldOpt(); + } + + if (opts::cg::dumpOlog.IsEnabledByUser()) { + opts::cg::dumpOlog ? EnableDumpOptimizeCommonLog() : DisableDumpOptimizeCommonLog(); + } + + if (opts::cg::nativeopt.IsEnabledByUser()) { + // FIXME: Disabling Looks strage: should be checked by author of the code + DisableNativeOpt(); + } + + if (opts::cg::dupBb.IsEnabledByUser()) { + opts::cg::dupBb ? DisableNoDupBB() : EnableNoDupBB(); + } + + if (opts::cg::calleeCfi.IsEnabledByUser()) { + opts::cg::calleeCfi ? DisableNoCalleeCFI() : EnableNoCalleeCFI(); + } + + if (opts::cg::proepilogue.IsEnabledByUser()) { + opts::cg::proepilogue ? SetOption(CGOptions::kProEpilogueOpt) : ClearOption(CGOptions::kProEpilogueOpt); + } + + if (opts::cg::tailcall.IsEnabledByUser()) { + opts::cg::tailcall ? SetOption(CGOptions::kTailCallOpt) : ClearOption(CGOptions::kTailCallOpt); + } + + if (opts::cg::calleeregsPlacement.IsEnabledByUser()) { + opts::cg::calleeregsPlacement ? EnableRegSavesOpt() : DisableRegSavesOpt(); + } + + if (opts::cg::ssapreSave.IsEnabledByUser()) { + opts::cg::ssapreSave ? EnableSsaPreSave() : DisableSsaPreSave(); + } + + if (opts::cg::ssupreRestore.IsEnabledByUser()) { + opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore(); + } + + if (opts::cg::lsraBb.IsEnabledByUser()) { + SetLSRABBOptSize(opts::cg::lsraBb); + } + + if (opts::cg::lsraInsn.IsEnabledByUser()) { + SetLSRAInsnOptSize(opts::cg::lsraInsn); + } + + if (opts::cg::lsraOverlap.IsEnabledByUser()) { + SetOverlapNum(opts::cg::lsraOverlap); + } + + if (opts::cg::remat.IsEnabledByUser()) { + SetRematLevel(opts::cg::remat); + } + + if (opts::cg::dumpPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::dumpPhases, GetDumpPhases()); + } + + if (opts::cg::target.IsEnabledByUser()) { + SetTargetMachine(opts::cg::target); + } + + if (opts::cg::skipPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::skipPhases, GetSkipPhases()); + } + + if (opts::cg::skipFrom.IsEnabledByUser()) { + SetSkipFrom(opts::cg::skipFrom); + } + + if (opts::cg::skipAfter.IsEnabledByUser()) { + SetSkipAfter(opts::cg::skipAfter); + } + + if (opts::cg::debugSchedule.IsEnabledByUser()) { + opts::cg::debugSchedule ? EnableDebugSched() : DisableDebugSched(); + } + + if (opts::cg::bruteforceSchedule.IsEnabledByUser()) { + opts::cg::bruteforceSchedule ? EnableDruteForceSched() : DisableDruteForceSched(); + } + + if (opts::cg::simulateSchedule.IsEnabledByUser()) { + opts::cg::simulateSchedule ? EnableSimulateSched() : DisableSimulateSched(); + } + + if (opts::cg::quiet.IsEnabledByUser()) { + SetQuiet(true); + } + + if (opts::verbose.IsEnabledByUser()) { + SetQuiet(false); + } + + if (opts::cg::pie.IsEnabledByUser()) { + opts::cg::pie ? SetOption(CGOptions::kGenPie) : ClearOption(CGOptions::kGenPie); + } + + if (opts::cg::fpic.IsEnabledByUser()) { + if (opts::cg::fpic) { + EnablePIC(); + SetOption(CGOptions::kGenPic); + } else { + DisablePIC(); + ClearOption(CGOptions::kGenPic); + } + } + + if (opts::cg::verboseAsm.IsEnabledByUser()) { + opts::cg::verboseAsm ? 
SetOption(CGOptions::kVerboseAsm) : ClearOption(CGOptions::kVerboseAsm); + } + + if (opts::cg::verboseCg.IsEnabledByUser()) { + opts::cg::verboseCg ? SetOption(CGOptions::kVerboseCG) : ClearOption(CGOptions::kVerboseCG); + } + + if (opts::cg::maplelinker.IsEnabledByUser()) { + opts::cg::maplelinker ? EnableMapleLinker() : DisableMapleLinker(); + } + + if (opts::cg::fastAlloc.IsEnabledByUser()) { + EnableFastAlloc(); + SetFastAllocMode(opts::cg::fastAlloc); + } + + if (opts::cg::useBarriersForVolatile.IsEnabledByUser()) { + opts::cg::useBarriersForVolatile ? EnableBarriersForVolatile() : DisableBarriersForVolatile(); + } + + if (opts::cg::spillRange.IsEnabledByUser()) { + SetRange(opts::cg::spillRange, "--pill-range", GetSpillRanges()); + } + + if (opts::cg::range.IsEnabledByUser()) { + SetRange(opts::cg::range, "--range", GetRange()); + } + + if (opts::cg::timePhases.IsEnabledByUser()) { + opts::cg::timePhases ? EnableTimePhases() : DisableTimePhases(); + } + + if (opts::cg::dumpFunc.IsEnabledByUser()) { + SetDumpFunc(opts::cg::dumpFunc); + } + + if (opts::cg::duplicateAsmList.IsEnabledByUser()) { + SetDuplicateAsmFile(opts::cg::duplicateAsmList); + } + + if (opts::cg::duplicateAsmList2.IsEnabledByUser()) { + SetFastFuncsAsmFile(opts::cg::duplicateAsmList2); + } + + if (opts::cg::insertCall.IsEnabledByUser()) { + SetOption(kGenInsertCall); + SetInstrumentationFunction(opts::cg::insertCall); + SetInsertCall(true); + } + + if (opts::cg::stackProtectorStrong.IsEnabledByUser()) { + SetOption(kUseStackProtectorStrong); + } + + if (opts::cg::stackProtectorAll.IsEnabledByUser()) { + SetOption(kUseStackProtectorAll); + } + + if (opts::cg::debug.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gdwarf.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithDwarf); + SetParserOption(kWithDbgInfo); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + ClearOption(kWithMpl); + } + + if (opts::cg::gmixedsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + } + + if (opts::cg::gmixedasm.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + SetOption(kWithAsm); + } + + if (opts::cg::profile.IsEnabledByUser()) { + SetOption(kWithProfileCode); + SetParserOption(kWithProfileInfo); + } + + if (opts::cg::withRaLinearScan.IsEnabledByUser()) { + SetOption(kDoLinearScanRegAlloc); + ClearOption(kDoColorRegAlloc); + } + + if (opts::cg::withRaGraphColor.IsEnabledByUser()) { + SetOption(kDoColorRegAlloc); + ClearOption(kDoLinearScanRegAlloc); + } + + if (opts::cg::printFunc.IsEnabledByUser()) { + opts::cg::printFunc ? EnablePrintFunction() : DisablePrintFunction(); + } + + if (opts::cg::addDebugTrace.IsEnabledByUser()) { + SetOption(kAddDebugTrace); + } + + if (opts::cg::addFuncProfile.IsEnabledByUser()) { + SetOption(kAddFuncProfile); + } + + if (opts::cg::suppressFileinfo.IsEnabledByUser()) { + SetOption(kSuppressFileInfo); + } + + if (opts::cg::patchLongBranch.IsEnabledByUser()) { + SetOption(kPatchLongBranch); + } + + if (opts::cg::constFold.IsEnabledByUser()) { + opts::cg::constFold ? 
SetOption(kConstFold) : ClearOption(kConstFold); + } + + if (opts::cg::dumpCfg.IsEnabledByUser()) { + SetOption(kDumpCFG); + } + + if (opts::cg::classListFile.IsEnabledByUser()) { + SetClassListFile(opts::cg::classListFile); + } + + if (opts::cg::genCMacroDef.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kCMacroDef, opts::cg::genCMacroDef); + } + + if (opts::cg::genGctibFile.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGctib, opts::cg::genGctibFile); + } + + if (opts::cg::yieldpoint.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenYieldPoint, opts::cg::yieldpoint); + } + + if (opts::cg::localRc.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenLocalRc, opts::cg::localRc); + } + + if (opts::cg::ehExclusiveList.IsEnabledByUser()) { + SetEHExclusiveFile(opts::cg::ehExclusiveList); + EnableExclusiveEH(); + ParseExclusiveFunc(opts::cg::ehExclusiveList); + } + + if (opts::cg::cyclePatternList.IsEnabledByUser()) { + SetCyclePatternFile(opts::cg::cyclePatternList); + EnableEmitCyclePattern(); + ParseCyclePattern(opts::cg::cyclePatternList); + } + + if (opts::cg::cg.IsEnabledByUser()) { + SetRunCGFlag(opts::cg::cg); + opts::cg::cg ? SetOption(CGOptions::kDoCg) : ClearOption(CGOptions::kDoCg); + } + + if (opts::cg::objmap.IsEnabledByUser()) { + SetGenerateObjectMap(opts::cg::objmap); + } + + if (opts::cg::replaceAsm.IsEnabledByUser()) { + opts::cg::replaceAsm ? EnableReplaceASM() : DisableReplaceASM(); + } + + if (opts::cg::generalRegOnly.IsEnabledByUser()) { + opts::cg::generalRegOnly ? EnableGeneralRegOnly() : DisableGeneralRegOnly(); + } + + if (opts::cg::lazyBinding.IsEnabledByUser()) { + opts::cg::lazyBinding ? EnableLazyBinding() : DisableLazyBinding(); + } + + if (opts::cg::hotFix.IsEnabledByUser()) { + opts::cg::hotFix ? EnableHotFix() : DisableHotFix(); + } + + if (opts::cg::soeCheck.IsEnabledByUser()) { + SetOption(CGOptions::kSoeCheckInsert); + } + + if (opts::cg::checkArraystore.IsEnabledByUser()) { + opts::cg::checkArraystore ? EnableCheckArrayStore() : DisableCheckArrayStore(); + } + + if (opts::cg::ebo.IsEnabledByUser()) { + opts::cg::ebo ? EnableEBO() : DisableEBO(); + } + + if (opts::cg::cfgo.IsEnabledByUser()) { + opts::cg::cfgo ? EnableCFGO() : DisableCFGO(); + } + + if (opts::cg::ico.IsEnabledByUser()) { + opts::cg::ico ? EnableICO() : DisableICO(); + } + + if (opts::cg::storeloadopt.IsEnabledByUser()) { + opts::cg::storeloadopt ? EnableStoreLoadOpt() : DisableStoreLoadOpt(); + } + + if (opts::cg::globalopt.IsEnabledByUser()) { + opts::cg::globalopt ? EnableGlobalOpt() : DisableGlobalOpt(); + } + + if (opts::cg::hotcoldsplit.IsEnabledByUser()) { + opts::cg::hotcoldsplit ? EnableHotColdSplit() : DisableHotColdSplit(); + } + + if (opts::cg::prelsra.IsEnabledByUser()) { + opts::cg::prelsra ? EnablePreLSRAOpt() : DisablePreLSRAOpt(); + } + + if (opts::cg::lsraLvarspill.IsEnabledByUser()) { + opts::cg::lsraLvarspill ? EnableLocalRefSpill() : DisableLocalRefSpill(); + } + + if (opts::cg::lsraOptcallee.IsEnabledByUser()) { + opts::cg::lsraOptcallee ? EnableCalleeToSpill() : DisableCalleeToSpill(); + } + + if (opts::cg::prepeep.IsEnabledByUser()) { + opts::cg::prepeep ? EnablePrePeephole() : DisablePrePeephole(); + } + + if (opts::cg::peep.IsEnabledByUser()) { + opts::cg::peep ? EnablePeephole() : DisablePeephole(); + } + + if (opts::cg::retMerge.IsEnabledByUser()) { + opts::cg::retMerge ? EnableRetMerge() : DisableRetMerge(); + } + + if (opts::cg::preschedule.IsEnabledByUser()) { + opts::cg::preschedule ? 
EnablePreSchedule() : DisablePreSchedule(); + } + + if (opts::cg::schedule.IsEnabledByUser()) { + opts::cg::schedule ? EnableSchedule() : DisableSchedule(); + } + + if (opts::cg::vregRename.IsEnabledByUser()) { + opts::cg::vregRename ? EnableVregRename() : DisableVregRename(); + } + + if (opts::cg::fullcolor.IsEnabledByUser()) { + opts::cg::fullcolor ? EnableMultiPassColorRA() : DisableMultiPassColorRA(); + } + + if (opts::cg::writefieldopt.IsEnabledByUser()) { + opts::cg::writefieldopt ? EnableWriteRefFieldOpt() : DisableWriteRefFieldOpt(); + } + + if (opts::cg::dumpOlog.IsEnabledByUser()) { + opts::cg::dumpOlog ? EnableDumpOptimizeCommonLog() : DisableDumpOptimizeCommonLog(); + } + + if (opts::cg::nativeopt.IsEnabledByUser()) { + DisableNativeOpt(); + } + + if (opts::cg::dupBb.IsEnabledByUser()) { + opts::cg::dupBb ? DisableNoDupBB() : EnableNoDupBB(); + } + + if (opts::cg::calleeCfi.IsEnabledByUser()) { + opts::cg::calleeCfi ? DisableNoCalleeCFI() : EnableNoCalleeCFI(); + } + + if (opts::cg::proepilogue.IsEnabledByUser()) { + opts::cg::proepilogue ? SetOption(CGOptions::kProEpilogueOpt) : ClearOption(CGOptions::kProEpilogueOpt); + } + + if (opts::cg::tailcall.IsEnabledByUser()) { + opts::cg::tailcall ? SetOption(CGOptions::kTailCallOpt) : ClearOption(CGOptions::kTailCallOpt); + } + + if (opts::cg::calleeregsPlacement.IsEnabledByUser()) { + opts::cg::calleeregsPlacement ? EnableRegSavesOpt() : DisableRegSavesOpt(); + } + + if (opts::cg::ssapreSave.IsEnabledByUser()) { + opts::cg::ssapreSave ? EnableSsaPreSave() : DisableSsaPreSave(); + } + + if (opts::cg::ssupreRestore.IsEnabledByUser()) { + opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore(); + } + + if (opts::cg::lsraBb.IsEnabledByUser()) { + SetLSRABBOptSize(opts::cg::lsraBb); + } + + if (opts::cg::lsraInsn.IsEnabledByUser()) { + SetLSRAInsnOptSize(opts::cg::lsraInsn); + } + + if (opts::cg::lsraOverlap.IsEnabledByUser()) { + SetOverlapNum(opts::cg::lsraOverlap); + } + + if (opts::cg::remat.IsEnabledByUser()) { + SetRematLevel(opts::cg::remat); + } + + if (opts::cg::dumpPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::dumpPhases, GetDumpPhases()); + } + + if (opts::cg::target.IsEnabledByUser()) { + SetTargetMachine(opts::cg::target); + } + + if (opts::cg::skipPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::skipPhases, GetSkipPhases()); + } + + if (opts::cg::skipFrom.IsEnabledByUser()) { + SetSkipFrom(opts::cg::skipFrom); + } + + if (opts::cg::skipAfter.IsEnabledByUser()) { + SetSkipAfter(opts::cg::skipAfter); + } + + if (opts::cg::debugSchedule.IsEnabledByUser()) { + opts::cg::debugSchedule ? EnableDebugSched() : DisableDebugSched(); + } + + if (opts::cg::bruteforceSchedule.IsEnabledByUser()) { + opts::cg::bruteforceSchedule ? EnableDruteForceSched() : DisableDruteForceSched(); + } + + if (opts::cg::simulateSchedule.IsEnabledByUser()) { + opts::cg::simulateSchedule ? EnableSimulateSched() : DisableSimulateSched(); + } + + if (opts::cg::floatAbi.IsEnabledByUser()) { + SetABIType(opts::cg::floatAbi); + } + + if (opts::cg::filetype.IsEnabledByUser()) { + SetEmitFileType(opts::cg::filetype); + } + + if (opts::cg::longCalls.IsEnabledByUser()) { + opts::cg::longCalls ? EnableLongCalls() : DisableLongCalls(); + } + + if (opts::cg::functionSections.IsEnabledByUser()) { + opts::cg::functionSections ? EnableFunctionSections() : DisableFunctionSections(); + } + + if (opts::cg::omitFramePointer.IsEnabledByUser()) { + opts::cg::omitFramePointer ? 
DisableFramePointer() : EnableFramePointer(); + } + + if (opts::cg::fastMath.IsEnabledByUser()) { + opts::cg::fastMath ? EnableFastMath() : DisableFastMath(); + } + + if (opts::cg::alignAnalysis.IsEnabledByUser()) { + opts::cg::alignAnalysis ? EnableAlignAnalysis() : DisableAlignAnalysis(); + } + + if (opts::cg::condbrAlign.IsEnabledByUser()) { + opts::cg::condbrAlign ? EnableCondBrAlign() : DisableCondBrAlign(); + } + + /* big endian can be set with several options: --target, -Be. + * Triple takes to account all these options and allows to detect big endian with IsBigEndian() interface */ + Triple::GetTriple().IsBigEndian() ? EnableBigEndianInCG() : DisableBigEndianInCG(); + (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? EnableArm64ilp32() : DisableArm64ilp32(); + + if (opts::cg::cgSsa.IsEnabledByUser()) { + opts::cg::cgSsa ? EnableCGSSA() : DisableCGSSA(); + } + + if (opts::cg::common.IsEnabledByUser()) { + opts::cg::common ? EnableCommon() : DisableCommon(); + } + + if (opts::cg::alignMinBbSize.IsEnabledByUser()) { + SetAlignMinBBSize(opts::cg::alignMinBbSize); + } + + if (opts::cg::alignMaxBbSize.IsEnabledByUser()) { + SetAlignMaxBBSize(opts::cg::alignMaxBbSize); + } + + if (opts::cg::loopAlignPow.IsEnabledByUser()) { + SetLoopAlignPow(opts::cg::loopAlignPow); + } + + if (opts::cg::jumpAlignPow.IsEnabledByUser()) { + SetJumpAlignPow(opts::cg::jumpAlignPow); + } + + if (opts::cg::funcAlignPow.IsEnabledByUser()) { + SetFuncAlignPow(opts::cg::funcAlignPow); + } + + /* override some options when loc, dwarf is generated */ + if (WithLoc()) { + DisableSchedule(); + SetOption(kWithSrc); + } + if (WithDwarf()) { + DisableEBO(); + DisableCFGO(); + DisableICO(); + DisableSchedule(); + SetOption(kDebugFriendly); + SetOption(kWithSrc); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + return true; +} + +void CGOptions::ParseExclusiveFunc(const std::string &fileName) +{ + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + while (file >> content) { + ehExclusiveFunctionName.push_back(content); + } +} + +void CGOptions::ParseCyclePattern(const std::string &fileName) +{ + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + std::string classStr("class: "); + while (getline(file, content)) { + if (content.compare(0, classStr.length(), classStr) == 0) { + std::vector classPatternContent; + std::string patternContent; + while (getline(file, patternContent)) { + if (patternContent.length() == 0) { + break; + } + classPatternContent.push_back(patternContent); + } + std::string className = content.substr(classStr.length()); + CGOptions::cyclePatternMap[className] = std::move(classPatternContent); + } + } +} + +void CGOptions::SetRange(const std::string &str, const std::string &cmd, Range &subRange) +{ + const std::string &tmpStr = str; + size_t comma = tmpStr.find_first_of(",", 0); + subRange.enable = true; + + if (comma != std::string::npos) { + subRange.begin = std::stoul(tmpStr.substr(0, comma), nullptr); + subRange.end = std::stoul(tmpStr.substr(comma + 1, std::string::npos - (comma + 1)), nullptr); + } + CHECK_FATAL(range.begin < range.end, "invalid values for %s=%lu,%lu", cmd.c_str(), subRange.begin, subRange.end); +} + +/* Set default options according to different languages. 
*/
+void CGOptions::SetDefaultOptions(const maple::MIRModule &mod)
+{
+    if (mod.IsJavaModule()) {
+        generateFlag = generateFlag | kGenYieldPoint | kGenLocalRc | kGrootList | kPrimorList;
+    }
+    insertYieldPoint = GenYieldPoint();
+}
+
+void CGOptions::EnableO0()
+{
+    optimizeLevel = kLevel0;
+    doEBO = false;
+    doCGSSA = false;
+    doCFGO = false;
+    doICO = false;
+    doPrePeephole = false;
+    doPeephole = false;
+    doStoreLoadOpt = false;
+    doGlobalOpt = false;
+    doPreLSRAOpt = false;
+    doLocalRefSpill = false;
+    doCalleeToSpill = false;
+    doPreSchedule = false;
+    doSchedule = false;
+    doRegSavesOpt = false;
+    useSsaPreSave = false;
+    useSsuPreRestore = false;
+    doWriteRefFieldOpt = false;
+    doAlignAnalysis = false;
+    doCondBrAlign = false;
+
+    if (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) {
+        ClearOption(kUseStackProtectorStrong);
+        ClearOption(kUseStackProtectorAll);
+    } else {
+        SetOption(kUseStackProtectorStrong);
+        SetOption(kUseStackProtectorAll);
+    }
+
+    ClearOption(kConstFold);
+    ClearOption(kProEpilogueOpt);
+    ClearOption(kTailCallOpt);
+}
+
+void CGOptions::EnableO1()
+{
+    optimizeLevel = kLevel1;
+    doPreLSRAOpt = true;
+    doCalleeToSpill = true;
+    SetOption(kConstFold);
+    SetOption(kProEpilogueOpt);
+    SetOption(kTailCallOpt);
+    ClearOption(kUseStackProtectorStrong);
+    ClearOption(kUseStackProtectorAll);
+}
+
+void CGOptions::EnableO2()
+{
+    optimizeLevel = kLevel2;
+    doEBO = true;
+    doCGSSA = true;
+    doCFGO = true;
+    doICO = true;
+    doPrePeephole = true;
+    doPeephole = true;
+    doStoreLoadOpt = true;
+    doGlobalOpt = true;
+    doPreSchedule = true;
+    doSchedule = true;
+    doAlignAnalysis = true;
+    doCondBrAlign = true;
+    SetOption(kConstFold);
+    ClearOption(kUseStackProtectorStrong);
+    ClearOption(kUseStackProtectorAll);
+#if TARGARM32
+    doPreLSRAOpt = false;
+    doLocalRefSpill = false;
+    doCalleeToSpill = false;
+    doWriteRefFieldOpt = false;
+    ClearOption(kProEpilogueOpt);
+    ClearOption(kTailCallOpt);
+#else
+    doPreLSRAOpt = true;
+    doLocalRefSpill = true;
+    doCalleeToSpill = true;
+    doRegSavesOpt = false;
+    useSsaPreSave = false;
+    useSsuPreRestore = true;
+    doWriteRefFieldOpt = true;
+    SetOption(kProEpilogueOpt);
+    SetOption(kTailCallOpt);
+#endif
+}
+
+void CGOptions::EnableLiteCG()
+{
+    optimizeLevel = kLevelLiteCG;
+    doEBO = false;
+    doCGSSA = false;
+    doCFGO = true;
+    doICO = false;
+    doPrePeephole = false;
+    doPeephole = true;
+    doStoreLoadOpt = false;
+    doGlobalOpt = false;
+    doPreLSRAOpt = false;
+    doLocalRefSpill = false;
+    doCalleeToSpill = false;
+    doPreSchedule = false;
+    doSchedule = false;
+    doRegSavesOpt = false;
+    useSsaPreSave = false;
+    useSsuPreRestore = false;
+    doWriteRefFieldOpt = false;
+    doAlignAnalysis = false;
+    doCondBrAlign = false;
+
+    ClearOption(kUseStackProtectorStrong);
+    ClearOption(kUseStackProtectorAll);
+    ClearOption(kConstFold);
+    ClearOption(kProEpilogueOpt);
+    ClearOption(kTailCallOpt);
+}
+
+void CGOptions::SetTargetMachine(const std::string &str)
+{
+    if (str == "aarch64") {
+        targetArch = "aarch64";
+    } else if (str == "x86_64") {
+        targetArch = "x86_64";
+    } else {
+        CHECK_FATAL(false, "unknown target. not implement yet");
+    }
+}
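+// Example (illustrative): --dump-phases=cfgo,peep fills dumpPhases with {"cfgo", "peep"},
+// while "*" or "cgir" is stored as-is and makes DumpPhase() accept every phase name.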
+void CGOptions::SplitPhases(const std::string &str, std::unordered_set<std::string> &set)
+{
+    const std::string &tmpStr {str};
+    if ((tmpStr.compare("*") == 0) || (tmpStr.compare("cgir") == 0)) {
+        (void)set.insert(tmpStr);
+        return;
+    }
+    StringUtils::Split(tmpStr, set, ',');
+}
+
+bool CGOptions::DumpPhase(const std::string &phase)
+{
+    return (IS_STR_IN_SET(dumpPhases, "*") || IS_STR_IN_SET(dumpPhases, "cgir") || IS_STR_IN_SET(dumpPhases, phase));
+}
+
+/* match substring of function name */
+bool CGOptions::FuncFilter(const std::string &name)
+{
+    return dumpFunc == "*" || dumpFunc == name;
+}
+}  /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_options.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_options.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a1468e6328b1847867cd4cf3e7e6486de5eeb090
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_options.cpp
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "driver_options.h"
+
+#include <cstdint>
+#include <string>
+
+namespace opts::cg {
+
+maplecl::Option<bool> pie({"-fPIE", "--pie", "-pie"},
+    " --pie \tGenerate position-independent executable\n"
+    " --no-pie\n",
+    {cgCategory, driverCategory, ldCategory}, maplecl::DisableWith("--no-pie"));
+
+maplecl::Option<bool> fpic({"-fPIC", "--fpic", "-fpic"},
+    " --fpic \tGenerate position-independent shared library\n"
+    " --no-fpic\n",
+    {cgCategory, driverCategory, ldCategory}, maplecl::DisableWith("--no-fpic"));
+
+maplecl::Option<bool> verboseAsm({"--verbose-asm"},
+    " --verbose-asm \tAdd comments to asm output\n"
+    " --no-verbose-asm\n",
+    {cgCategory}, maplecl::DisableWith("--no-verbose-asm"));
+
+maplecl::Option<bool> verboseCg({"--verbose-cg"},
+    " --verbose-cg \tAdd comments to cg output\n"
+    " --no-verbose-cg\n",
+    {cgCategory}, maplecl::DisableWith("--no-verbose-cg"));
+
+maplecl::Option<bool> maplelinker({"--maplelinker"},
+    " --maplelinker \tGenerate the MapleLinker .s format\n"
+    " --no-maplelinker\n",
+    {cgCategory}, maplecl::DisableWith("--no-maplelinker"));
+
+maplecl::Option<bool> quiet({"--quiet"},
+    " --quiet \tBe quiet (don't output debug messages)\n"
+    " --no-quiet\n",
+    {cgCategory}, maplecl::DisableWith("--no-quiet"));
+
+maplecl::Option<bool> cg({"--cg"},
+    " --cg \tGenerate the output .s file\n"
+    " --no-cg\n",
+    {cgCategory}, maplecl::DisableWith("--no-cg"));
+
+maplecl::Option<bool> replaceAsm({"--replaceasm"},
+    " --replaceasm \tReplace the assembly code\n"
+    " --no-replaceasm\n",
+    {cgCategory}, maplecl::DisableWith("--no-replaceasm"));
+
+maplecl::Option<bool> generalRegOnly({"--general-reg-only"},
+    " --general-reg-only \tdisable floating-point or Advanced SIMD registers\n"
+    " --no-general-reg-only\n",
+    {cgCategory}, maplecl::DisableWith("--no-general-reg-only"));
+
+maplecl::Option<bool> lazyBinding({"--lazy-binding"},
+    " --lazy-binding \tBind class symbols lazily[default off]\n",
+    {cgCategory}, maplecl::DisableWith("--no-lazy-binding"));
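+/* Each boolean option below pairs with a negated spelling registered through
+ * maplecl::DisableWith (e.g. --no-hot-fix), so SolveOptions can observe both
+ * the enabling and the disabling form via IsEnabledByUser(). */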
+
+maplecl::Option<bool> hotFix({"--hot-fix"},
+    " --hot-fix \tOpen for App hot fix[default off]\n"
+    " --no-hot-fix\n",
+    {cgCategory}, maplecl::DisableWith("--no-hot-fix"));
+
+maplecl::Option<bool> ebo({"--ebo"},
+    " --ebo \tPerform extended block optimization\n"
+    " --no-ebo\n",
+    {cgCategory}, maplecl::DisableWith("--no-ebo"));
+
+maplecl::Option<bool> cfgo({"--cfgo"},
+    " --cfgo \tPerform control flow optimization\n"
+    " --no-cfgo\n",
+    {cgCategory}, maplecl::DisableWith("--no-cfgo"));
+
+maplecl::Option<bool> ico({"--ico"},
+    " --ico \tPerform if-conversion optimization\n"
+    " --no-ico\n",
+    {cgCategory}, maplecl::DisableWith("--no-ico"));
+
+maplecl::Option<bool> storeloadopt({"--storeloadopt"},
+    " --storeloadopt \tPerform global store-load optimization\n"
+    " --no-storeloadopt\n",
+    {cgCategory}, maplecl::DisableWith("--no-storeloadopt"));
+
+maplecl::Option<bool> globalopt({"--globalopt"},
+    " --globalopt \tPerform global optimization\n"
+    " --no-globalopt\n",
+    {cgCategory}, maplecl::DisableWith("--no-globalopt"));
+
+maplecl::Option<bool> hotcoldsplit({"--hotcoldsplit"},
+    " --hotcoldsplit \tPerform HotColdSplit optimization\n"
+    " --no-hotcoldsplit\n",
+    {cgCategory}, maplecl::DisableWith("--no-hotcoldsplit"));
+
+maplecl::Option<bool> prelsra({"--prelsra"},
+    " --prelsra \tPerform live interval simplification in LSRA\n"
+    " --no-prelsra\n",
+    {cgCategory}, maplecl::DisableWith("--no-prelsra"));
+
+maplecl::Option<bool> lsraLvarspill({"--lsra-lvarspill"},
+    " --lsra-lvarspill"
+    " \tPerform LSRA spill using local ref var stack locations\n"
+    " --no-lsra-lvarspill\n",
+    {cgCategory}, maplecl::DisableWith("--no-lsra-lvarspill"));
+
+maplecl::Option<bool> lsraOptcallee({"--lsra-optcallee"},
+    " --lsra-optcallee \tSpill callee if only one def to use\n"
+    " --no-lsra-optcallee\n",
+    {cgCategory}, maplecl::DisableWith("--no-lsra-optcallee"));
+
+maplecl::Option<bool> calleeregsPlacement({"--calleeregs-placement"},
+    " --calleeregs-placement \tOptimize placement of callee-save registers\n"
+    " --no-calleeregs-placement\n",
+    {cgCategory}, maplecl::DisableWith("--no-calleeregs-placement"));
+
+maplecl::Option<bool> ssapreSave({"--ssapre-save"},
+    " --ssapre-save \tUse ssapre algorithm to save callee-save registers\n"
+    " --no-ssapre-save\n",
+    {cgCategory}, maplecl::DisableWith("--no-ssapre-save"));
+
+maplecl::Option<bool> ssupreRestore({"--ssupre-restore"},
+    " --ssupre-restore"
+    " \tUse ssupre algorithm to restore callee-save registers\n"
+    " --no-ssupre-restore\n",
+    {cgCategory}, maplecl::DisableWith("--no-ssupre-restore"));
+
+maplecl::Option<bool> prepeep({"--prepeep"},
+    " --prepeep \tPerform peephole optimization before RA\n"
+    " --no-prepeep\n",
+    {cgCategory}, maplecl::DisableWith("--no-prepeep"));
+
+maplecl::Option<bool> peep({"--peep"},
+    " --peep \tPerform peephole optimization after RA\n"
+    " --no-peep\n",
+    {cgCategory}, maplecl::DisableWith("--no-peep"));
+
+maplecl::Option<bool> preschedule({"--preschedule"},
+    " --preschedule \tPerform prescheduling\n"
+    " --no-preschedule\n",
+    {cgCategory}, maplecl::DisableWith("--no-preschedule"));
+
+maplecl::Option<bool> schedule({"--schedule"},
+    " --schedule \tPerform scheduling\n"
+    " --no-schedule\n",
+    {cgCategory}, maplecl::DisableWith("--no-schedule"));
+
+maplecl::Option<bool> retMerge({"--ret-merge"},
+    " --ret-merge \tMerge return bb into a single destination\n"
+    " --no-ret-merge \tallow multiple return bbs\n",
+    {cgCategory}, maplecl::DisableWith("--no-ret-merge"));
+
+maplecl::Option<bool> vregRename({"--vreg-rename"},
+    " --vreg-rename"
+    " \tPerform rename of long live range around loops in coloring RA\n"
+    " --no-vreg-rename\n",
+    {cgCategory}, maplecl::DisableWith("--no-vreg-rename"));
range around loops in coloring RA\n" + " --no-vreg-rename\n", + {cgCategory}, maplecl::DisableWith("--no-vreg-rename")); + +maplecl::Option fullcolor({"--fullcolor"}, + " --fullcolor \tPerform multi-pass coloring RA\n" + " --no-fullcolor\n", + {cgCategory}, maplecl::DisableWith("--no-fullcolor")); + +maplecl::Option writefieldopt({"--writefieldopt"}, + " --writefieldopt \tPerform WriteRefFieldOpt\n" + " --no-writefieldopt\n", + {cgCategory}, maplecl::DisableWith("--no-writefieldopt")); + +maplecl::Option dumpOlog({"--dump-olog"}, + " --dump-olog \tDump CFGO and ICO debug information\n" + " --no-dump-olog\n", + {cgCategory}, maplecl::DisableWith("--no-dump-olog")); + +maplecl::Option nativeopt({"--nativeopt"}, + " --nativeopt \tEnable native opt\n" + " --no-nativeopt\n", + {cgCategory}, maplecl::DisableWith("--no-nativeopt")); + +maplecl::Option objmap({"--objmap"}, + " --objmap" + " \tCreate object maps (GCTIBs) inside the main output (.s) file\n" + " --no-objmap\n", + {cgCategory}, maplecl::DisableWith("--no-objmap")); + +maplecl::Option yieldpoint({"--yieldpoint"}, + " --yieldpoint \tGenerate yieldpoints [default]\n" + " --no-yieldpoint\n", + {cgCategory}, maplecl::DisableWith("--no-yieldpoint")); + +maplecl::Option proepilogue({"--proepilogue"}, + " --proepilogue \tDo tail call optimization and" + " eliminate unnecessary prologue and epilogue.\n" + " --no-proepilogue\n", + {cgCategory}, maplecl::DisableWith("--no-proepilogue")); + +maplecl::Option localRc({"--local-rc"}, + " --local-rc \tHandle Local Stack RC [default]\n" + " --no-local-rc\n", + {cgCategory}, maplecl::DisableWith("--no-local-rc")); + +maplecl::Option insertCall({"--insert-call"}, + " --insert-call=name \tInsert a call to the named function\n", + {cgCategory}); + +maplecl::Option addDebugTrace({"--add-debug-trace"}, + " --add-debug-trace" + " \tInstrument the output .s file to print call traces at runtime\n", + {cgCategory}); + +maplecl::Option addFuncProfile({"--add-func-profile"}, + " --add-func-profile" + " \tInstrument the output .s file to record func at runtime\n", + {cgCategory}); + +maplecl::Option classListFile( + {"--class-list-file"}, + " --class-list-file" + " \tSet the class list file for the following generation options,\n" + " \tif not given, " + "generate for all visible classes\n" + " \t--class-list-file=class_list_file\n", + {cgCategory}); + +maplecl::Option genCMacroDef( + {"--gen-c-macro-def"}, + " --gen-c-macro-def" + " \tGenerate a .def file that contains extra type metadata, including the\n" + " \tclass instance sizes and field offsets (default)\n" + " --no-gen-c-macro-def\n", + {cgCategory}, maplecl::DisableWith("--no-gen-c-macro-def")); + +maplecl::Option genGctibFile({"--gen-gctib-file"}, + " --gen-gctib-file" + " \tGenerate a separate .s file for GCTIBs. 
Usually used together with\n"
+    " \t--no-objmap (not implemented yet)\n"
+    " --no-gen-gctib-file\n",
+    {cgCategory}, maplecl::DisableWith("--no-gen-gctib-file"));
+
+maplecl::Option<bool> stackProtectorStrong(
+    {"--stack-protector-strong", "-fstack-protector", "-fstack-protector-strong"},
+    " --stack-protector-strong \tAdd stack guard for some functions\n"
+    " --no-stack-protector-strong\n",
+    {cgCategory, driverCategory}, maplecl::DisableEvery({"--no-stack-protector-strong", "-fno-stack-protector"}));
+
+maplecl::Option<bool> stackProtectorAll({"--stack-protector-all"},
+    " --stack-protector-all \tAdd stack guard for all functions\n"
+    " --no-stack-protector-all\n",
+    {cgCategory}, maplecl::DisableWith("--no-stack-protector-all"));
+
+maplecl::Option<bool> debug({"-g", "--g"}, " -g \tGenerate debug information\n",
+    {cgCategory});
+
+maplecl::Option<bool> gdwarf({"--gdwarf"}, " --gdwarf \tGenerate DWARF information\n", {cgCategory});
+
+maplecl::Option<bool> gsrc(
+    {"--gsrc"}, " --gsrc \tUse original source file instead of mpl file for debugging\n",
+    {cgCategory});
+
+maplecl::Option<bool> gmixedsrc({"--gmixedsrc"},
+    " --gmixedsrc \tUse both original source file and mpl file for debugging\n",
+    {cgCategory});
+
+maplecl::Option<bool> gmixedasm({"--gmixedasm"},
+    " --gmixedasm \tComment out both original source file and mpl file for debugging\n",
+    {cgCategory});
+
+maplecl::Option<bool> profile({"--p", "-p"}, " -p \tGenerate profiling information\n",
+    {cgCategory});
+
+maplecl::Option<bool> withRaLinearScan({"--with-ra-linear-scan"},
+    " --with-ra-linear-scan \tDo linear-scan register allocation\n",
+    {cgCategory});
+
+maplecl::Option<bool> withRaGraphColor({"--with-ra-graph-color"},
+    " --with-ra-graph-color \tDo coloring-based register allocation\n",
+    {cgCategory});
+
+maplecl::Option<bool> patchLongBranch({"--patch-long-branch"},
+    " --patch-long-branch \tEnable patching long distance branch with jumping pad\n",
+    {cgCategory});
+
+maplecl::Option<bool> constFold({"--const-fold"},
+    " --const-fold \tEnable constant folding\n"
+    " --no-const-fold\n",
+    {cgCategory}, maplecl::DisableWith("--no-const-fold"));
+
+maplecl::Option<std::string> ehExclusiveList(
+    {"--eh-exclusive-list"},
+    " --eh-exclusive-list \tFor generating gold files in unit testing\n"
+    " \t--eh-exclusive-list=list_file\n",
+    {cgCategory});
+
+maplecl::Option<bool> o0({"-O0", "--O0"}, " -O0 \tNo optimization.\n", {cgCategory});
+
+maplecl::Option<bool> o1({"-O1", "--O1"}, " -O1 \tDo some optimization.\n", {cgCategory});
+
+maplecl::Option<bool> o2({"-O2", "--O2"}, " -O2 \tDo more optimization.\n", {cgCategory});
+
+maplecl::Option<bool> os({"-Os", "--Os"}, " -Os \tOptimize for size, based on O2.\n",
+    {cgCategory});
+
+maplecl::Option<bool> olitecg({"-Olitecg", "--Olitecg"}, " -Olitecg \tOptimize for litecg.\n",
+    {cgCategory});
+
+maplecl::Option lsraBb({"--lsra-bb"},
+    " --lsra-bb=NUM \tSwitch to spill mode if number of bb in function exceeds NUM\n",
+    {cgCategory});
+
+maplecl::Option lsraInsn(
+    {"--lsra-insn"},
+    " --lsra-insn=NUM \tSwitch to spill mode if number of instructions in function exceeds NUM\n",
+    {cgCategory});
+
+maplecl::Option lsraOverlap({"--lsra-overlap"},
+    " --lsra-overlap=NUM \tOverlap NUM to decide pre-spill in LSRA\n",
+    {cgCategory});
+
+maplecl::Option remat({"--remat"},
+    " --remat \tEnable rematerialization during register allocation\n"
+    " \t 0: no rematerialization (default)\n"
+    " \t >= 1: rematerialize constants\n"
+    " \t >= 2: rematerialize addresses\n"
+    " \t >= 3: rematerialize local dreads\n"
+    " \t >= 4: rematerialize global dreads\n",
+    {cgCategory});
+
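+// The dump/skip options defined below cooperate with CGOptions::SplitPhases()
+// and CGOptions::DumpPhase() from cg_option.cpp: a comma-separated phase list
+// is split into an unordered set, and "*" (or "cgir") selects every phase.
+// A minimal usage sketch -- the exact driver invocation is illustrative, not a
+// documented command line; the phase names are ones registered by this patch,
+// e.g. regalloc and cgssaconstruct:
+//
+//   mplcg --dump-phases=cgssaconstruct,regalloc --dump-func=main test.mpl
+//
+// CGOptions::FuncFilter() then restricts dumping to the function named by
+// --dump-func ("*" matches every function).
+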
+maplecl::Option suppressFileinfo({"--suppress-fileinfo"}, + " --suppress-fileinfo \tFor generating gold files in unit testing\n", + {cgCategory}); + +maplecl::Option dumpCfg({"--dump-cfg"}, " --dump-cfg\n", {cgCategory}); + +maplecl::Option target({"--target"}, " --target=TARGETMACHINE \t generate code for TARGETMACHINE\n", + {cgCategory}, maplecl::optionalValue); + +maplecl::Option dumpPhases({"--dump-phases"}, + " --dump-phases=PHASENAME,..." + " \tEnable debug trace for specified phases in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipPhases({"--skip-phases"}, + " --skip-phases=PHASENAME,..." + " \tSkip the phases specified in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipFrom({"--skip-from"}, + " --skip-from=PHASENAME \tSkip the rest phases from PHASENAME(included)\n", + {cgCategory}); + +maplecl::Option skipAfter( + {"--skip-after"}, " --skip-after=PHASENAME \tSkip the rest phases after PHASENAME(excluded)\n", {cgCategory}); + +maplecl::Option dumpFunc( + {"--dump-func"}, + " --dump-func=FUNCNAME" + " \tDump/trace only for functions whose names contain FUNCNAME as substring\n" + " \t(can only specify once)\n", + {cgCategory}); + +maplecl::Option timePhases( + {"--time-phases"}, + " --time-phases \tCollect compilation time stats for each phase\n" + " --no-time-phases \tDon't Collect compilation time stats for each phase\n", + {cgCategory}, maplecl::DisableWith("--no-time-phases")); + +maplecl::Option useBarriersForVolatile({"--use-barriers-for-volatile"}, + " --use-barriers-for-volatile \tOptimize volatile load/str\n" + " --no-use-barriers-for-volatile\n", + {cgCategory}, maplecl::DisableWith("--no-use-barriers-for-volatile")); + +maplecl::Option range( + {"--range"}, " --range=NUM0,NUM1 \tOptimize only functions in the range [NUM0, NUM1]\n", {cgCategory}); + +maplecl::Option fastAlloc({"--fast-alloc"}, + " --fast-alloc=[0/1] \tO2 RA fast mode, set to 1 to spill all registers\n", + {cgCategory}); + +maplecl::Option spillRange( + {"--spill_range"}, " --spill_range=NUM0,NUM1 \tO2 RA spill registers in the range [NUM0, NUM1]\n", + {cgCategory}); + +maplecl::Option dupBb({"--dup-bb"}, + " --dup-bb \tAllow cfg optimizer to duplicate bb\n" + " --no-dup-bb \tDon't allow cfg optimizer to duplicate bb\n", + {cgCategory}, maplecl::DisableWith("--no-dup-bb")); + +maplecl::Option calleeCfi({"--callee-cfi"}, + " --callee-cfi \tcallee cfi message will be generated\n" + " --no-callee-cfi \tcallee cfi message will not be generated\n", + {cgCategory}, maplecl::DisableWith("--no-callee-cfi")); + +maplecl::Option printFunc({"--print-func"}, + " --print-func\n" + " --no-print-func\n", + {cgCategory}, maplecl::DisableWith("--no-print-func")); + +maplecl::Option cyclePatternList({"--cycle-pattern-list"}, + " --cycle-pattern-list \tFor generating cycle pattern meta\n" + " \t--cycle-pattern-list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList( + {"--duplicate_asm_list"}, + " --duplicate_asm_list \tDuplicate asm functions to delete plt call\n" + " \t--duplicate_asm_list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList2({"--duplicate_asm_list2"}, + " --duplicate_asm_list2" + " \tDuplicate more asm functions to delete plt call\n" + " \t--duplicate_asm_list2=list_file\n", + {cgCategory}); + +maplecl::Option blockMarker({"--block-marker"}, + " --block-marker" + " \tEmit block marker symbols in emitted assembly files\n", + {cgCategory}); + +maplecl::Option soeCheck({"--soe-check"}, + " --soe-check \tInsert a soe check 
instruction[default off]\n", + {cgCategory}); + +maplecl::Option checkArraystore({"--check-arraystore"}, + " --check-arraystore \tcheck arraystore exception[default off]\n" + " --no-check-arraystore\n", + {cgCategory}, maplecl::DisableWith("--no-check-arraystore")); + +maplecl::Option debugSchedule({"--debug-schedule"}, + " --debug-schedule \tdump scheduling information\n" + " --no-debug-schedule\n", + {cgCategory}, maplecl::DisableWith("--no-debug-schedule")); + +maplecl::Option bruteforceSchedule({"--bruteforce-schedule"}, + " --bruteforce-schedule \tdo brute force schedule\n" + " --no-bruteforce-schedule\n", + {cgCategory}, maplecl::DisableWith("--no-bruteforce-schedule")); + +maplecl::Option simulateSchedule({"--simulate-schedule"}, + " --simulate-schedule \tdo simulate schedule\n" + " --no-simulate-schedule\n", + {cgCategory}, maplecl::DisableWith("--no-simulate-schedule")); + +maplecl::Option crossLoc({"--cross-loc"}, + " --cross-loc \tcross loc insn schedule\n" + " --no-cross-loc\n", + {cgCategory}, maplecl::DisableWith("--no-cross-loc")); + +maplecl::Option floatAbi({"--float-abi"}, + " --float-abi=name \tPrint the abi type.\n" + " \tname=hard: abi-hard (Default)\n" + " \tname=soft: abi-soft\n" + " \tname=softfp: abi-softfp\n", + {cgCategory}); + +maplecl::Option filetype({"--filetype"}, + " --filetype=name \tChoose a file type.\n" + " \tname=asm: Emit an assembly file (Default)\n" + " \tname=obj: Emit an object file\n" + " \tname=null: not support yet\n", + {cgCategory}); + +maplecl::Option longCalls({"--long-calls"}, + " --long-calls \tgenerate long call\n" + " --no-long-calls\n", + {cgCategory}, maplecl::DisableWith("--no-long-calls")); + +maplecl::Option functionSections({"--function-sections"}, + " --function-sections \t \n" + " --no-function-sections\n", + {cgCategory}, maplecl::DisableWith("--no-function-sections")); + +maplecl::Option omitFramePointer({"--omit-frame-pointer", "-fomit-frame-pointer"}, + " --omit-frame-pointer \t do not use frame pointer \n" + " --no-omit-frame-pointer\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-omit-frame-pointer", "-fno-omit-frame-pointer"})); + +maplecl::Option fastMath({"--fast-math"}, + " --fast-math \tPerform fast math\n" + " --no-fast-math\n", + {cgCategory}, maplecl::DisableWith("--no-fast-math")); + +maplecl::Option tailcall({"--tailcall"}, + " --tailcall \tDo tail call optimization\n" + " --no-tailcall\n", + {cgCategory}, maplecl::DisableWith("--no-tailcall")); + +maplecl::Option alignAnalysis({"--align-analysis"}, + " --align-analysis \tPerform alignanalysis\n" + " --no-align-analysis\n", + {cgCategory}, maplecl::DisableWith("--no-align-analysis")); + +maplecl::Option cgSsa({"--cg-ssa"}, + " --cg-ssa \tPerform cg ssa\n" + " --no-cg-ssa\n", + {cgCategory}, maplecl::DisableWith("--no-cg-ssa")); + +maplecl::Option common({"--common", "-fcommon"}, + " --common \t \n" + " --no-common\n", + {cgCategory, driverCategory}, maplecl::DisableEvery({"--no-common", "-fno-common"})); + +maplecl::Option condbrAlign({"--condbr-align"}, + " --condbr-align \tPerform condbr align\n" + " --no-condbr-align\n", + {cgCategory}, maplecl::DisableWith("--no-condbr-align")); + +maplecl::Option alignMinBbSize({"--align-min-bb-size"}, + " --align-min-bb-size=NUM" + " \tO2 Minimum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option alignMaxBbSize({"--align-max-bb-size"}, + " --align-max-bb-size=NUM" + " \tO2 Maximum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option loopAlignPow( + 
{"--loop-align-pow"}, " --loop-align-pow=NUM \tO2 loop bb align pow (NUM == 0, no loop-align)\n", + {cgCategory}); + +maplecl::Option jumpAlignPow( + {"--jump-align-pow"}, " --jump-align-pow=NUM \tO2 jump bb align pow (NUM == 0, no jump-align)\n", + {cgCategory}); + +maplecl::Option funcAlignPow( + {"--func-align-pow"}, " --func-align-pow=NUM \tO2 func bb align pow (NUM == 0, no func-align)\n", + {cgCategory}); + +} // namespace opts::cg diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phasemanager.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phasemanager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..354467b3af1ea4306cb997bb36214b8415b07a0a --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phasemanager.cpp @@ -0,0 +1,602 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cg_phasemanager.h" +#include +#include +#include "cg_option.h" +#include "args.h" +#include "label_creation.h" +#include "isel.h" +#include "offset_adjust.h" +#include "alignment.h" +#include "yieldpoint.h" +#include "emit.h" +#include "reg_alloc.h" +#if TARGAARCH64 +#include "aarch64_emitter.h" +#include "aarch64_obj_emitter.h" +#include "aarch64_cg.h" +#elif TARGRISCV64 +#include "riscv64_emitter.h" +#elif TARGX86_64 +#include "x64_cg.h" +#include "x64_emitter.h" +#include "string_utils.h" +#endif + +namespace maplebe { +#define JAVALANG (module.IsJavaModule()) +#define CLANG (module.GetSrcLang() == kSrcLangC) + +#define RELEASE(pointer) \ + do { \ + if (pointer != nullptr) { \ + delete pointer; \ + pointer = nullptr; \ + } \ + } while (0) + +namespace { + +void DumpMIRFunc(MIRFunction &func, const char *msg, bool printAlways = false, const char *extraMsg = nullptr) +{ + bool dumpAll = (CGOptions::GetDumpPhases().find("*") != CGOptions::GetDumpPhases().end()); + bool dumpFunc = CGOptions::FuncFilter(func.GetName()); + + if (printAlways || (dumpAll && dumpFunc)) { + LogInfo::MapleLogger() << msg << '\n'; + func.Dump(); + + if (extraMsg) { + LogInfo::MapleLogger() << extraMsg << '\n'; + } + } +} + +} /* anonymous namespace */ + +void CgFuncPM::GenerateOutPutFile(MIRModule &m) +{ + CHECK_FATAL(cg != nullptr, "cg is null"); + CHECK_FATAL(cg->GetEmitter(), "emitter is null"); +#if TARGX86_64 + assembler::Assembler &assm = static_cast(*cg->GetEmitter()).GetAssembler(); + if (!cgOptions->SuppressFileInfo()) { + assm.InitialFileInfo(m.GetInputFileName()); + } + // TODO: Dwarf info + if (cgOptions->WithDwarf()) { + assm.EmitDIHeader(); + } +#else + if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + if (!cgOptions->SuppressFileInfo()) { + cg->GetEmitter()->EmitFileInfo(m.GetInputFileName()); + } + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIHeader(); + } + } +#endif + InitProfile(m); +} + +bool CgFuncPM::FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM) +{ + bool changed = false; + for (size_t i = 0; i < phasesSequence.size(); ++i) { + 
SolveSkipFrom(CGOptions::GetSkipFromPhase(), i); + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Run MplCG " << (curPhase->IsAnalysis() ? "analysis" : "transform") + << " Phase [ " << curPhase->PhaseName() << " ]---\n"; + } + if (curPhase->IsAnalysis()) { + changed |= RunAnalysisPhase, CGFunc>(*curPhase, serialADM, cgFunc); + } else { + changed |= RunTransformPhase, CGFunc>(*curPhase, serialADM, cgFunc); + DumpFuncCGIR(cgFunc, curPhase->PhaseName()); + } + SolveSkipAfter(CGOptions::GetSkipAfterPhase(), i); + } + return changed; +} + +void CgFuncPM::PostOutPut(MIRModule &m) +{ +#if TARGX86_64 + X64Emitter *x64Emitter = static_cast(cg->GetEmitter()); + assembler::Assembler &assm = x64Emitter->GetAssembler(); + if (cgOptions->WithDwarf()) { + assm.EmitDIFooter(); + } + x64Emitter->EmitGlobalVariable(*cg); + x64Emitter->EmitDebugInfo(*cg); + assm.FinalizeFileInfo(); + assm.CloseOutput(); +#else + if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + cg->GetEmitter()->EmitHugeSoRoutines(true); + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIFooter(); + } + /* Emit global info */ + EmitGlobalInfo(m); + } else { + cg->GetEmitter()->Finish(); + cg->GetEmitter()->CloseOutput(); + } +#endif +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx); +std::map visitedSym; + +void CollectStaticSymbolInVar(MIRConst *mirConst) +{ + if (mirConst->GetKind() == kConstAddrof) { + auto *addrSymbol = static_cast(mirConst); + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(addrSymbol->GetSymbolIndex().Idx(), true); + if (sym != nullptr) { + MarkUsedStaticSymbol(sym->GetStIdx()); + } + } else if (mirConst->GetKind() == kConstAggConst) { + auto &constVec = static_cast(mirConst)->GetConstVec(); + for (auto &cst : constVec) { + CollectStaticSymbolInVar(cst); + } + } +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx) +{ + if (!symbolIdx.IsGlobal()) { + return; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx.Idx(), true); + if (symbol == nullptr) { + return; + } + if (visitedSym[symbolIdx]) { + return; + } else { + visitedSym[symbolIdx] = true; + } + symbol->ResetIsDeleted(); + if (symbol->IsConst()) { + auto *konst = symbol->GetKonst(); + CollectStaticSymbolInVar(konst); + } +} + +void RecursiveMarkUsedStaticSymbol(const BaseNode *baseNode) +{ + if (baseNode == nullptr) { + return; + } + Opcode op = baseNode->GetOpCode(); + switch (op) { + case OP_block: { + const BlockNode *blk = static_cast(baseNode); + for (auto &stmt : blk->GetStmtNodes()) { + RecursiveMarkUsedStaticSymbol(&stmt); + } + break; + } + case OP_dassign: { + const DassignNode *dassignNode = static_cast(baseNode); + MarkUsedStaticSymbol(dassignNode->GetStIdx()); + break; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: { + const AddrofNode *dreadNode = static_cast(baseNode); + MarkUsedStaticSymbol(dreadNode->GetStIdx()); + break; + } + default: { + break; + } + } + for (size_t i = 0; i < baseNode->NumOpnds(); ++i) { + RecursiveMarkUsedStaticSymbol(baseNode->Opnd(i)); + } +} + +void CollectStaticSymbolInFunction(MIRFunction &func) +{ + RecursiveMarkUsedStaticSymbol(func.GetBody()); +} + +void CgFuncPM::SweepUnusedStaticSymbol(MIRModule &m) +{ + if (!m.IsCModule()) { + return; + } + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = 
GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast<uint32>(i));
+        if (mirSymbol != nullptr && (mirSymbol->GetSKind() == kStVar || mirSymbol->GetSKind() == kStConst) &&
+            (mirSymbol->GetStorageClass() == kScFstatic || mirSymbol->GetStorageClass() == kScPstatic)) {
+            mirSymbol->SetIsDeleted();
+        }
+    }
+
+    visitedSym.clear();
+    /* scan all functions */
+    std::vector<MIRFunction *> &funcTable = GlobalTables::GetFunctionTable().GetFuncTable();
+    /* don't optimize this loop into an iterator or range-based loop
+     * because AddCallGraphNode(mirFunc) will change GlobalTables::GetFunctionTable().GetFuncTable()
+     */
+    for (size_t index = 0; index < funcTable.size(); ++index) {
+        MIRFunction *mirFunc = funcTable.at(index);
+        if (mirFunc == nullptr || mirFunc->GetBody() == nullptr) {
+            continue;
+        }
+        m.SetCurFunction(mirFunc);
+        CollectStaticSymbolInFunction(*mirFunc);
+        /* scan function symbol declaration
+         * find addrof static const */
+        MIRSymbolTable *funcSymTab = mirFunc->GetSymTab();
+        if (funcSymTab) {
+            size_t localSymSize = funcSymTab->GetSymbolTableSize();
+            for (uint32 i = 0; i < localSymSize; ++i) {
+                MIRSymbol *st = funcSymTab->GetSymbolFromStIdx(i);
+                if (st && st->IsConst()) {
+                    MIRConst *mirConst = st->GetKonst();
+                    CollectStaticSymbolInVar(mirConst);
+                }
+            }
+        }
+    }
+    /* scan global symbol declaration
+     * find addrof static const */
+    auto &symbolSet = m.GetSymbolSet();
+    for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) {
+        MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx(), true);
+        if (s->IsConst()) {
+            MIRConst *mirConst = s->GetKonst();
+            CollectStaticSymbolInVar(mirConst);
+        }
+    }
+}
+
+/* =================== new phase manager =================== */
+#ifdef RA_PERF_ANALYSIS
+extern void printLSRATime();
+extern void printRATime();
+#endif
+
+bool CgFuncPM::PhaseRun(MIRModule &m)
+{
+    CreateCGAndBeCommon(m);
+    bool changed = false;
+    /* reserve static symbols for debugging */
+    if (!cgOptions->WithDwarf()) {
+        SweepUnusedStaticSymbol(m);
+    }
+    if (cgOptions->IsRunCG()) {
+        GenerateOutPutFile(m);
+
+        /* Run the cg optimization phases */
+        PrepareLower(m);
+
+        uint32 countFuncId = 0;
+        unsigned long rangeNum = 0;
+
+        auto userDefinedOptLevel = cgOptions->GetOptimizeLevel();
+        cg->EnrollTargetPhases(this);
+
+        auto admMempool = AllocateMemPoolInPhaseManager("cg phase manager's analysis data manager mempool");
+        auto *serialADM = GetManagerMemPool()->New<AnalysisDataManager>(*(admMempool.get()));
+        for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) {
+            DEBUG_ASSERT(serialADM->CheckAnalysisInfoEmpty(), "clean adm before function run");
+            MIRFunction *mirFunc = *it;
+            if (mirFunc->GetBody() == nullptr) {
+                continue;
+            }
+            if (userDefinedOptLevel == CGOptions::kLevel2 && m.HasPartO2List()) {
+                if (m.IsInPartO2List(mirFunc->GetNameStrIdx())) {
+                    cgOptions->EnableO2();
+                } else {
+                    cgOptions->EnableO0();
+                }
+                ClearAllPhases();
+                cg->EnrollTargetPhases(this);
+                cg->UpdateCGOptions(*cgOptions);
+                Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel());
+            }
+            if (!IsQuiet()) {
+                LogInfo::MapleLogger() << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>> Optimizing Function < " << mirFunc->GetName()
+                                       << " id=" << mirFunc->GetPuidxOrigin() << " >---\n";
+            }
+            /* LowerIR.
*/ + m.SetCurFunction(mirFunc); + if (cg->DoConstFold()) { + DumpMIRFunc(*mirFunc, "************* before ConstantFold **************"); + ConstantFold cf(m); + (void)cf.Simplify(mirFunc->GetBody()); + } + + if (m.GetFlavor() != MIRFlavor::kFlavorLmbc) { + DoFuncCGLower(m, *mirFunc); + } + /* create CGFunc */ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(mirFunc->GetStIdx().Idx()); + auto funcMp = std::make_unique(memPoolCtrler, funcSt->GetName()); + auto stackMp = std::make_unique(funcMp->GetCtrler(), ""); + MapleAllocator funcScopeAllocator(funcMp.get()); + mirFunc->SetPuidxOrigin(++countFuncId); + CGFunc *cgFunc = + cg->CreateCGFunc(m, *mirFunc, *beCommon, *funcMp, *stackMp, funcScopeAllocator, countFuncId); + CHECK_FATAL(cgFunc != nullptr, "Create CG Function failed in cg_phase_manager"); + CG::SetCurCGFunc(*cgFunc); + + if (cgOptions->WithDwarf()) { + cgFunc->SetDebugInfo(m.GetDbgInfo()); + } + /* Run the cg optimizations phases. */ + if (CGOptions::UseRange() && rangeNum >= CGOptions::GetRangeBegin() && + rangeNum <= CGOptions::GetRangeEnd()) { + CGOptions::EnableInRange(); + } + changed = FuncLevelRun(*cgFunc, *serialADM); + /* Delete mempool. */ + mirFunc->ReleaseCodeMemory(); + ++rangeNum; + CGOptions::DisableInRange(); + } + PostOutPut(m); +#ifdef RA_PERF_ANALYSIS + if (cgOptions->IsEnableTimePhases()) { + printLSRATime(); + printRATime(); + } +#endif + } else { + LogInfo::MapleLogger(kLlErr) << "Skipped generating .s because -no-cg is given" << '\n'; + } + RELEASE(cg); + RELEASE(beCommon); + return changed; +} + +void CgFuncPM::DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const +{ + if (CGOptions::DumpPhase(phaseName) && CGOptions::FuncFilter(f.GetName())) { + LogInfo::MapleLogger() << "\n******** CG IR After " << phaseName << ": *********\n"; + f.DumpCGIR(); + } +} + +void CgFuncPM::EmitGlobalInfo(MIRModule &m) const +{ + EmitDuplicatedAsmFunc(m); + EmitFastFuncs(m); + if (cgOptions->IsGenerateObjectMap()) { + cg->GenerateObjectMaps(*beCommon); + } + cg->GetEmitter()->EmitGlobalVariable(); + EmitDebugInfo(m); + cg->GetEmitter()->CloseOutput(); +} + +void CgFuncPM::InitProfile(MIRModule &m) const +{ + if (!CGOptions::IsProfileDataEmpty()) { + uint32 dexNameIdx = m.GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + const std::string &dexName = GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(dexNameIdx)); + bool deCompressSucc = m.GetProfile().DeCompress(CGOptions::GetProfileData(), dexName); + if (!deCompressSucc) { + LogInfo::MapleLogger() << "WARN: DeCompress() " << CGOptions::GetProfileData() << "failed in mplcg()\n"; + } + } +} + +void CgFuncPM::CreateCGAndBeCommon(MIRModule &m) +{ + DEBUG_ASSERT(cgOptions != nullptr, "New cg phase manager running FAILED :: cgOptions unset"); + auto outputFileName = m.GetOutputFileName(); +#if TARGAARCH64 || TARGRISCV64 + cg = new AArch64CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + if (CGOptions::GetEmitFileType() == CGOptions::kAsm) { + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); + } else { + outputFileName = outputFileName.replace(outputFileName.length() - 1, 1, 1, 'o'); + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); + } +#elif TARGARM32 + cg = new Arm32CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + cg->SetEmitter(*m.GetMemPool()->New(*cg, outputFileName)); +#elif TARGX86_64 + cg = new X64CG(m, *cgOptions); + if 
(CGOptions::GetEmitFileType() == CGOptions::kAsm) {
+        assembler::Assembler *assembler = new assembler::AsmAssembler(outputFileName);
+        cg->SetEmitter(*m.GetMemPool()->New<X64Emitter>(*cg, *assembler));
+    } else {
+        outputFileName = outputFileName.replace(outputFileName.length() - 1, 1, 1, 'o');
+        assembler::Assembler *assembler = new assembler::ElfAssembler(outputFileName);
+        cg->SetEmitter(*m.GetMemPool()->New<X64Emitter>(*cg, *assembler));
+    }
+#else
+#error "unknown platform"
+#endif
+
+    /*
+     * Must be done before creating any BECommon instances.
+     *
+     * BECommon, when constructed, will calculate the type, size and align of all types. As a side effect, it will
+     * also lower ptr and ref types into a64. That will drop the information of what a ptr or ref points to.
+     *
+     * All metadata generation passes which depend on the pointed-to type must be done here.
+     */
+    cg->GenPrimordialObjectList(m.GetBaseName());
+    /* We initialize a couple of BECommon's tables using the size information of GlobalTables.type_table_.
+     * So, BECommon must be allocated after all the parsing is done and user-defined types are all accounted for.
+     */
+    beCommon = new BECommon(m);
+    Globals::GetInstance()->SetBECommon(*beCommon);
+    Globals::GetInstance()->SetTarget(*cg);
+
+    /* If a metadata generation pass depends on object layout it must be done after creating BECommon. */
+    cg->GenExtraTypeMetadata(cgOptions->GetClassListFile(), m.GetBaseName());
+
+    if (cg->NeedInsertInstrumentationFunction()) {
+        CHECK_FATAL(cgOptions->IsInsertCall(), "handling of --insert-call is not correct");
+        cg->SetInstrumentationFunction(cgOptions->GetInstrumentationFunction());
+    }
+#if TARGAARCH64
+    if (!m.IsCModule()) {
+        CGOptions::EnableFramePointer();
+    }
+#endif
+}
+
+void CgFuncPM::PrepareLower(MIRModule &m)
+{
+    mirLower = GetManagerMemPool()->New<MIRLower>(m, nullptr);
+    mirLower->Init();
+    cgLower = GetManagerMemPool()->New<CGLowerer>(m, *beCommon, cg->GenerateExceptionHandlingCode(),
+                                                  cg->GenerateVerboseCG());
+    cgLower->RegisterBuiltIns();
+    if (m.IsJavaModule()) {
+        cgLower->InitArrayClassCacheTableIndex();
+    }
+    cgLower->RegisterExternalLibraryFunctions();
+    cgLower->SetCheckLoadStore(CGOptions::IsCheckArrayStore());
+    if (cg->IsStackProtectorStrong() || cg->IsStackProtectorAll() || m.HasPartO2List()) {
+        cg->AddStackGuardvar();
+    }
+}
+
+void CgFuncPM::DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc)
+{
+    if (m.GetFlavor() <= kFeProduced) {
+        mirLower->SetLowerCG();
+        mirLower->SetMirFunc(&mirFunc);
+
+        DumpMIRFunc(mirFunc, "************* before MIRLowerer **************");
+        mirLower->LowerFunc(mirFunc);
+    }
+
+    bool isNotQuiet = !CGOptions::IsQuiet();
+    DumpMIRFunc(mirFunc, "************* before CGLowerer **************", isNotQuiet);
+
+    cgLower->LowerFunc(mirFunc);
+
+    DumpMIRFunc(mirFunc, "************* after CGLowerer **************", isNotQuiet,
+                "************* end CGLowerer **************");
+}
+
+void CgFuncPM::EmitDuplicatedAsmFunc(MIRModule &m) const
+{
+    if (CGOptions::IsDuplicateAsmFileEmpty()) {
+        return;
+    }
+
+    std::ifstream duplicateAsmFileFD(CGOptions::GetDuplicateAsmFile());
+
+    if (!duplicateAsmFileFD.is_open()) {
+        duplicateAsmFileFD.close();
+        ERR(kLncErr, " %s open failed!", CGOptions::GetDuplicateAsmFile().c_str());
+        return;
+    }
+    std::string contend;
+    bool onlyForFramework = false;
+    bool isFramework = IsFramework(m);
+
+    while (getline(duplicateAsmFileFD, contend)) {
+        if (!contend.compare("#Libframework_start")) {
+            onlyForFramework = true;
+        }
+
+        if (!contend.compare("#Libframework_end")) {
+            onlyForFramework = false;
+        }
+
+        if
(onlyForFramework && !isFramework) { + continue; + } + + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + duplicateAsmFileFD.close(); +} + +void CgFuncPM::EmitFastFuncs(const MIRModule &m) const +{ + if (CGOptions::IsFastFuncsAsmFileEmpty() || !(m.IsJavaModule())) { + return; + } + + struct stat buffer; + if (stat(CGOptions::GetFastFuncsAsmFile().c_str(), &buffer) != 0) { + return; + } + + std::ifstream fastFuncsAsmFileFD(CGOptions::GetFastFuncsAsmFile()); + if (fastFuncsAsmFileFD.is_open()) { + std::string contend; + (void)cg->GetEmitter()->Emit("#define ENABLE_LOCAL_FAST_FUNCS 1\n"); + + while (getline(fastFuncsAsmFileFD, contend)) { + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + } + fastFuncsAsmFileFD.close(); +} + +void CgFuncPM::EmitDebugInfo(const MIRModule &m) const +{ + if (!cgOptions->WithDwarf()) { + return; + } + cg->GetEmitter()->SetupDBGInfo(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIHeaderFileInfo(); + cg->GetEmitter()->EmitDIDebugInfoSection(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIDebugAbbrevSection(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIDebugARangesSection(); + cg->GetEmitter()->EmitDIDebugRangesSection(); + cg->GetEmitter()->EmitDIDebugLineSection(); + cg->GetEmitter()->EmitDIDebugStrSection(); +} + +bool CgFuncPM::IsFramework(MIRModule &m) const +{ + auto &funcList = m.GetFunctionList(); + for (auto it = funcList.begin(); it != funcList.end(); ++it) { + MIRFunction *mirFunc = *it; + DEBUG_ASSERT(mirFunc != nullptr, "nullptr check"); + if (mirFunc->GetBody() != nullptr && + mirFunc->GetName() == "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V") { + return true; + } + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFuncPM, cgFuncPhaseManager) +/* register codegen common phases */ +MAPLE_TRANSFORM_PHASE_REGISTER(CgLayoutFrame, layoutstackframe) +MAPLE_TRANSFORM_PHASE_REGISTER(CgCreateLabel, createstartendlabel) +MAPLE_TRANSFORM_PHASE_REGISTER(InstructionSelector, instructionselector) +MAPLE_TRANSFORM_PHASE_REGISTER(CgMoveRegArgs, moveargs) +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegAlloc, regalloc) +MAPLE_TRANSFORM_PHASE_REGISTER(CgAlignAnalysis, alignanalysis) +MAPLE_TRANSFORM_PHASE_REGISTER(CgFrameFinalize, framefinalize) +MAPLE_TRANSFORM_PHASE_REGISTER(CgYieldPointInsertion, yieldpoint) +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenProEpiLog, generateproepilog) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phi_elimination.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phi_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3d57b335df86eb6e6e4d4b0f5cc02b38a19efea1 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_phi_elimination.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "cg_phi_elimination.h"
+#include "cg.h"
+#include "cgbb.h"
+
+namespace maplebe {
+void PhiEliminate::TranslateTSSAToCSSA()
+{
+    FOR_ALL_BB(bb, cgFunc) {
+        eliminatedBB.emplace(bb->GetId());
+        for (auto phiInsnIt : bb->GetPhiInsns()) {
+            /* Method I: create a temp move for the phi-node */
+            auto &destReg = static_cast<RegOperand &>(phiInsnIt.second->GetOperand(kInsnFirstOpnd));
+            RegOperand &tempMovDest = cgFunc->GetOrCreateVirtualRegisterOperand(CreateTempRegForCSSA(destReg));
+            auto &phiList = static_cast<PhiOperand &>(phiInsnIt.second->GetOperand(kInsnSecondOpnd));
+            for (auto phiOpndIt : phiList.GetOperands()) {
+                uint32 fBBId = phiOpndIt.first;
+                DEBUG_ASSERT(fBBId != 0, "GetFromBBID = 0");
+#if DEBUG
+                bool find = false;
+                for (auto predBB : bb->GetPreds()) {
+                    if (predBB->GetId() == fBBId) {
+                        find = true;
+                    }
+                }
+                CHECK_FATAL(find, "no existing pred for phi-node");
+#endif
+                PlaceMovInPredBB(fBBId, CreateMov(tempMovDest, *(phiOpndIt.second)));
+            }
+            Insn &movInsn = CreateMov(destReg, tempMovDest);
+            bb->ReplaceInsn(*phiInsnIt.second, movInsn);
+        }
+    }
+
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            CHECK_FATAL(eliminatedBB.count(bb->GetId()), "still have phi");
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            ReCreateRegOperand(*insn);
+            bb->GetPhiInsns().clear();
+        }
+    }
+    UpdateRematInfo();
+    cgFunc->SetSSAvRegCount(0);
+}
+
+void PhiEliminate::UpdateRematInfo()
+{
+    if (CGOptions::GetRematLevel() > 0) {
+        cgFunc->UpdateAllRegisterVregMapping(remateInfoAfterSSA);
+    }
+}
+
+void PhiEliminate::PlaceMovInPredBB(uint32 predBBId, Insn &movInsn)
+{
+    BB *predBB = cgFunc->GetBBFromID(predBBId);
+    DEBUG_ASSERT(movInsn.GetOperand(kInsnSecondOpnd).IsRegister(), "unexpected operand");
+    if (predBB->GetKind() == BB::kBBFallthru) {
+        predBB->AppendInsn(movInsn);
+    } else {
+        AppendMovAfterLastVregDef(*predBB, movInsn);
+    }
+}
+
+regno_t PhiEliminate::GetAndIncreaseTempRegNO()
+{
+    while (GetSSAInfo()->GetAllSSAOperands().count(tempRegNO)) {
+        tempRegNO++;
+    }
+    regno_t ori = tempRegNO;
+    tempRegNO++;
+    return ori;
+}
+
+RegOperand *PhiEliminate::MakeRoomForNoDefVreg(RegOperand &conflictReg)
+{
+    regno_t conflictVregNO = conflictReg.GetRegisterNumber();
+    auto rVregIt = replaceVreg.find(conflictVregNO);
+    if (rVregIt != replaceVreg.end()) {
+        return rVregIt->second;
+    } else {
+        RegOperand *regForRecreate = &CreateTempRegForCSSA(conflictReg);
+        (void)replaceVreg.emplace(std::pair<regno_t, RegOperand *>(conflictVregNO, regForRecreate));
+        return regForRecreate;
+    }
+}
+
+void PhiEliminate::RecordRematInfo(regno_t vRegNO, PregIdx pIdx)
+{
+    if (remateInfoAfterSSA.count(vRegNO)) {
+        if (remateInfoAfterSSA[vRegNO] != pIdx) {
+            remateInfoAfterSSA.erase(vRegNO);
+        }
+    } else {
+        (void)remateInfoAfterSSA.emplace(std::pair<regno_t, PregIdx>(vRegNO, pIdx));
+    }
+}
+
+bool CgPhiElimination::PhaseRun(maplebe::CGFunc &f)
+{
+    CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f);
+    PhiEliminate *pe = f.GetCG()->CreatePhiElimintor(*GetPhaseMemPool(), f, *ssaInfo);
+    pe->TranslateTSSAToCSSA();
+    return false;
+}
+void CgPhiElimination::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgSSAConstruct>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgPhiElimination, cgphielimination)
+} // namespace maplebe
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_pre.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_pre.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6c4d7fe83c22be5ddec3756fa55e0e4bb2ad7237
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_pre.cpp
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cg_pre.h"
+#include "cg_dominance.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+/* Implement PRE in cgir */
+void CGPre::ResetDS(CgPhiOcc *phiOcc)
+{
+    if (!phiOcc->IsDownSafe()) {
+        return;
+    }
+
+    phiOcc->SetIsDownSafe(false);
+    for (auto *phiOpnd : phiOcc->GetPhiOpnds()) {
+        auto *defOcc = phiOpnd->GetDef();
+        if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) {
+            ResetDS(static_cast<CgPhiOcc *>(defOcc));
+        }
+    }
+}
+
+void CGPre::ComputeDS()
+{
+    for (auto phiIt = phiOccs.rbegin(); phiIt != phiOccs.rend(); ++phiIt) {
+        auto *phiOcc = *phiIt;
+        if (phiOcc->IsDownSafe()) {
+            continue;
+        }
+        for (auto *phiOpnd : phiOcc->GetPhiOpnds()) {
+            if (phiOpnd->HasRealUse()) {
+                continue;
+            }
+            auto *defOcc = phiOpnd->GetDef();
+            if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) {
+                ResetDS(static_cast<CgPhiOcc *>(defOcc));
+            }
+        }
+    }
+}
+
+/* based on ssapre->workCand's realOccs and dfPhiDfns (which now provides all
+   the inserted phis), create the phi and phiOpnd occ nodes; link them all up in
+   order of dt_preorder in ssapre->allOccs; the phi occ nodes are in addition
+   provided in order of dt_preorder in ssapre->phiOccs */
+void CGPre::CreateSortedOccs()
+{
+    // merge varPhiDfns to dfPhiDfns
+    dfPhiDfns.insert(varPhiDfns.begin(), varPhiDfns.end());
+
+    auto comparator = [this](const CgPhiOpndOcc *occA, const CgPhiOpndOcc *occB) -> bool {
+        return dom->GetDtDfnItem(occA->GetBB()->GetId()) < dom->GetDtDfnItem(occB->GetBB()->GetId());
+    };
+
+    std::vector<CgPhiOpndOcc *> phiOpnds;
+    for (auto dfn : dfPhiDfns) {
+        uint32 bbId = dom->GetDtPreOrderItem(dfn);
+        BB *bb = GetBB(bbId);
+        auto *phiOcc = perCandMemPool->New<CgPhiOcc>(*bb, workCand->GetTheOperand(), perCandAllocator);
+        phiOccs.push_back(phiOcc);
+
+        for (BB *pred : bb->GetPreds()) {
+            auto phiOpnd = perCandMemPool->New<CgPhiOpndOcc>(pred, workCand->GetTheOperand(), phiOcc);
+            phiOpnds.push_back(phiOpnd);
+            phiOcc->AddPhiOpnd(*phiOpnd);
+            phiOpnd->SetPhiOcc(*phiOcc);
+        }
+    }
+    std::sort(phiOpnds.begin(), phiOpnds.end(), comparator);
+
+    auto realOccIt = workCand->GetRealOccs().begin();
+    auto exitOccIt = exitOccs.begin();
+    auto phiIt = phiOccs.begin();
+    auto phiOpndIt = phiOpnds.begin();
+
+    CgOccur *nextRealOcc = nullptr;
+    if (realOccIt != workCand->GetRealOccs().end()) {
+        nextRealOcc = *realOccIt;
+    }
+
+    CgOccur *nextExitOcc = nullptr;
+    if (exitOccIt != exitOccs.end()) {
+        nextExitOcc = *exitOccIt;
+    }
+
+    CgPhiOcc *nextPhiOcc = nullptr;
+    if (phiIt != phiOccs.end()) {
+        nextPhiOcc = *phiIt;
+    }
+
+    CgPhiOpndOcc *nextPhiOpndOcc = nullptr;
+    if (phiOpndIt != phiOpnds.end()) {
+        nextPhiOpndOcc = *phiOpndIt;
+    }
+
+    CgOccur *pickedOcc; // the next picked occ in order of preorder traversal of the dominator tree
+    do {
+        pickedOcc = nullptr;
+        // the 4 kinds of occ must be checked in this order, so it will be right
+        // if more than 1 has the same dfn
+        if (nextPhiOcc != nullptr) {
+            pickedOcc = nextPhiOcc;
+        }
+        if (nextRealOcc != nullptr && (pickedOcc ==
nullptr || dom->GetDtDfnItem(nextRealOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextExitOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextExitOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextExitOcc; + } + if (nextPhiOpndOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextPhiOpndOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextPhiOpndOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->GetOccType()) { + case kOccReal: + case kOccUse: + case kOccDef: + case kOccStore: + case kOccMembar: { + ++realOccIt; + if (realOccIt != workCand->GetRealOccs().end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kOccExit: { + ++exitOccIt; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } else { + nextExitOcc = nullptr; + } + break; + } + case kOccPhiocc: { + ++phiIt; + if (phiIt != phiOccs.end()) { + nextPhiOcc = *phiIt; + } else { + nextPhiOcc = nullptr; + } + break; + } + case kOccPhiopnd: { + ++phiOpndIt; + if (phiOpndIt != phiOpnds.end()) { + nextPhiOpndOcc = *phiOpndIt; + } else { + nextPhiOpndOcc = nullptr; + } + break; + } + default: + DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occty"); + break; + } + } + } while (pickedOcc != nullptr); +} + +CgOccur *CGPre::CreateRealOcc(Insn &insn, Operand &opnd, OccType occType) +{ + uint64 hashIdx = PreWorkCandHashTable::ComputeWorkCandHashIndex(opnd); + PreWorkCand *wkCand = preWorkCandHashTable.GetWorkcandFromIndex(hashIdx); + while (wkCand != nullptr) { + Operand *currOpnd = wkCand->GetTheOperand(); + DEBUG_ASSERT(currOpnd != nullptr, "CreateRealOcc: found workcand with theMeExpr as nullptr"); + if (currOpnd == &opnd) { + break; + } + wkCand = static_cast(wkCand->GetNext()); + } + + CgOccur *newOcc = nullptr; + switch (occType) { + case kOccDef: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccStore: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccUse: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + default: + CHECK_FATAL(false, "unsupported occur type"); + break; + } + + if (wkCand != nullptr) { + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + return newOcc; + } + + // workcand not yet created; create a new one and add to worklist + wkCand = ssaPreMemPool->New(ssaPreAllocator, &opnd, GetPUIdx()); + workList.push_back(wkCand); + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + // add to bucket at workcandHashTable[hashIdx] + wkCand->SetNext(*preWorkCandHashTable.GetWorkcandFromIndex(hashIdx)); + preWorkCandHashTable.SetWorkCandAt(hashIdx, *wkCand); + return newOcc; +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_prop.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_prop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..168f6717dae11b7a324e0f8dd4c42aa20916a956 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_prop.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "loop.h" +#include "cg_prop.h" + +namespace maplebe { +void CGProp::DoCopyProp() +{ + CopyProp(); + cgDce->DoDce(); +} + +void CGProp::DoTargetProp() +{ + DoCopyProp(); + /* instruction level opt */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + TargetProp(*insn); + } + } + /* pattern level opt */ + if (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel2) { + PropPatternOpt(); + } +} + +Insn *PropOptimizePattern::FindDefInsn(const VRegVersion *useVersion) +{ + if (!useVersion) { + return nullptr; + } + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (!defInfo) { + return nullptr; + } + return defInfo->GetInsn(); +} + +bool CgCopyProp::PhaseRun(maplebe::CGFunc &f) +{ + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(), f, *ssaInfo, *ll); + cgProp->DoCopyProp(); + ll->ClearBFS(); + return false; +} +void CgCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCopyProp, cgcopyprop) + +bool CgTargetProp::PhaseRun(maplebe::CGFunc &f) +{ + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(), f, *ssaInfo, *ll); + cgProp->DoTargetProp(); + ll->ClearBFS(); + return false; +} +void CgTargetProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgTargetProp, cgtargetprop) +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7bb1d74dbfb4c30505206e965871fafc0852a1ff --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa.cpp @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "cg_ssa.h"
+#include "cg.h"
+
+#include "optimize_common.h"
+
+namespace maplebe {
+uint32 CGSSAInfo::SSARegNObase = 100;
+void CGSSAInfo::ConstructSSA()
+{
+    InsertPhiInsn();
+    /* Rename variables */
+    RenameVariablesForBB(domInfo->GetCommonEntryBB().GetId());
+#if DEBUG
+    /* Check phiListOpnd, must be ssaForm */
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsPhi()) {
+                continue;
+            }
+            Operand &phiListOpnd = insn->GetOperand(kInsnSecondOpnd);
+            CHECK_FATAL(phiListOpnd.IsPhi(), "unexpected phi operand");
+            MapleMap<uint32, RegOperand *> &phiList = static_cast<PhiOperand &>(phiListOpnd).GetOperands();
+            for (auto &phiOpndIt : phiList) {
+                if (!phiOpndIt.second->IsSSAForm()) {
+                    CHECK_FATAL(false, "phiOperand is not ssaForm!");
+                }
+            }
+        }
+    }
+#endif
+    cgFunc->SetSSAvRegCount(static_cast<uint32>(GetAllSSAOperands().size()) + SSARegNObase + 1);
+    /* save reversePostOrder of bbs for rectify validbit */
+    SetReversePostOrder();
+}
+
+void CGSSAInfo::MarkInsnsInSSA(Insn &insn)
+{
+    CHECK_FATAL(insn.GetId() == 0, "insn is not clean !!"); /* change to assert */
+    insnCount += 2;
+    insn.SetId(static_cast<uint32>(insnCount));
+}
+
+void CGSSAInfo::InsertPhiInsn()
+{
+    FOR_ALL_BB(bb, cgFunc) {
+        FOR_BB_INSNS(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            std::set<uint32> defRegNOs = insn->GetDefRegs();
+            for (auto vRegNO : defRegNOs) {
+                RegOperand *virtualOpnd = cgFunc->GetVirtualRegisterOperand(vRegNO);
+                if (virtualOpnd != nullptr) {
+                    PrunedPhiInsertion(*bb, *virtualOpnd);
+                }
+            }
+        }
+    }
+}
+
+void CGSSAInfo::PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd)
+{
+    regno_t vRegNO = virtualOpnd.GetRegisterNumber();
+    MapleVector<uint32> frontiers = domInfo->GetDomFrontier(bb.GetId());
+    for (auto i : frontiers) {
+        BB *phiBB = cgFunc->GetBBFromID(i);
+        CHECK_FATAL(phiBB != nullptr, "get phiBB failed, change to DEBUG_ASSERT");
+        if (phiBB->HasPhiInsn(vRegNO)) {
+            continue;
+        }
+        if (phiBB->GetLiveIn()->TestBit(vRegNO)) {
+            CG *codeGen = cgFunc->GetCG();
+            PhiOperand &phiList = codeGen->CreatePhiOperand(*memPool, ssaAlloc);
+            /* do not insert phi opnd when inserting phi insn? */
+            for (auto prevBB : phiBB->GetPreds()) {
+                if (prevBB->GetLiveOut()->TestBit(vRegNO)) {
+                    auto *paraOpnd = static_cast<RegOperand *>(virtualOpnd.Clone(*tempMp));
+                    phiList.InsertOpnd(prevBB->GetId(), *paraOpnd);
+                } else {
+                    CHECK_FATAL(false, "multiple BB in");
+                }
+            }
+            Insn &phiInsn = codeGen->BuildPhiInsn(virtualOpnd, phiList);
+            MarkInsnsInSSA(phiInsn);
+            bool insertSuccess = false;
+            FOR_BB_INSNS(insn, phiBB) {
+                if (insn->IsMachineInstruction()) {
+                    (void)phiBB->InsertInsnBefore(*insn, phiInsn);
+                    insertSuccess = true;
+                    break;
+                }
+            }
+            if (!insertSuccess) {
+                phiBB->InsertInsnBegin(phiInsn);
+            }
+            phiBB->AddPhiInsn(vRegNO, phiInsn);
+            PrunedPhiInsertion(*phiBB, virtualOpnd);
+        }
+    }
+}
+
+void CGSSAInfo::RenameVariablesForBB(uint32 bbID)
+{
+    RenameBB(*cgFunc->GetBBFromID(bbID)); /* rename first BB */
+    const auto &domChildren = domInfo->GetDomChildren(bbID);
+    for (const auto &child : domChildren) {
+        RenameBB(*cgFunc->GetBBFromID(child));
+    }
+}
+
+void CGSSAInfo::RenameBB(BB &bb)
+{
+    if (IsBBRenamed(bb.GetId())) {
+        return;
+    }
+    AddRenamedBB(bb.GetId());
+    /* record version stack size */
+    size_t tempSize =
allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 : vRegStk.rbegin()->first + 1; + std::vector oriStackSize(tempSize, -1); + for (auto it : vRegStk) { + DEBUG_ASSERT(it.first < oriStackSize.size(), "out of range"); + oriStackSize[it.first] = static_cast(it.second.size()); + } + RenamePhi(bb); + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + MarkInsnsInSSA(*insn); + RenameInsn(*insn); + } + RenameSuccPhiUse(bb); + RenameVariablesForBB(bb.GetId()); + /* stack pop up */ + for (auto &it : vRegStk) { + if (it.first < oriStackSize.size() && oriStackSize[it.first] >= 0) { + while (static_cast(it.second.size()) > oriStackSize[static_cast(it.first)]) { + DEBUG_ASSERT(!it.second.empty(), "empty stack"); + it.second.pop(); + } + } + } +} + +void CGSSAInfo::RenamePhi(BB &bb) +{ + for (auto phiInsnIt : bb.GetPhiInsns()) { + Insn *phiInsn = phiInsnIt.second; + CHECK_FATAL(phiInsn != nullptr, "get phi insn failed"); + auto *phiDefOpnd = static_cast(&phiInsn->GetOperand(kInsnFirstOpnd)); + VRegVersion *newVst = CreateNewVersion(*phiDefOpnd, *phiInsn, kInsnFirstOpnd, true); + phiInsn->SetOperand(kInsnFirstOpnd, *newVst->GetSSAvRegOpnd()); + } +} + +void CGSSAInfo::RenameSuccPhiUse(const BB &bb) +{ + for (auto *sucBB : bb.GetSuccs()) { + for (auto phiInsnIt : sucBB->GetPhiInsns()) { + Insn *phiInsn = phiInsnIt.second; + CHECK_FATAL(phiInsn != nullptr, "get phi insn failed"); + Operand *phiListOpnd = &phiInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(phiListOpnd->IsPhi(), "unexpect phi operand"); + MapleMap &phiList = static_cast(phiListOpnd)->GetOperands(); + DEBUG_ASSERT(phiList.size() <= sucBB->GetPreds().size(), "unexpect phiList size need check"); + for (auto phiOpndIt = phiList.begin(); phiOpndIt != phiList.end(); ++phiOpndIt) { + if (phiOpndIt->first == bb.GetId()) { + RegOperand *renamedOpnd = GetRenamedOperand(*(phiOpndIt->second), false, *phiInsn, kInsnSecondOpnd); + phiList[phiOpndIt->first] = renamedOpnd; + } + } + } + } +} + +uint32 CGSSAInfo::IncreaseVregCount(regno_t vRegNO) +{ + if (!vRegDefCount.count(vRegNO)) { + vRegDefCount.emplace(vRegNO, 0); + } else { + vRegDefCount[vRegNO]++; + } + return vRegDefCount[vRegNO]; +} + +bool CGSSAInfo::IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst) +{ + if (allSSAOperands.count(vRegNO)) { + return false; + } + allSSAOperands.emplace(vRegNO, vst); + return true; +} + +VRegVersion *CGSSAInfo::CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi) +{ + regno_t vRegNO = virtualOpnd.GetRegisterNumber(); + uint32 verionIdx = IncreaseVregCount(vRegNO); + RegOperand *ssaOpnd = CreateSSAOperand(virtualOpnd); + auto *newVst = memPool->New(ssaAlloc, *ssaOpnd, verionIdx, vRegNO); + auto *defInfo = CreateDUInsnInfo(&defInsn, idx); + newVst->SetDefInsn(defInfo, isDefByPhi ? kDefByPhi : kDefByInsn); + if (!IncreaseSSAOperand(ssaOpnd->GetRegisterNumber(), newVst)) { + CHECK_FATAL(false, "insert ssa operand failed"); + } + auto it = vRegStk.find(vRegNO); + if (it == vRegStk.end()) { + MapleStack vRegVersionStack(ssaAlloc.Adapter()); + auto ret = vRegStk.emplace(std::pair>(vRegNO, vRegVersionStack)); + CHECK_FATAL(ret.second, "insert failed"); + it = ret.first; + } + it->second.push(newVst); + return newVst; +} + +VRegVersion *CGSSAInfo::GetVersion(const RegOperand &virtualOpnd) +{ + regno_t vRegNO = virtualOpnd.GetRegisterNumber(); + auto vRegIt = vRegStk.find(vRegNO); + return vRegIt != vRegStk.end() ? 
vRegIt->second.top() : nullptr; +} + +VRegVersion *CGSSAInfo::FindSSAVersion(regno_t ssaRegNO) +{ + auto it = allSSAOperands.find(ssaRegNO); + return it != allSSAOperands.end() ? it->second : nullptr; +} + +PhiOperand &CGSSAInfo::CreatePhiOperand() +{ + return cgFunc->GetCG()->CreatePhiOperand(*memPool, ssaAlloc); +} + +void CGSSAInfo::SetReversePostOrder() +{ + MapleVector &reverse = domInfo->GetReversePostOrder(); + for (auto *bb : reverse) { + if (bb != nullptr) { + reversePostOrder.emplace_back(bb->GetId()); + } + } +} + +void CGSSAInfo::DumpFuncCGIRinSSAForm() const +{ + LogInfo::MapleLogger() << "\n****** SSA CGIR for " << cgFunc->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, cgFunc) { + LogInfo::MapleLogger() << "=== BB " + << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx(); + LogInfo::MapleLogger() << " ==> @" << cgFunc->GetFunction().GetLabelName(bb->GetLabIdx()) << "]"; + } + + LogInfo::MapleLogger() << "> <" << bb->GetId() << "> "; + if (bb->IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb->IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel()) { + LogInfo::MapleLogger() << "cleanup "; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << "succs: "; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (!bb->GetEhSuccs().empty()) { + LogInfo::MapleLogger() << "eh_succs: "; + for (auto *ehSuccBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "===\n"; + LogInfo::MapleLogger() << "frequency:" << bb->GetFrequency() << "\n"; + + FOR_BB_INSNS_CONST(insn, bb) { + if (insn->IsCfiInsn() && insn->IsDbgInsn()) { + insn->Dump(); + } else { + DumpInsnInSSAForm(*insn); + } + } + } +} + +void VRegVersion::AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx) +{ + DEBUG_ASSERT(useInsn.GetId() > 0, "insn should be marked during ssa"); + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + if (useInsnIt != useInsnInfos.end()) { + useInsnIt->second->IncreaseDU(idx); + } else { + useInsnInfos.insert(std::make_pair(useInsn.GetId(), ssaInfo.CreateDUInsnInfo(&useInsn, idx))); + } +} + +void VRegVersion::RemoveUseInsn(const Insn &useInsn, uint32 idx) +{ + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + DEBUG_ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + useInsnIt->second->DecreaseDU(idx); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void VRegVersion::CheckDeadUse(const Insn &useInsn) +{ + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + DEBUG_ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void CgSSAConstruct::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} + +bool CgSSAConstruct::PhaseRun(maplebe::CGFunc &f) +{ + if (CG_DEBUG_FUNC(f)) { + DotGenerator::GenerateDot("beforessa", f, f.GetMirModule(), true); + } + MemPool *ssaMemPool = GetPhaseMemPool(); + MemPool *ssaTempMp = ApplyTempMemPool(); + DomAnalysis *domInfo = nullptr; + domInfo = GET_ANALYSIS(CgDomAnalysis, f); + LiveAnalysis *liveInfo = nullptr; + liveInfo = GET_ANALYSIS(CgLiveAnalysis, f); + ssaInfo = f.GetCG()->CreateCGSSAInfo(*ssaMemPool, f, *domInfo, *ssaTempMp); + 
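+    // ConstructSSA() below runs the two classic steps implemented earlier in this
+    // file: PrunedPhiInsertion() places phi insns at the dominance frontiers of
+    // every vreg def, but only where the liveness bitmap says the vreg is live-in
+    // (pruned SSA), and RenameVariablesForBB() then renames versions in
+    // dominator-tree preorder starting from the common entry BB.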
ssaInfo->ConstructSSA(); + + if (CG_DEBUG_FUNC(f)) { + LogInfo::MapleLogger() << "******** CG IR After ssaconstruct in ssaForm: *********" + << "\n"; + ssaInfo->DumpFuncCGIRinSSAForm(); + } + if (liveInfo != nullptr) { + liveInfo->ClearInOutDataInfo(); + } + /* due to change of register number */ + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + return true; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgSSAConstruct, cgssaconstruct) /* both transform & analysis */ +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa_pre.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f8054b85ef557cced29cadf8d3ce10d4f1177305 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssa_pre.cpp @@ -0,0 +1,620 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cgfunc.h" +#include "loop.h" +#include "cg_ssa_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSAPre::CodeMotion() +{ + // pass 1 only doing insertion + for (Occ *occ : allOccs) { + if (occ->occTy != kAOccPhiOpnd) { + continue; + } + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + DEBUG_ASSERT(phiOpndOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save inserted inside loop"); + workCand->saveAtEntryBBs.insert(phiOpndOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (Occ *occ : realOccs) { + if (occ->occTy != kAOccReal) { + continue; + } + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + DEBUG_ASSERT(realOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save in place inside loop"); + workCand->saveAtEntryBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " saveAtEntryBBs: ["; + for (uint32 id : workCand->saveAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting RealOcc's redundant flag and PhiOpndOcc's insertHere flag +void SSAPre::Finalize() +{ + std::vector availDefVec(classCount + 1, nullptr); + // preorder traversal of dominator tree + for (Occ *occ : allOccs) { + size_t classId = static_cast(occ->classId); + switch (occ->occTy) { + case kAOccPhi: { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->WillBeAvail()) { + availDefVec[classId] = phiOcc; + } + break; + } + case kAOccReal: { + RealOcc *realOcc = static_cast(occ); + if (availDefVec[classId] == nullptr || !availDefVec[classId]->IsDominate(dom, occ)) { + realOcc->redundant = false; + availDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kAOccPhiOpnd: { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + const PhiOcc *phiOcc = phiOpndOcc->defPhiOcc; + if (phiOcc->WillBeAvail()) { + if 
(phiOpndOcc->def == nullptr || (!phiOpndOcc->hasRealUse && phiOpndOcc->def->occTy == kAOccPhi && + !static_cast(phiOpndOcc->def)->WillBeAvail())) { + // insert a store + if (phiOpndOcc->cgbb->GetSuccs().size() != 1) { // critical edge + workCand->saveAtProlog = true; + break; + } + phiOpndOcc->insertHere = true; + } else { + phiOpndOcc->def = availDefVec[classId]; + } + } + break; + } + case kAOccExit: + break; + default: + DEBUG_ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->saveAtProlog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->saveAtProlog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << '\n'; + return; + } + for (Occ *occ : allOccs) { + if (occ->occTy == kAOccReal) { + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kAOccPhiOpnd) { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAvail Computation ================ + +void SSAPre::ResetCanBeAvail(PhiOcc *phi) const +{ + phi->isCanBeAvail = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (!phiOpndOcc->hasRealUse && !phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + ResetCanBeAvail(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeCanBeAvail() const +{ + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + bool existNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAvail(phiOcc); + } + } + } +} + +void SSAPre::ResetLater(PhiOcc *phi) const +{ + phi->isLater = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (phiOcc->isLater) { + ResetLater(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeLater() const +{ + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->isLater = phiOcc->isCanBeAvail; + } + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->isLater) { + bool existNonNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse || phiOcc->speculativeDownsafe) { + ResetLater(phiOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after later computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->isCanBeAvail) { + LogInfo::MapleLogger() << " canbeAvail"; + } + if (phiOcc->isLater) { + LogInfo::MapleLogger() << " later"; + } + if (phiOcc->isCanBeAvail && !phiOcc->isLater) { + LogInfo::MapleLogger() << " will be Avail"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Downsafe Computation ================ +void SSAPre::ResetDownsafe(const PhiOpndOcc *phiOpnd) const +{ + if (phiOpnd->hasRealUse) { + return; + } + Occ *defOcc = phiOpnd->def; + if (defOcc == nullptr || defOcc->occTy != kAOccPhi) { + return; + } + PhiOcc 
*defPhiOcc = static_cast(defOcc); + if (defPhiOcc->speculativeDownsafe) { + return; + } + if (!defPhiOcc->isDownsafe) { + return; + } + defPhiOcc->isDownsafe = false; + for (PhiOpndOcc *phiOpndOcc : defPhiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } +} + +void SSAPre::ComputeDownsafe() const +{ + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe) { + // propagate not-Downsafe backward along use-def edges + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after downsafe computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + } + if (phiOcc->isDownsafe) { + LogInfo::MapleLogger() << " downsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +static void PropagateSpeculativeDownsafe(PhiOcc *phiOcc) +{ + if (phiOcc->speculativeDownsafe) { + return; + } + phiOcc->isDownsafe = true; + phiOcc->speculativeDownsafe = true; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } +} + +void SSAPre::Rename() +{ + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of dominator + // tree + for (Occ *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(dom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kAOccExit: + if (!occStack.empty()) { + Occ *topOcc = occStack.top(); + if (topOcc->occTy == kAOccPhi) { + PhiOcc *phiTopOcc = static_cast(topOcc); + if (!phiTopOcc->speculativeDownsafe) { + phiTopOcc->isDownsafe = false; + } + } + } + break; + case kAOccPhi: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kAOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + Occ *topOcc = occStack.top(); + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccPhi) { + occStack.push(occ); + if (occ->cgbb->GetLoop() != nullptr) { + static_cast(topOcc)->isDownsafe = true; + static_cast(topOcc)->speculativeDownsafe = true; + } + } + break; + } + case kAOccPhiOpnd: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + Occ *topOcc = occStack.top(); + occ->def = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + DEBUG_ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + // loop thru phiOccs to propagate speculativeDownsafe + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->speculativeDownsafe) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (Occ *occ : allOccs) { + occ->Dump(); + if (occ->occTy == kAOccPhi) { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + } + } + 
LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert phis ================ + +// form phi occ based on the real occ in workCand->realOccs; result is +// stored in phiDfns +void SSAPre::FormPhis() +{ + for (Occ *occ : realOccs) { + GetIterDomFrontier(occ->cgbb, &phiDfns); + } +} + +// form allOccs inclusive of real, phi, phiOpnd, exit occurrences; +// form phiOccs containing only the phis +void SSAPre::CreateSortedOccs() +{ + // form phiOpnd occs based on the preds of the phi occs; result is + // stored in phiOpndDfns + std::multiset<uint32> phiOpndDfns; + for (uint32 dfn : phiDfns) { + const BBId bbId = dom->GetDtPreOrderItem(dfn); + BB *cgbb = cgFunc->GetAllBBs()[bbId]; + for (BB *pred : cgbb->GetPreds()) { + (void)phiOpndDfns.insert(dom->GetDtDfnItem(pred->GetId())); + } + } + std::unordered_map<uint32, std::forward_list<PhiOpndOcc *>> bb2PhiOpndMap; + MapleVector<Occ *>::iterator realOccIt = realOccs.begin(); + MapleVector<ExitOcc *>::iterator exitOccIt = exitOccs.begin(); + MapleSet<uint32>::iterator phiDfnIt = phiDfns.begin(); + std::multiset<uint32>::iterator phiOpndDfnIt = phiOpndDfns.begin(); + Occ *nextRealOcc = nullptr; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } + ExitOcc *nextExitOcc = nullptr; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } + PhiOcc *nextPhiOcc = nullptr; + if (phiDfnIt != phiDfns.end()) { + nextPhiOcc = preMp->New<PhiOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator); + } + PhiOpndOcc *nextPhiOpndOcc = nullptr; + if (phiOpndDfnIt != phiOpndDfns.end()) { + nextPhiOpndOcc = preMp->New<PhiOpndOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt))); + auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt)); + if (it == bb2PhiOpndMap.end()) { + std::forward_list<PhiOpndOcc *> newlist = {nextPhiOpndOcc}; + bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist; + } else { + it->second.push_front(nextPhiOpndOcc); + } + } + Occ *pickedOcc = nullptr; // the next picked occ in order of preorder traversal of dominator tree + do { + pickedOcc = nullptr; + if (nextPhiOcc != nullptr) { + pickedOcc = nextPhiOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextRealOcc->cgbb->GetId()) < + dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextPhiOpndOcc != nullptr && + (pickedOcc == nullptr || *phiOpndDfnIt < dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextPhiOpndOcc; + } + if (nextExitOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextExitOcc->cgbb->GetId()) < + dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextExitOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->occTy) { + case kAOccReal: { + // get the next real occ + CHECK_FATAL(realOccIt != realOccs.end(), "iterator check"); + ++realOccIt; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kAOccExit: { + CHECK_FATAL(exitOccIt != exitOccs.end(), "iterator check"); + ++exitOccIt; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } else { + nextExitOcc = nullptr; + } + break; + } + case kAOccPhi: { + phiOccs.push_back(static_cast<PhiOcc *>(pickedOcc)); + CHECK_FATAL(phiDfnIt != phiDfns.end(), "iterator check"); + ++phiDfnIt; + if (phiDfnIt != phiDfns.end()) { + nextPhiOcc = + preMp->New<PhiOcc>(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator); + } else { + nextPhiOcc = nullptr; + } + break; + } + case kAOccPhiOpnd: { + CHECK_FATAL(phiOpndDfnIt !=
phiOpndDfns.end(), "iterator check"); + ++phiOpndDfnIt; + if (phiOpndDfnIt != phiOpndDfns.end()) { + nextPhiOpndOcc = + preMp->New(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt))); + auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt)); + if (it == bb2PhiOpndMap.end()) { + std::forward_list newlist = {nextPhiOpndOcc}; + bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist; + } else { + it->second.push_front(nextPhiOpndOcc); + } + } else { + nextPhiOpndOcc = nullptr; + } + break; + } + default: + DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occTy"); + break; + } + } + } while (pickedOcc != nullptr); + // initialize phiOpnd vector in each PhiOcc node and defPhiOcc in each PhiOpndOcc + for (PhiOcc *phiOcc : phiOccs) { + for (BB *pred : phiOcc->cgbb->GetPreds()) { + PhiOpndOcc *phiOpndOcc = bb2PhiOpndMap[pred->GetId()].front(); + phiOcc->phiOpnds.push_back(phiOpndOcc); + phiOpndOcc->defPhiOcc = phiOcc; + bb2PhiOpndMap[pred->GetId()].pop_front(); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after phi insertion _______" << '\n'; + for (Occ *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 0: Preparations ================ + +void SSAPre::PropagateNotAnt(BB *bb, std::set *visitedBBs) +{ + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + if (workCand->occBBs.count(bb->GetId()) != 0) { + return; + } + fullyAntBBs[bb->GetId()] = false; + for (BB *predbb : bb->GetPreds()) { + PropagateNotAnt(predbb, visitedBBs); + } +} + +void SSAPre::FormRealsNExits() +{ + std::set visitedBBs; + if (asEarlyAsPossible) { + for (BB *cgbb : cgFunc->GetExitBBsVec()) { + if (!cgbb->IsUnreachable()) { + PropagateNotAnt(cgbb, &visitedBBs); + } + } + } + + for (uint32 i = 0; i < dom->GetDtPreOrderSize(); i++) { + BBId bbid = dom->GetDtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (asEarlyAsPossible) { + if (fullyAntBBs[cgbb->GetId()]) { + RealOcc *realOcc = preMp->New(cgbb); + realOccs.push_back(realOcc); + } + } else { + if (workCand->occBBs.count(cgbb->GetId()) != 0) { + RealOcc *realOcc = preMp->New(cgbb); + realOccs.push_back(realOcc); + } + } + if (!cgbb->IsUnreachable() && (cgbb->NumSuccs() == 0 || cgbb->GetKind() == BB::kBBReturn)) { + ExitOcc *exitOcc = preMp->New(cgbb); + exitOccs.push_back(exitOcc); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << "Placement Optimization for callee-save saves" << '\n'; + LogInfo::MapleLogger() << "-----------------------------------------------" << '\n'; + LogInfo::MapleLogger() << " _______ input _______" << '\n'; + LogInfo::MapleLogger() << " occBBs: ["; + for (uint32 id : workCand->occBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } +} + +void SSAPre::ApplySSAPre() +{ + FormRealsNExits(); + // #1 insert phis; results in allOccs and phiOccs + FormPhis(); // result put in the set phi_bbs + CreateSortedOccs(); + // #2 rename + Rename(); + if (!phiOccs.empty()) { + // #3 DownSafety + ComputeDownsafe(); + // #4 CanBeAvail + ComputeCanBeAvail(); + ComputeLater(); + } + // #5 Finalize + Finalize(); + if (!workCand->saveAtProlog) { + // #6 Code Motion + CodeMotion(); + } +} + +void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand) +{ + MemPool *tempMP = memPoolCtrler.NewMemPool("cg_ssa_pre", true); + SSAPre cgssapre(f, dom, tempMP, workCand, false /*asEarlyAsPossible*/, false /*enabledDebug*/); + + cgssapre.ApplySSAPre(); + + 
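+    // The computed placement lives in workCand->saveAtEntryBBs (filled by CodeMotion above), not in
+    // tempMP, so the temporary pool is safe to release as soon as ApplySSAPre() returns.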
memPoolCtrler.DeleteMemPool(tempMP); +} + +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssu_pre.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssu_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..60ff37e50f64c434dd0bb2490a5da2a36b30d207 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_ssu_pre.cpp @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cgfunc.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSUPre::CodeMotion() +{ + // pass 1 only doing insertion + for (SOcc *occ : allOccs) { + if (occ->occTy != kSOccLambdaRes) { + continue; + } + SLambdaResOcc *lambdaResOcc = static_cast<SLambdaResOcc *>(occ); + if (lambdaResOcc->insertHere) { + workCand->restoreAtEntryBBs.insert(lambdaResOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (SOcc *occ : realOccs) { + if (occ->occTy != kSOccReal) { + continue; + } + SRealOcc *realOcc = static_cast<SRealOcc *>(occ); + if (!realOcc->redundant) { + if (realOcc->cgbb->IsWontExit()) { + workCand->restoreAtEpilog = true; + break; + } + workCand->restoreAtExitBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of restore inside infinite loop" << '\n'; + return; + } + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " restoreAtEntryBBs: ["; + for (uint32 id : workCand->restoreAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n restoreAtExitBBs: ["; + for (uint32 id : workCand->restoreAtExitBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting SRealOcc's redundant flag and SLambdaResOcc's insertHere flag +void SSUPre::Finalize() +{ + std::vector<SOcc *> anticipatedDefVec(classCount + 1, nullptr); + // preorder traversal of post-dominator tree + for (SOcc *occ : allOccs) { + size_t classId = static_cast<size_t>(occ->classId); + switch (occ->occTy) { + case kSOccLambda: { + auto *lambdaOcc = static_cast<SLambdaOcc *>(occ); + if (lambdaOcc->WillBeAnt()) { + anticipatedDefVec[classId] = lambdaOcc; + } + break; + } + case kSOccReal: { + auto *realOcc = static_cast<SRealOcc *>(occ); + if (anticipatedDefVec[classId] == nullptr || !anticipatedDefVec[classId]->IsPostDominate(pdom, occ)) { + realOcc->redundant = false; + anticipatedDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kSOccLambdaRes: { + auto *lambdaResOcc = static_cast<SLambdaResOcc *>(occ); + const SLambdaOcc *lambdaOcc = lambdaResOcc->useLambdaOcc; + if (lambdaOcc->WillBeAnt()) { + if (lambdaResOcc->use == nullptr || + (!lambdaResOcc->hasRealUse && lambdaResOcc->use->occTy == kSOccLambda && + !static_cast<SLambdaOcc *>(lambdaResOcc->use)->WillBeAnt())) { + // insert a store + if
(lambdaResOcc->cgbb->GetPreds().size() != 1) { // critical edge + workCand->restoreAtEpilog = true; + break; + } + lambdaResOcc->insertHere = true; + } else { + lambdaResOcc->use = anticipatedDefVec[classId]; + } + } + break; + } + case kSOccEntry: + case kSOccKill: + break; + default: + DEBUG_ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->restoreAtEpilog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << '\n'; + return; + } + for (SOcc *occ : allOccs) { + if (occ->occTy == kSOccReal) { + SRealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kSOccLambdaRes) { + SLambdaResOcc *lambdaResOcc = static_cast(occ); + if (lambdaResOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAnt Computation ================ + +void SSUPre::ResetCanBeAnt(SLambdaOcc *lambda) const +{ + lambda->isCanBeAnt = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (!lambdaResOcc->hasRealUse && !lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + ResetCanBeAnt(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeCanBeAnt() const +{ + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + bool existNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAnt(lambdaOcc); + } + } + } +} + +void SSUPre::ResetEarlier(SLambdaOcc *lambda) const +{ + lambda->isEarlier = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (lambdaOcc->isEarlier) { + ResetEarlier(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeEarlier() const +{ + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->isEarlier = lambdaOcc->isCanBeAnt; + } + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (lambdaOcc->isEarlier) { + bool existNonNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse) { + ResetEarlier(lambdaOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after earlier computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isCanBeAnt) { + LogInfo::MapleLogger() << " canbeant"; + } + if (lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " earlier"; + } + if (lambdaOcc->isCanBeAnt && !lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " will be ant"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Upsafe Computation ================ +void SSUPre::ResetUpsafe(const SLambdaResOcc *lambdaRes) const +{ + if (lambdaRes->hasRealUse) { + return; + } + SOcc *useOcc = lambdaRes->use; + if (useOcc == nullptr || useOcc->occTy 
!= kSOccLambda) { + return; + } + auto *useLambdaOcc = static_cast(useOcc); + if (!useLambdaOcc->isUpsafe) { + return; + } + useLambdaOcc->isUpsafe = false; + for (SLambdaResOcc *lambdaResOcc : useLambdaOcc->lambdaRes) { + ResetUpsafe(lambdaResOcc); + } +} + +void SSUPre::ComputeUpsafe() const +{ + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe) { + // propagate not-upsafe forward along def-use edges + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + ResetUpsafe(lambdaResOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after upsafe computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isUpsafe) { + LogInfo::MapleLogger() << " upsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +void SSUPre::Rename() +{ + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of + // post-dominator tree + for (SOcc *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsPostDominate(pdom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kSOccKill: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + occStack.push(occ); + break; + case kSOccEntry: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + break; + case kSOccLambda: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kSOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + DEBUG_ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccLambda) { + occStack.push(occ); + } + break; + } + case kSOccLambdaRes: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // leave classId as 0 + break; + } + DEBUG_ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->use = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + DEBUG_ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert lambdas ================ + +// form lambda occ based on the real occ in workCand->realOccs; result is +// stored in lambdaDfns +void SSUPre::FormLambdas() +{ + for (SOcc *occ : realOccs) { + if (occ->occTy == kSOccKill) { + continue; + } + GetIterPdomFrontier(occ->cgbb, &lambdaDfns); + } +} + +// form allOccs inclusive of real, kill, lambda, lambdaRes, entry occurrences; +// form lambdaOccs containing only the lambdas +void SSUPre::CreateSortedOccs() +{ + // form lambdaRes occs based on the succs of the lambda occs; result is + // stored in lambdaResDfns + std::multiset lambdaResDfns; + for (uint32 dfn 
: lambdaDfns) { + const BBId bbId = pdom->GetPdtPreOrderItem(dfn); + BB *cgbb = cgFunc->GetAllBBs()[bbId]; + for (BB *succ : cgbb->GetSuccs()) { + (void)lambdaResDfns.insert(pdom->GetPdtDfnItem(succ->GetId())); + } + } + std::unordered_map> bb2LambdaResMap; + MapleVector::iterator realOccIt = realOccs.begin(); + MapleVector::iterator entryOccIt = entryOccs.begin(); + MapleSet::iterator lambdaDfnIt = lambdaDfns.begin(); + MapleSet::iterator lambdaResDfnIt = lambdaResDfns.begin(); + SOcc *nextRealOcc = nullptr; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } + SEntryOcc *nextEntryOcc = nullptr; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } + SLambdaOcc *nextLambdaOcc = nullptr; + if (lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } + SLambdaResOcc *nextLambdaResOcc = nullptr; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = {nextLambdaResOcc}; + bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } + SOcc *pickedOcc = nullptr; // the next picked occ in order of preorder traversal of post-dominator tree + do { + pickedOcc = nullptr; + if (nextLambdaOcc != nullptr) { + pickedOcc = nextLambdaOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextRealOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextLambdaResOcc != nullptr && + (pickedOcc == nullptr || *lambdaResDfnIt < pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextLambdaResOcc; + } + if (nextEntryOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextEntryOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextEntryOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->occTy) { + case kSOccReal: + case kSOccKill: { + // get the next real/kill occ + CHECK_FATAL(realOccIt != realOccs.end(), "iterator check"); + ++realOccIt; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kSOccEntry: { + CHECK_FATAL(entryOccIt != entryOccs.end(), "iterator check"); + ++entryOccIt; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } else { + nextEntryOcc = nullptr; + } + break; + } + case kSOccLambda: { + lambdaOccs.push_back(static_cast(pickedOcc)); + CHECK_FATAL(lambdaDfnIt != lambdaDfns.end(), "iterator check"); + ++lambdaDfnIt; + if (lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = spreMp->New( + cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } else { + nextLambdaOcc = nullptr; + } + break; + } + case kSOccLambdaRes: { + CHECK_FATAL(lambdaResDfnIt != lambdaResDfns.end(), "iterator check"); + ++lambdaResDfnIt; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = spreMp->New( + cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = {nextLambdaResOcc}; + 
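+            // first lambdaRes seen for this BB: start its queue; each lambda that lists this BB as a
+            // successor adds one more entry (push_front below), and the entries are consumed front-first
+            // when the lambdaRes vectors are wired up at the end of this function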
bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } else { + nextLambdaResOcc = nullptr; + } + break; + } + default: + DEBUG_ASSERT(false, "CreateSortedOccs: unexpected occTy"); + break; + } + } + } while (pickedOcc != nullptr); + // initialize lambdaRes vector in each SLambdaOcc node and useLambdaOcc in each SLambdaResOcc + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (BB *succ : lambdaOcc->cgbb->GetSuccs()) { + SLambdaResOcc *lambdaResOcc = bb2LambdaResMap[succ->GetId()].front(); + lambdaOcc->lambdaRes.push_back(lambdaResOcc); + lambdaResOcc->useLambdaOcc = lambdaOcc; + bb2LambdaResMap[succ->GetId()].pop_front(); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after lambda insertion _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 0: Preparations ================ + +void SSUPre::PropagateNotAvail(BB *bb, std::set *visitedBBs) +{ + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + if (workCand->occBBs.count(bb->GetId()) != 0 || workCand->saveBBs.count(bb->GetId()) != 0) { + return; + } + fullyAvailBBs[bb->GetId()] = false; + for (BB *succbb : bb->GetSuccs()) { + PropagateNotAvail(succbb, visitedBBs); + } +} + +void SSUPre::FormReals() +{ + if (!asLateAsPossible) { + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } else if (workCand->occBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + } + } + } else { + std::set visitedBBs; + fullyAvailBBs[cgFunc->GetCommonExitBB()->GetId()] = false; + PropagateNotAvail(cgFunc->GetFirstBB(), &visitedBBs); + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (fullyAvailBBs[cgbb->GetId()]) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } + } + } + } + + if (enabledDebug) { + LogInfo::MapleLogger() << "Placement Optimization for callee-save restores" << '\n'; + LogInfo::MapleLogger() << "-----------------------------------------------" << '\n'; + LogInfo::MapleLogger() << " _______ input _______" << '\n'; + LogInfo::MapleLogger() << " occBBs: ["; + for (uint32 id : workCand->occBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n saveBBs: ["; + for (uint32 id : workCand->saveBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } +} + +void SSUPre::ApplySSUPre() +{ + FormReals(); + // #1 insert lambdas; results in allOccs and lambdaOccs + FormLambdas(); // result put in the set lambda_bbs + CreateSortedOccs(); + // #2 rename + Rename(); + if (!lambdaOccs.empty()) { + // #3 UpSafety + ComputeUpsafe(); + // #4 CanBeAnt + ComputeCanBeAnt(); + ComputeEarlier(); + } + // #5 Finalize + Finalize(); + if (!workCand->restoreAtEpilog) { + // #6 Code Motion + CodeMotion(); + } +} + +void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand) +{ + MemPool *tempMP = 
memPoolCtrler.NewMemPool("cg_ssu_pre", true); + SSUPre cgssupre(f, pdom, tempMP, workCand, true /*asLateAsPossible*/, false /*enabledDebug*/); + + cgssupre.ApplySSUPre(); + + memPoolCtrler.DeleteMemPool(tempMP); +} + +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_validbit_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a7f6fffd5172ec07838a07ba2eee7c6d21dcf527 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cg_validbit_opt.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cg_validbit_opt.h" +#include "mempool.h" +#include "aarch64_validbit_opt.h" + +namespace maplebe { +Insn *ValidBitPattern::GetDefInsn(const RegOperand &useReg) +{ + if (!useReg.IsSSAForm()) { + return nullptr; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + DEBUG_ASSERT(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + CHECK_FATAL(!useVersion->IsDeleted(), "deleted version"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + return defInfo == nullptr ? 
nullptr : defInfo->GetInsn(); +} + +InsnSet ValidBitPattern::GetAllUseInsn(const RegOperand &defReg) +{ + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *currInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(currInsn); + } + } + return allUseInsn; +} + +void ValidBitPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) +{ + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +void ValidBitOpt::RectifyValidBitNum() +{ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + SetValidBits(*insn); + } + } + bool iterate; + /* Use inverse postorder to converge with minimal iterations */ + do { + iterate = false; + MapleVector reversePostOrder = ssaInfo->GetReversePostOrder(); + for (uint32 bbId : reversePostOrder) { + BB *bb = cgFunc->GetBBFromID(bbId); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsPhi()) { + continue; + } + bool change = SetPhiValidBits(*insn); + if (change) { + /* if vb changes once, iterate. 
*/ + iterate = true; + } + } + } + } while (iterate); +} + +void ValidBitOpt::RecoverValidBitNum() +{ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->OpndIsDef(i) && insn->GetOperand(i).IsRegister()) { + auto &dstOpnd = static_cast(insn->GetOperand(i)); + dstOpnd.SetValidBitsNum(dstOpnd.GetSize()); + } + } + } + } +} + +void ValidBitOpt::Run() +{ + /* + * Set validbit of regOpnd before optimization + */ + RectifyValidBitNum(); + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoOpt(*bb, *insn); + } + } + /* + * Recover validbit of regOpnd after optimization + */ + RecoverValidBitNum(); +} + +bool CgValidBitOpt::PhaseRun(maplebe::CGFunc &f) +{ + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL(ssaInfo != nullptr, "Get ssaInfo failed"); + auto *vbOpt = f.GetCG()->CreateValidBitOpt(*GetPhaseMemPool(), f, *ssaInfo); + CHECK_FATAL(vbOpt != nullptr, "vbOpt instance create failed"); + vbOpt->Run(); + return true; +} + +void CgValidBitOpt::GetAnalysisDependence(AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgValidBitOpt, cgvalidbitopt) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgbb.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgbb.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9a05f13aa9df44a18acac75fb510be60c0959ffa --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgbb.cpp @@ -0,0 +1,573 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cgbb.h" +#include "cgfunc.h" +#include "loop.h" + +namespace maplebe { +constexpr uint32 kCondBrNum = 2; +constexpr uint32 kSwitchCaseNum = 5; + +const std::string BB::bbNames[BB::kBBLast] = {"BB_ft", "BB_if", "BB_goto", "BB_igoto", + "BB_ret", "BB_intrinsic", "BB_rangegoto", "BB_throw"}; + +Insn *BB::InsertInsnBefore(Insn &existing, Insn &newInsn) +{ + Insn *pre = existing.GetPrev(); + newInsn.SetPrev(pre); + newInsn.SetNext(&existing); + existing.SetPrev(&newInsn); + if (pre != nullptr) { + pre->SetNext(&newInsn); + } + if (&existing == firstInsn) { + firstInsn = &newInsn; + } + newInsn.SetBB(this); + return &newInsn; +} + +Insn *BB::InsertInsnAfter(Insn &existing, Insn &newInsn) +{ + newInsn.SetPrev(&existing); + newInsn.SetNext(existing.GetNext()); + existing.SetNext(&newInsn); + if (&existing == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext()) { + newInsn.GetNext()->SetPrev(&newInsn); + } + newInsn.SetBB(this); + internalFlag1++; + return &newInsn; +} + +void BB::ReplaceInsn(Insn &insn, Insn &newInsn) +{ + if (insn.IsAccessRefField()) { + newInsn.MarkAsAccessRefField(true); + } + if (insn.GetDoNotRemove()) { + newInsn.SetDoNotRemove(true); + } + newInsn.SetPrev(insn.GetPrev()); + newInsn.SetNext(insn.GetNext()); + if (&insn == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext() != nullptr) { + newInsn.GetNext()->SetPrev(&newInsn); + } + if (firstInsn == &insn) { + firstInsn = &newInsn; + } else if (newInsn.GetPrev() != nullptr) { + newInsn.GetPrev()->SetNext(&newInsn); + } + newInsn.SetComment(insn.GetComment()); + newInsn.SetBB(this); +} + +void BB::RemoveInsn(Insn &insn) +{ + if ((firstInsn == &insn) && (lastInsn == &insn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = insn.GetNext(); + } else if (lastInsn == &insn) { + lastInsn = insn.GetPrev(); + } + /* remove insn from lir list */ + Insn *prevInsn = insn.GetPrev(); + Insn *nextInsn = insn.GetNext(); + if (prevInsn != nullptr) { + prevInsn->SetNext(nextInsn); + } + if (nextInsn != nullptr) { + nextInsn->SetPrev(prevInsn); + } +} + +void BB::RemoveInsnPair(Insn &insn, const Insn &nextInsn) +{ + DEBUG_ASSERT(insn.GetNext() == &nextInsn, "next_insn is supposed to follow insn"); + DEBUG_ASSERT(nextInsn.GetPrev() == &insn, "next_insn is supposed to follow insn"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* Remove insns in this bb from insn1 to insn2. 
*/ +void BB::RemoveInsnSequence(Insn &insn, const Insn &nextInsn) +{ + DEBUG_ASSERT(insn.GetBB() == this, "remove insn sequence in one bb"); + DEBUG_ASSERT(nextInsn.GetBB() == this, "remove insn sequence in one bb"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* append all insns from bb into this bb */ +void BB::AppendBBInsns(BB &bb) +{ + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + if (firstInsn != nullptr) { + FOR_BB_INSNS(i, &bb) { + i->SetBB(this); + } + } + return; + } + if ((bb.firstInsn == nullptr) || (bb.lastInsn == nullptr)) { + return; + } + FOR_BB_INSNS_SAFE(insn, &bb, nextInsn) { + AppendInsn(*insn); + } +} + +/* prepend all insns from bb into this bb */ +void BB::InsertAtBeginning(BB &bb) +{ + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.lastInsn->SetNext(firstInsn); + firstInsn->SetPrev(bb.lastInsn); + firstInsn = bb.firstInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* append all insns from bb into this bb */ +void BB::InsertAtEnd(BB &bb) +{ + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.firstInsn->SetPrev(lastInsn); + lastInsn->SetNext(bb.firstInsn); + lastInsn = bb.lastInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Insert all insns from bb into this bb before the last instr */ +void BB::InsertAtEndMinus1(BB &bb) +{ + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + if (NumInsn() == 1) { + InsertAtBeginning(bb); + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + /* Add between prevLast and lastInsn */ + Insn *prevLast = lastInsn->GetPrev(); + bb.firstInsn->SetPrev(prevLast); + prevLast->SetNext(bb.firstInsn); + lastInsn->SetPrev(bb.lastInsn); + bb.lastInsn->SetNext(lastInsn); + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Number of instructions excluding DbgInsn and comments */ +int32 BB::NumInsn() const +{ + int32 bbSize = 0; + FOR_BB_INSNS_CONST(i, this) { + if (i->IsImmaterialInsn() || i->IsDbgInsn()) { + continue; + } + ++bbSize; + } + return bbSize; +} + +bool BB::IsInPhiList(regno_t regNO) +{ + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiListOpnd = static_cast(phiInsn->GetOperand(kInsnSecondOpnd)); + for (auto phiListIt : phiListOpnd.GetOperands()) { + RegOperand *phiUseOpnd = phiListIt.second; + if (phiUseOpnd == nullptr) { + continue; + } + if (phiUseOpnd->GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool BB::IsInPhiDef(regno_t regNO) +{ + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiDefOpnd = static_cast(phiInsn->GetOperand(kInsnFirstOpnd)); + if 
(phiDefOpnd.GetRegisterNumber() == regNO) { + return true; + } + } + return false; +} + +bool BB::HasCriticalEdge() +{ + constexpr int minPredsNum = 2; + if (preds.size() < minPredsNum) { + return false; + } + for (BB *pred : preds) { + if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) { + continue; + } + if (pred->GetSuccs().size() > 1) { + return true; + } + } + return false; +} + +void BB::Dump() const +{ + LogInfo::MapleLogger() << "=== BB " << this << " <" << GetKindName(); + if (labIdx) { + LogInfo::MapleLogger() << "[labeled with " << labIdx << "]"; + if (labelTaken) { + LogInfo::MapleLogger() << " taken"; + } + } + LogInfo::MapleLogger() << "> <" << id << "> "; + if (isCleanup) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (unreachable) { + LogInfo::MapleLogger() << "[unreachable] "; + } + LogInfo::MapleLogger() << "frequency:" << frequency << "===\n"; + + Insn *insn = firstInsn; + while (insn != nullptr) { + insn->Dump(); + insn = insn->GetNext(); + } +} + +bool BB::IsCommentBB() const +{ + if (GetKind() != kBBFallthru) { + return false; + } + FOR_BB_INSNS_CONST(insn, this) { + if (insn->IsMachineInstruction()) { + return false; + } + } + return true; +} + +/* return true if bb has no real insns. */ +bool BB::IsEmptyOrCommentOnly() const +{ + return (IsEmpty() || IsCommentBB()); +} + +bool BB::IsSoloGoto() const +{ + if (GetKind() != kBBGoto) { + return false; + } + if (GetHasCfi()) { + return false; + } + FOR_BB_INSNS_CONST(insn, this) { + if (!insn->IsMachineInstruction()) { + continue; + } + return (insn->IsUnCondBranch()); + } + return false; +} + +BB *BB::GetValidPrev() +{ + BB *pre = GetPrev(); + while (pre != nullptr && (pre->IsEmptyOrCommentOnly() || pre->IsUnreachable())) { + pre = pre->GetPrev(); + } + return pre; +} + +bool Bfs::AllPredBBVisited(const BB &bb, long &level) const +{ + bool isAllPredsVisited = true; + for (const auto *predBB : bb.GetPreds()) { + /* See if pred bb is a loop back edge */ + bool isBackEdge = false; + for (const auto *loopBB : predBB->GetLoopSuccs()) { + if (loopBB == &bb) { + isBackEdge = true; + break; + } + } + if (!isBackEdge && !visitedBBs[predBB->GetId()]) { + isAllPredsVisited = false; + break; + } + level = std::max(level, predBB->GetInternalFlag2()); + } + for (const auto *predEhBB : bb.GetEhPreds()) { + bool isBackEdge = false; + for (const auto *loopBB : predEhBB->GetLoopSuccs()) { + if (loopBB == &bb) { + isBackEdge = true; + break; + } + } + if (!isBackEdge && !visitedBBs[predEhBB->GetId()]) { + isAllPredsVisited = false; + break; + } + level = std::max(level, predEhBB->GetInternalFlag2()); + } + return isAllPredsVisited; +} + +/* + * During live interval construction, a bb that has only one predecessor and/or + * one successor is a straight-line bb. It can be considered to be a single large + * bb for the purpose of finding live intervals. This is to prevent extending the + * live interval of registers unnecessarily when interleaving bbs from other paths.
+ */ +BB *Bfs::MarkStraightLineBBInBFS(BB *bb) +{ + while (true) { + if ((bb->GetSuccs().size() != 1) || !bb->GetEhSuccs().empty()) { + break; + } + BB *sbb = bb->GetSuccs().front(); + if (visitedBBs[sbb->GetId()]) { + break; + } + if ((sbb->GetPreds().size() != 1) || !sbb->GetEhPreds().empty()) { + break; + } + sortedBBs.push_back(sbb); + visitedBBs[sbb->GetId()] = true; + sbb->SetInternalFlag2(bb->GetInternalFlag2() + 1); + bb = sbb; + } + return bb; +} + +BB *Bfs::SearchForStraightLineBBs(BB &bb) +{ + if ((bb.GetSuccs().size() != kCondBrNum) || bb.GetEhSuccs().empty()) { + return &bb; + } + BB *sbb1 = bb.GetSuccs().front(); + BB *sbb2 = bb.GetSuccs().back(); + size_t predSz1 = sbb1->GetPreds().size(); + size_t predSz2 = sbb2->GetPreds().size(); + BB *candidateBB = nullptr; + if ((predSz1 == 1) && (predSz2 > kSwitchCaseNum)) { + candidateBB = sbb1; + } else if ((predSz2 == 1) && (predSz1 > kSwitchCaseNum)) { + candidateBB = sbb2; + } else { + return &bb; + } + DEBUG_ASSERT(candidateBB->GetId() < visitedBBs.size(), "index out of range in RA::SearchForStraightLineBBs"); + if (visitedBBs[candidateBB->GetId()]) { + return &bb; + } + if (!candidateBB->GetEhPreds().empty()) { + return &bb; + } + if (candidateBB->GetSuccs().size() != 1) { + return &bb; + } + + sortedBBs.push_back(candidateBB); + visitedBBs[candidateBB->GetId()] = true; + return MarkStraightLineBBInBFS(candidateBB); +} + +void Bfs::BFS(BB &curBB) +{ + std::queue workList; + workList.push(&curBB); + DEBUG_ASSERT(curBB.GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + DEBUG_ASSERT(curBB.GetId() < visitedBBs.size(), "index out of range in RA::BFS"); + visitedBBs[curBB.GetId()] = true; + do { + BB *bb = workList.front(); + sortedBBs.push_back(bb); + DEBUG_ASSERT(bb->GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + visitedBBs[bb->GetId()] = true; + workList.pop(); + /* Look for straight line bb */ + bb = MarkStraightLineBBInBFS(bb); + /* Look for an 'if' followed by some straight-line bb */ + bb = SearchForStraightLineBBs(*bb); + for (auto *ibb : bb->GetSuccs()) { + /* See if there are unvisited predecessor */ + if (visitedBBs[ibb->GetId()]) { + continue; + } + long prevLevel = 0; + if (AllPredBBVisited(*ibb, prevLevel)) { + ibb->SetInternalFlag2(prevLevel + 1); + workList.push(ibb); + DEBUG_ASSERT(ibb->GetId() < cgfunc->NumBBs(), "GCRA::BFS visitedBBs overflow"); + visitedBBs[ibb->GetId()] = true; + } + } + } while (!workList.empty()); +} + +void Bfs::ComputeBlockOrder() +{ + visitedBBs.clear(); + sortedBBs.clear(); + visitedBBs.resize(cgfunc->NumBBs()); + for (uint32 i = 0; i < cgfunc->NumBBs(); ++i) { + visitedBBs[i] = false; + } + BB *cleanupBB = nullptr; + FOR_ALL_BB(bb, cgfunc) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(1); + if (bb->GetFirstStmt() == cgfunc->GetCleanupLabel()) { + cleanupBB = bb; + } + } + for (BB *bb = cleanupBB; bb != nullptr; bb = bb->GetNext()) { + bb->SetInternalFlag1(1); + } + + bool changed; + size_t sortedCnt = 0; + bool done = false; + do { + changed = false; + FOR_ALL_BB(bb, cgfunc) { + if (bb->GetInternalFlag1() == 1) { + continue; + } + if (visitedBBs[bb->GetId()]) { + continue; + } + changed = true; + long prevLevel = 0; + if (AllPredBBVisited(*bb, prevLevel)) { + bb->SetInternalFlag2(prevLevel + 1); + BFS(*bb); + } + } + /* Make sure there is no infinite loop. 
*/ + if (sortedCnt == sortedBBs.size()) { + if (!done) { + done = true; + } else { + LogInfo::MapleLogger() << "Error: RA BFS loop " << sortedCnt << " in func " << cgfunc->GetName() + << "\n"; + } + } + sortedCnt = sortedBBs.size(); + } while (changed); + + for (BB *bb = cleanupBB; bb != nullptr; bb = bb->GetNext()) { + sortedBBs.push_back(bb); + } +} + +void CgBBSort::GetAnalysisDependence(AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.AddRequired(); + aDep.SetPreservedAll(); +} + +bool CgBBSort::PhaseRun(CGFunc &f) +{ + MemPool *memPool = GetPhaseMemPool(); + bfs = memPool->New(f, *memPool); + CHECK_FATAL(bfs != nullptr, "NIY, ptr null check."); + bfs->ComputeBlockOrder(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgBBSort, bbsort) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgfunc.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c43a520138a006563907df70b70c1ba5b33e10e1 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/cgfunc.cpp @@ -0,0 +1,2543 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "cgfunc.h" +#if DEBUG +#include +#endif +#include "cg.h" +#include "insn.h" +#include "loop.h" +#include "mir_builder.h" +#include "factory.h" +#include "debug_info.h" +#include "optimize_common.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (GetMirModule().IsJavaModule()) + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &dreadNode = static_cast(expr); + return cgFunc.SelectDread(parent, dreadNode); +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + auto ®ReadNode = static_cast(expr); + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &cgFunc.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return cgFunc.SelectRegread(regReadNode); +} + +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &constValNode = static_cast(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast(mirConst); + return cgFunc.SelectIntConst(*mirIntConst); + } else if (mirConst->GetKind() == kConstFloatConst) { + auto *mirFloatConst = safe_cast(mirConst); + return cgFunc.SelectFloatConst(*mirFloatConst, parent); + } else if (mirConst->GetKind() == kConstDoubleConst) { + auto *mirDoubleConst = safe_cast(mirConst); + return cgFunc.SelectDoubleConst(*mirDoubleConst, parent); + } else { + CHECK_FATAL(false, "NYI"); + } + return nullptr; +} + +Operand *HandleConstStr(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + auto &constStrNode = static_cast(expr); +#if TARGAARCH64 || 
TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleConstStr16(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + auto &constStr16Node = static_cast(expr); +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(0)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && !IsPrimitiveFloat(expr.GetPrimType()) && + expr.Opnd(0)->Opnd(0)->GetOpCode() != OP_constval && expr.Opnd(0)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd( + static_cast(expr), *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(1)), *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(1)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && !IsPrimitiveFloat(expr.GetPrimType()) && + expr.Opnd(1)->Opnd(0)->GetOpCode() != OP_constval && expr.Opnd(1)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd( + static_cast(expr), *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(1)), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + } else { + return cgFunc.SelectAdd(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return &cgFunc.SelectCGArrayElemAdd(static_cast(expr), parent); +} + +BaseNode *IsConstantInVectorFromScalar(BaseNode *expr) +{ + if (expr->op != OP_intrinsicop) { + return nullptr; + } + IntrinsicopNode *intrn = static_cast(expr); + switch (intrn->GetIntrinsic()) { + case INTRN_vector_from_scalar_v8u8: + case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: + case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: + case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: + case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: + case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: + case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: + case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: + case 
INTRN_vector_from_scalar_v2i64: { + if (intrn->Opnd(0) != nullptr && intrn->Opnd(0)->op == OP_constval) { + return intrn->Opnd(0); + } + break; + } + default: + break; + } + return nullptr; +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + BaseNode *cExpr = IsConstantInVectorFromScalar(expr.Opnd(1)); + if (cExpr == nullptr) { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(1), *cExpr), parent); + } +} + +Operand *HandleRor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectRor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectMpy(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectDiv(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectRem(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &addrofNode = static_cast(expr); + return cgFunc.SelectAddrof(addrofNode, parent, false); +} + +Operand *HandleAddrofoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &addrofoffNode = static_cast(expr); + return cgFunc.SelectAddrofoff(addrofoffNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &addroffuncNode = static_cast(expr); + return &cgFunc.SelectAddrofFunc(addroffuncNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &addrofLabelNode = static_cast(expr); + return &cgFunc.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIread(parent, ireadNode); +} + +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleIreadfpoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadfpoff(parent, ireadNode); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectSub(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectBand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectBior(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const 
BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectBxor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectAbs(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectBnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + ExtractbitsNode &node = static_cast(expr); + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + if (!CGOptions::IsBigEndian() && (bitSize == k8BitSize || bitSize == k16BitSize) && + GetPrimTypeBitSize(node.GetPrimType()) != k64BitSize && + (bitOffset == 0 || bitOffset == k8BitSize || bitOffset == k16BitSize || bitOffset == k24BitSize) && + expr.Opnd(0)->GetOpCode() == OP_iread && node.GetOpCode() == OP_extractbits) { + return cgFunc.SelectRegularBitFieldLoad(node, parent); + } + return cgFunc.SelectExtractbits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + parent); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectDepositBits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectLnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleLand(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectLand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + if (parent.IsCondBr()) { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent, true); + } else { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectMin(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectMax(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectNeg(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleRecip(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectRecip(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleSqrt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectSqrt(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleCeil(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectCeil(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleFloor(const BaseNode &parent, BaseNode 
&expr, CGFunc &cgFunc) +{ + return cgFunc.SelectFloor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectRetype(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectCvt(parent, static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleRound(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectRound(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + return cgFunc.SelectTrunc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +static bool HasCompare(const BaseNode *expr) +{ + if (kOpcodeInfo.IsCompare(expr->GetOpCode())) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasCompare(expr->Opnd(i))) { + return true; + } + } + return false; +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + bool hasCompare = false; + if (HasCompare(expr.Opnd(1)) || HasCompare(expr.Opnd(2))) { + hasCompare = true; + } + Operand &trueOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(2)); + Operand *cond = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + return cgFunc.SelectSelect(static_cast(expr), *cond, trueOpnd, falseOpnd, parent, hasCompare); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + // fix opnd type before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast(parent).FromType(); + } + if (IsPrimitiveInteger(targetPtyp) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return cgFunc.SelectCmpOp(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectAlloca(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleGCMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectGCMalloc(static_cast(expr)); +} + +Operand *HandleJarrayMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + (void)parent; + return cgFunc.SelectJarrayMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +/* Neon intrinsic handling */ +Operand *HandleVectorAddLong(const BaseNode &expr, CGFunc &cgFunc, bool isLow) +{ + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddLong(expr.GetPrimType(), o1, o2, expr.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorAddWiden(const BaseNode &expr, CGFunc &cgFunc, bool isLow) +{ + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = 
cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddWiden(o1, expr.Opnd(0)->GetPrimType(), o2, expr.Opnd(1)->GetPrimType(), isLow); +} + +Operand *HandleVectorFromScalar(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + return cgFunc.SelectVectorFromScalar(intrnNode.GetPrimType(), cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)), + intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorAbsSubL(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) +{ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorAbsSubL(intrnNode.GetPrimType(), opnd1, opnd2, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorMerge(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand2 */ + BaseNode *index = intrnNode.Opnd(2); /* index operand */ + int32 iNum = 0; + if (index->GetOpCode() == OP_constval) { + MIRConst *mirConst = static_cast(index)->GetConstVal(); + iNum = static_cast(safe_cast(mirConst)->GetExtValue()); + PrimType ty = intrnNode.Opnd(0)->GetPrimType(); + if (!IsPrimitiveVector(ty)) { + iNum = 0; + } else { + iNum *= GetPrimTypeSize(ty) / GetVecLanes(ty); /* 64x2: 0-1 -> 0-8 */ + } + } else { /* 32x4: 0-3 -> 0-12 */ + CHECK_FATAL(0, "VectorMerge does not have const index"); + } + return cgFunc.SelectVectorMerge(intrnNode.GetPrimType(), opnd1, opnd2, iNum); +} + +Operand *HandleVectorGetHigh(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, false); +} + +Operand *HandleVectorGetLow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, true); +} + +Operand *HandleVectorGetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + PrimType o1Type = intrnNode.Opnd(0)->GetPrimType(); + Operand *opndLane = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + int32 laneNum = -1; + if (opndLane->IsConstImmediate()) { + MIRConst *mirConst = static_cast(intrnNode.Opnd(1))->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorGetElement does not have lane const"); + } + return cgFunc.SelectVectorGetElement(intrnNode.GetPrimType(), opnd1, o1Type, laneNum); +} + +Operand *HandleVectorPairwiseAdd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + Operand *src = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector src operand */ + PrimType sType = intrnNode.Opnd(0)->GetPrimType(); + return cgFunc.SelectVectorPairwiseAdd(intrnNode.GetPrimType(), src, sType); +} + +Operand *HandleVectorPairwiseAdalp(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + BaseNode *arg1 = intrnNode.Opnd(0); + BaseNode *arg2 = intrnNode.Opnd(1); + Operand *src1 = cgFunc.HandleExpr(intrnNode, *arg1); /* vector src operand 1 */ + Operand *src2 = cgFunc.HandleExpr(intrnNode, *arg2); /* vector src 
operand 2 */ + return cgFunc.SelectVectorPairwiseAdalp(src1, arg1->GetPrimType(), src2, arg2->GetPrimType()); +} + +Operand *HandleVectorSetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + BaseNode *arg0 = intrnNode.Opnd(0); /* uint32_t operand */ + Operand *opnd0 = cgFunc.HandleExpr(intrnNode, *arg0); + PrimType aType = arg0->GetPrimType(); + + BaseNode *arg1 = intrnNode.Opnd(1); /* vector operand == result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *arg1); + PrimType vType = arg1->GetPrimType(); + + BaseNode *arg2 = intrnNode.Opnd(2); /* lane const operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *arg2); + int32 laneNum = -1; + if (opnd2->IsConstImmediate()) { + MIRConst *mirConst = static_cast(arg2)->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorSetElement does not have lane const"); + } + return cgFunc.SelectVectorSetElement(opnd0, aType, opnd1, vType, laneNum); +} + +Operand *HandleVectorReverse(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, uint32 size) +{ + BaseNode *argExpr = intrnNode.Opnd(0); /* src operand */ + Operand *src = cgFunc.HandleExpr(intrnNode, *argExpr); + MIRType *type = intrnNode.GetIntrinDesc().GetReturnType(); + DEBUG_ASSERT(type != nullptr, "null ptr check"); + auto revVecType = type->GetPrimType(); + return cgFunc.SelectVectorReverse(revVecType, src, revVecType, size); +} + +Operand *HandleVectorShiftNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) +{ + PrimType rType = intrnNode.GetPrimType(); /* vector result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* shift const */ + if (!opnd2->IsConstImmediate()) { + CHECK_FATAL(0, "VectorShiftNarrow does not have shift const"); + } + return cgFunc.SelectVectorShiftRNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, isLow); +} + +Operand *HandleVectorSubWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow, bool isWide) +{ + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *o1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + return cgFunc.SelectVectorSubWiden(resType, o1, intrnNode.Opnd(0)->GetPrimType(), o2, + intrnNode.Opnd(1)->GetPrimType(), isLow, isWide); +} + +Operand *HandleVectorSum(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorSum(resType, opnd1, intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorTableLookup(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorTableLookup(rType, opnd1, opnd2); +} + +Operand *HandleVectorMadd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) +{ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + Operand *opnd3 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(2)); /* vector operand 3 */ + PrimType oTyp1 = 
intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + PrimType oTyp3 = intrnNode.Opnd(2)->GetPrimType(); + return cgFunc.SelectVectorMadd(opnd1, oTyp1, opnd2, oTyp2, opnd3, oTyp3); +} + +Operand *HandleVectorMull(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + PrimType oTyp1 = intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + return cgFunc.SelectVectorMull(rType, opnd1, oTyp1, opnd2, oTyp2, isLow); +} + +Operand *HandleVectorNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + if (isLow) { + return cgFunc.SelectVectorNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType()); + } else { + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector opnd 2 */ + return cgFunc.SelectVectorNarrow2(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, + intrnNode.Opnd(1)->GetPrimType()); + } +} + +Operand *HandleVectorWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) +{ + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + return cgFunc.SelectVectorWiden(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) +{ + auto &intrinsicopNode = static_cast(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_MPL_READ_OVTABLE_ENTRY_LAZY: { + Operand *srcOpnd = cgFunc.HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + return cgFunc.SelectLazyLoad(*srcOpnd, intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_STATIC_OFFSET_TAB: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLazyLoadStatic(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLoadArrayClassCache(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + // double + case INTRN_C_sin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sin"); + case INTRN_C_sinh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinh"); + case INTRN_C_asin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asin"); + case INTRN_C_cos: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cos"); + case INTRN_C_cosh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosh"); + case INTRN_C_acos: + return 
cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acos"); + case INTRN_C_atan: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atan"); + case INTRN_C_exp: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "exp"); + case INTRN_C_log: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log"); + case INTRN_C_log10: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10"); + // float + case INTRN_C_sinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinf"); + case INTRN_C_sinhf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinhf"); + case INTRN_C_asinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asinf"); + case INTRN_C_cosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosf"); + case INTRN_C_coshf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "coshf"); + case INTRN_C_acosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acosf"); + case INTRN_C_atanf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atanf"); + case INTRN_C_expf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "expf"); + case INTRN_C_logf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "logf"); + case INTRN_C_log10f: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10f"); + // int + case INTRN_C_ffs: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "ffs"); + // libc mem* and str* functions as intrinsicops + case INTRN_C_memcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "memcmp"); + case INTRN_C_strlen: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_u64, "strlen"); + case INTRN_C_strcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strcmp"); + case INTRN_C_strncmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strncmp"); + case INTRN_C_strchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strchr"); + case INTRN_C_strrchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strrchr"); + + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return cgFunc.SelectBswap(intrinsicopNode, *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + + case INTRN_C_clz32: + case INTRN_C_clz64: + return cgFunc.SelectCclz(intrinsicopNode); + case INTRN_C_ctz32: + case INTRN_C_ctz64: + return cgFunc.SelectCctz(intrinsicopNode); + case INTRN_C_popcount32: + case INTRN_C_popcount64: + return cgFunc.SelectCpopcount(intrinsicopNode); + case INTRN_C_parity32: + case INTRN_C_parity64: + return cgFunc.SelectCparity(intrinsicopNode); + case INTRN_C_clrsb32: + case INTRN_C_clrsb64: + return cgFunc.SelectCclrsb(intrinsicopNode); + case INTRN_C_isaligned: + return cgFunc.SelectCisaligned(intrinsicopNode); + case INTRN_C_alignup: + return cgFunc.SelectCalignup(intrinsicopNode); + case INTRN_C_aligndown: + return cgFunc.SelectCaligndown(intrinsicopNode); + case INTRN_C___sync_add_and_fetch_1: + case INTRN_C___sync_add_and_fetch_2: + case INTRN_C___sync_add_and_fetch_4: + case INTRN_C___sync_add_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, false); + case INTRN_C___sync_sub_and_fetch_1: + case INTRN_C___sync_sub_and_fetch_2: + case INTRN_C___sync_sub_and_fetch_4: + case INTRN_C___sync_sub_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, false); + case INTRN_C___sync_fetch_and_add_1: + case 
INTRN_C___sync_fetch_and_add_2: + case INTRN_C___sync_fetch_and_add_4: + case INTRN_C___sync_fetch_and_add_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, true); + case INTRN_C___sync_fetch_and_sub_1: + case INTRN_C___sync_fetch_and_sub_2: + case INTRN_C___sync_fetch_and_sub_4: + case INTRN_C___sync_fetch_and_sub_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, true); + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: + case INTRN_C___sync_bool_compare_and_swap_4: + case INTRN_C___sync_bool_compare_and_swap_8: + return cgFunc.SelectCSyncBoolCmpSwap(intrinsicopNode); + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: + case INTRN_C___sync_val_compare_and_swap_4: + case INTRN_C___sync_val_compare_and_swap_8: + return cgFunc.SelectCSyncValCmpSwap(intrinsicopNode); + case INTRN_C___sync_lock_test_and_set_1: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i8); + case INTRN_C___sync_lock_test_and_set_2: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i16); + case INTRN_C___sync_lock_test_and_set_4: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i32); + case INTRN_C___sync_lock_test_and_set_8: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i64); + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case INTRN_C___sync_fetch_and_and_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, true); + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, false); + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, true); + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, false); + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, true); + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, false); + case INTRN_C___sync_synchronize: + return cgFunc.SelectCSyncSynchronize(intrinsicopNode); + case INTRN_C___atomic_load_n: + return cgFunc.SelectCAtomicLoadN(intrinsicopNode); + case INTRN_C___atomic_exchange_n: + return cgFunc.SelectCAtomicExchangeN(intrinsicopNode); + case INTRN_C__builtin_return_address: + case INTRN_C__builtin_extract_return_addr: + return cgFunc.SelectCReturnAddress(intrinsicopNode); + + case INTRN_vector_abs_v8i8: + case INTRN_vector_abs_v4i16: + case INTRN_vector_abs_v2i32: + case INTRN_vector_abs_v1i64: + case INTRN_vector_abs_v16i8: + case INTRN_vector_abs_v8i16: + case INTRN_vector_abs_v4i32: + case INTRN_vector_abs_v2i64: + return HandleAbs(parent, intrinsicopNode, cgFunc); + + case INTRN_vector_addl_low_v8i8: + case INTRN_vector_addl_low_v8u8: + case INTRN_vector_addl_low_v4i16: + case INTRN_vector_addl_low_v4u16: + case INTRN_vector_addl_low_v2i32: + case 
INTRN_vector_addl_low_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addl_high_v8i8: + case INTRN_vector_addl_high_v8u8: + case INTRN_vector_addl_high_v4i16: + case INTRN_vector_addl_high_v4u16: + case INTRN_vector_addl_high_v2i32: + case INTRN_vector_addl_high_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, false); + + case INTRN_vector_addw_low_v8i8: + case INTRN_vector_addw_low_v8u8: + case INTRN_vector_addw_low_v4i16: + case INTRN_vector_addw_low_v4u16: + case INTRN_vector_addw_low_v2i32: + case INTRN_vector_addw_low_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addw_high_v8i8: + case INTRN_vector_addw_high_v8u8: + case INTRN_vector_addw_high_v4i16: + case INTRN_vector_addw_high_v4u16: + case INTRN_vector_addw_high_v2i32: + case INTRN_vector_addw_high_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, false); + + case INTRN_vector_sum_v8u8: + case INTRN_vector_sum_v8i8: + case INTRN_vector_sum_v4u16: + case INTRN_vector_sum_v4i16: + case INTRN_vector_sum_v2u32: + case INTRN_vector_sum_v2i32: + case INTRN_vector_sum_v16u8: + case INTRN_vector_sum_v16i8: + case INTRN_vector_sum_v8u16: + case INTRN_vector_sum_v8i16: + case INTRN_vector_sum_v4u32: + case INTRN_vector_sum_v4i32: + case INTRN_vector_sum_v2u64: + case INTRN_vector_sum_v2i64: + return HandleVectorSum(intrinsicopNode, cgFunc); + + case INTRN_vector_from_scalar_v8u8: + case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: + case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: + case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: + case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: + case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: + case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: + case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: + case INTRN_vector_from_scalar_v2i64: + return HandleVectorFromScalar(intrinsicopNode, cgFunc); + + case INTRN_vector_labssub_low_v8u8: + case INTRN_vector_labssub_low_v8i8: + case INTRN_vector_labssub_low_v4u16: + case INTRN_vector_labssub_low_v4i16: + case INTRN_vector_labssub_low_v2u32: + case INTRN_vector_labssub_low_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, true); + + case INTRN_vector_labssub_high_v8u8: + case INTRN_vector_labssub_high_v8i8: + case INTRN_vector_labssub_high_v4u16: + case INTRN_vector_labssub_high_v4i16: + case INTRN_vector_labssub_high_v2u32: + case INTRN_vector_labssub_high_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, false); + + case INTRN_vector_merge_v8u8: + case INTRN_vector_merge_v8i8: + case INTRN_vector_merge_v4u16: + case INTRN_vector_merge_v4i16: + case INTRN_vector_merge_v2u32: + case INTRN_vector_merge_v2i32: + case INTRN_vector_merge_v1u64: + case INTRN_vector_merge_v1i64: + case INTRN_vector_merge_v16u8: + case INTRN_vector_merge_v16i8: + case INTRN_vector_merge_v8u16: + case INTRN_vector_merge_v8i16: + case INTRN_vector_merge_v4u32: + case INTRN_vector_merge_v4i32: + case INTRN_vector_merge_v2u64: + case INTRN_vector_merge_v2i64: + return HandleVectorMerge(intrinsicopNode, cgFunc); + + case INTRN_vector_set_element_v8u8: + case INTRN_vector_set_element_v8i8: + case INTRN_vector_set_element_v4u16: + case INTRN_vector_set_element_v4i16: + case INTRN_vector_set_element_v2u32: + case INTRN_vector_set_element_v2i32: + case INTRN_vector_set_element_v1u64: 
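+        // Illustrative note (comment added for clarity, not in the original source):
+        // the intrinsic suffix encodes the vector shape, e.g. v8u8 = 8 lanes of u8
+        // (a 64-bit vector) and v2i64 = 2 lanes of i64 (a 128-bit vector); every
+        // shape of the same operation funnels into one shared handler.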
+ case INTRN_vector_set_element_v1i64: + case INTRN_vector_set_element_v16u8: + case INTRN_vector_set_element_v16i8: + case INTRN_vector_set_element_v8u16: + case INTRN_vector_set_element_v8i16: + case INTRN_vector_set_element_v4u32: + case INTRN_vector_set_element_v4i32: + case INTRN_vector_set_element_v2u64: + case INTRN_vector_set_element_v2i64: + return HandleVectorSetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_get_high_v16u8: + case INTRN_vector_get_high_v16i8: + case INTRN_vector_get_high_v8u16: + case INTRN_vector_get_high_v8i16: + case INTRN_vector_get_high_v4u32: + case INTRN_vector_get_high_v4i32: + case INTRN_vector_get_high_v2u64: + case INTRN_vector_get_high_v2i64: + return HandleVectorGetHigh(intrinsicopNode, cgFunc); + + case INTRN_vector_get_low_v16u8: + case INTRN_vector_get_low_v16i8: + case INTRN_vector_get_low_v8u16: + case INTRN_vector_get_low_v8i16: + case INTRN_vector_get_low_v4u32: + case INTRN_vector_get_low_v4i32: + case INTRN_vector_get_low_v2u64: + case INTRN_vector_get_low_v2i64: + return HandleVectorGetLow(intrinsicopNode, cgFunc); + + case INTRN_vector_get_element_v8u8: + case INTRN_vector_get_element_v8i8: + case INTRN_vector_get_element_v4u16: + case INTRN_vector_get_element_v4i16: + case INTRN_vector_get_element_v2u32: + case INTRN_vector_get_element_v2i32: + case INTRN_vector_get_element_v1u64: + case INTRN_vector_get_element_v1i64: + case INTRN_vector_get_element_v16u8: + case INTRN_vector_get_element_v16i8: + case INTRN_vector_get_element_v8u16: + case INTRN_vector_get_element_v8i16: + case INTRN_vector_get_element_v4u32: + case INTRN_vector_get_element_v4i32: + case INTRN_vector_get_element_v2u64: + case INTRN_vector_get_element_v2i64: + return HandleVectorGetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_adalp_v8i8: + case INTRN_vector_pairwise_adalp_v4i16: + case INTRN_vector_pairwise_adalp_v2i32: + case INTRN_vector_pairwise_adalp_v8u8: + case INTRN_vector_pairwise_adalp_v4u16: + case INTRN_vector_pairwise_adalp_v2u32: + case INTRN_vector_pairwise_adalp_v16i8: + case INTRN_vector_pairwise_adalp_v8i16: + case INTRN_vector_pairwise_adalp_v4i32: + case INTRN_vector_pairwise_adalp_v16u8: + case INTRN_vector_pairwise_adalp_v8u16: + case INTRN_vector_pairwise_adalp_v4u32: + return HandleVectorPairwiseAdalp(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_add_v8u8: + case INTRN_vector_pairwise_add_v8i8: + case INTRN_vector_pairwise_add_v4u16: + case INTRN_vector_pairwise_add_v4i16: + case INTRN_vector_pairwise_add_v2u32: + case INTRN_vector_pairwise_add_v2i32: + case INTRN_vector_pairwise_add_v16u8: + case INTRN_vector_pairwise_add_v16i8: + case INTRN_vector_pairwise_add_v8u16: + case INTRN_vector_pairwise_add_v8i16: + case INTRN_vector_pairwise_add_v4u32: + case INTRN_vector_pairwise_add_v4i32: + return HandleVectorPairwiseAdd(intrinsicopNode, cgFunc); + + case INTRN_vector_madd_v8u8: + case INTRN_vector_madd_v8i8: + case INTRN_vector_madd_v4u16: + case INTRN_vector_madd_v4i16: + case INTRN_vector_madd_v2u32: + case INTRN_vector_madd_v2i32: + return HandleVectorMadd(intrinsicopNode, cgFunc); + + case INTRN_vector_mull_low_v8u8: + case INTRN_vector_mull_low_v8i8: + case INTRN_vector_mull_low_v4u16: + case INTRN_vector_mull_low_v4i16: + case INTRN_vector_mull_low_v2u32: + case INTRN_vector_mull_low_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, true); + + case INTRN_vector_mull_high_v8u8: + case INTRN_vector_mull_high_v8i8: + case INTRN_vector_mull_high_v4u16: + case INTRN_vector_mull_high_v4i16: + case 
INTRN_vector_mull_high_v2u32: + case INTRN_vector_mull_high_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, false); + + case INTRN_vector_narrow_low_v8u16: + case INTRN_vector_narrow_low_v8i16: + case INTRN_vector_narrow_low_v4u32: + case INTRN_vector_narrow_low_v4i32: + case INTRN_vector_narrow_low_v2u64: + case INTRN_vector_narrow_low_v2i64: + return HandleVectorNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_narrow_high_v8u16: + case INTRN_vector_narrow_high_v8i16: + case INTRN_vector_narrow_high_v4u32: + case INTRN_vector_narrow_high_v4i32: + case INTRN_vector_narrow_high_v2u64: + case INTRN_vector_narrow_high_v2i64: + return HandleVectorNarrow(intrinsicopNode, cgFunc, false); + + case INTRN_vector_reverse_v8u8: + case INTRN_vector_reverse_v8i8: + case INTRN_vector_reverse_v4u16: + case INTRN_vector_reverse_v4i16: + case INTRN_vector_reverse_v16u8: + case INTRN_vector_reverse_v16i8: + case INTRN_vector_reverse_v8u16: + case INTRN_vector_reverse_v8i16: + return HandleVectorReverse(intrinsicopNode, cgFunc, k32BitSize); + + case INTRN_vector_reverse16_v16u8: + case INTRN_vector_reverse16_v16i8: + case INTRN_vector_reverse16_v8u8: + case INTRN_vector_reverse16_v8i8: + return HandleVectorReverse(intrinsicopNode, cgFunc, k16BitSize); + + case INTRN_vector_reverse64_v16u8: + case INTRN_vector_reverse64_v16i8: + case INTRN_vector_reverse64_v8u8: + case INTRN_vector_reverse64_v8i8: + case INTRN_vector_reverse64_v8u16: + case INTRN_vector_reverse64_v8i16: + case INTRN_vector_reverse64_v4u16: + case INTRN_vector_reverse64_v4i16: + case INTRN_vector_reverse64_v4u32: + case INTRN_vector_reverse64_v4i32: + case INTRN_vector_reverse64_v2u32: + case INTRN_vector_reverse64_v2i32: + return HandleVectorReverse(intrinsicopNode, cgFunc, k64BitSize); + + case INTRN_vector_shr_narrow_low_v8u16: + case INTRN_vector_shr_narrow_low_v8i16: + case INTRN_vector_shr_narrow_low_v4u32: + case INTRN_vector_shr_narrow_low_v4i32: + case INTRN_vector_shr_narrow_low_v2u64: + case INTRN_vector_shr_narrow_low_v2i64: + return HandleVectorShiftNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_subl_low_v8i8: + case INTRN_vector_subl_low_v8u8: + case INTRN_vector_subl_low_v4i16: + case INTRN_vector_subl_low_v4u16: + case INTRN_vector_subl_low_v2i32: + case INTRN_vector_subl_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, false); + + case INTRN_vector_subl_high_v8i8: + case INTRN_vector_subl_high_v8u8: + case INTRN_vector_subl_high_v4i16: + case INTRN_vector_subl_high_v4u16: + case INTRN_vector_subl_high_v2i32: + case INTRN_vector_subl_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, false); + + case INTRN_vector_subw_low_v8i8: + case INTRN_vector_subw_low_v8u8: + case INTRN_vector_subw_low_v4i16: + case INTRN_vector_subw_low_v4u16: + case INTRN_vector_subw_low_v2i32: + case INTRN_vector_subw_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, true); + + case INTRN_vector_subw_high_v8i8: + case INTRN_vector_subw_high_v8u8: + case INTRN_vector_subw_high_v4i16: + case INTRN_vector_subw_high_v4u16: + case INTRN_vector_subw_high_v2i32: + case INTRN_vector_subw_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, true); + + case INTRN_vector_table_lookup_v8u8: + case INTRN_vector_table_lookup_v8i8: + case INTRN_vector_table_lookup_v16u8: + case INTRN_vector_table_lookup_v16i8: + return HandleVectorTableLookup(intrinsicopNode, cgFunc); + + case INTRN_vector_widen_low_v8u8: + case INTRN_vector_widen_low_v8i8: + 
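+        // Illustrative note (comment added for clarity, not in the original source):
+        // per the isLow flag threaded through HandleVectorWiden below, *_low
+        // variants widen the low half of the source lanes and *_high variants
+        // the upper half (true selects the low half).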
case INTRN_vector_widen_low_v4u16: + case INTRN_vector_widen_low_v4i16: + case INTRN_vector_widen_low_v2u32: + case INTRN_vector_widen_low_v2i32: + return HandleVectorWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_widen_high_v8u8: + case INTRN_vector_widen_high_v8i8: + case INTRN_vector_widen_high_v4u16: + case INTRN_vector_widen_high_v4i16: + case INTRN_vector_widen_high_v2u32: + case INTRN_vector_widen_high_v2i32: + return HandleVectorWiden(intrinsicopNode, cgFunc, false); + + default: + DEBUG_ASSERT(false, "Should not reach here."); + return nullptr; + } +} + +using HandleExprFactory = FunctionFactory; +void InitHandleExprFactory() +{ + RegisterFactoryFunction(OP_dread, HandleDread); + RegisterFactoryFunction(OP_regread, HandleRegread); + RegisterFactoryFunction(OP_constval, HandleConstVal); + RegisterFactoryFunction(OP_conststr, HandleConstStr); + RegisterFactoryFunction(OP_conststr16, HandleConstStr16); + RegisterFactoryFunction(OP_add, HandleAdd); + RegisterFactoryFunction(OP_CG_array_elem_add, HandleCGArrayElemAdd); + RegisterFactoryFunction(OP_ashr, HandleShift); + RegisterFactoryFunction(OP_lshr, HandleShift); + RegisterFactoryFunction(OP_shl, HandleShift); + RegisterFactoryFunction(OP_ror, HandleRor); + RegisterFactoryFunction(OP_mul, HandleMpy); + RegisterFactoryFunction(OP_div, HandleDiv); + RegisterFactoryFunction(OP_rem, HandleRem); + RegisterFactoryFunction(OP_addrof, HandleAddrof); + RegisterFactoryFunction(OP_addrofoff, HandleAddrofoff); + RegisterFactoryFunction(OP_addroffunc, HandleAddroffunc); + RegisterFactoryFunction(OP_addroflabel, HandleAddrofLabel); + RegisterFactoryFunction(OP_iread, HandleIread); + RegisterFactoryFunction(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction(OP_ireadfpoff, HandleIreadfpoff); + RegisterFactoryFunction(OP_sub, HandleSub); + RegisterFactoryFunction(OP_band, HandleBand); + RegisterFactoryFunction(OP_bior, HandleBior); + RegisterFactoryFunction(OP_bxor, HandleBxor); + RegisterFactoryFunction(OP_abs, HandleAbs); + RegisterFactoryFunction(OP_bnot, HandleBnot); + RegisterFactoryFunction(OP_sext, HandleExtractBits); + RegisterFactoryFunction(OP_zext, HandleExtractBits); + RegisterFactoryFunction(OP_extractbits, HandleExtractBits); + RegisterFactoryFunction(OP_depositbits, HandleDepositBits); + RegisterFactoryFunction(OP_lnot, HandleLnot); + RegisterFactoryFunction(OP_land, HandleLand); + RegisterFactoryFunction(OP_lior, HandleLor); + RegisterFactoryFunction(OP_min, HandleMin); + RegisterFactoryFunction(OP_max, HandleMax); + RegisterFactoryFunction(OP_neg, HandleNeg); + RegisterFactoryFunction(OP_recip, HandleRecip); + RegisterFactoryFunction(OP_sqrt, HandleSqrt); + RegisterFactoryFunction(OP_ceil, HandleCeil); + RegisterFactoryFunction(OP_floor, HandleFloor); + RegisterFactoryFunction(OP_retype, HandleRetype); + RegisterFactoryFunction(OP_cvt, HandleCvt); + RegisterFactoryFunction(OP_round, HandleRound); + RegisterFactoryFunction(OP_trunc, HandleTrunc); + RegisterFactoryFunction(OP_select, HandleSelect); + RegisterFactoryFunction(OP_le, HandleCmp); + RegisterFactoryFunction(OP_ge, HandleCmp); + RegisterFactoryFunction(OP_gt, HandleCmp); + RegisterFactoryFunction(OP_lt, HandleCmp); + RegisterFactoryFunction(OP_ne, HandleCmp); + RegisterFactoryFunction(OP_eq, HandleCmp); + RegisterFactoryFunction(OP_cmp, HandleCmp); + RegisterFactoryFunction(OP_cmpl, HandleCmp); + RegisterFactoryFunction(OP_cmpg, HandleCmp); + RegisterFactoryFunction(OP_alloca, HandleAlloca); + RegisterFactoryFunction(OP_malloc, HandleMalloc); + 
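+    // Illustrative note (comment added for clarity, not in the original source):
+    // the registrations in this function build an opcode-indexed dispatch table;
+    // CGFunc::HandleExpr, later in this file, consumes it roughly as:
+    //     auto fn = CreateProductFunction<HandleExprFactory>(expr.GetOpCode());
+    //     Operand *res = fn(parent, expr, cgFunc);  // e.g. OP_add -> HandleAdd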
+    RegisterFactoryFunction(OP_gcmalloc, HandleGCMalloc);
+    RegisterFactoryFunction(OP_gcpermalloc, HandleGCMalloc);
+    RegisterFactoryFunction(OP_gcmallocjarray, HandleJarrayMalloc);
+    RegisterFactoryFunction(OP_gcpermallocjarray, HandleJarrayMalloc);
+    RegisterFactoryFunction(OP_intrinsicop, HandleIntrinOp);
+}
+
+void HandleLabel(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_label, "error");
+    auto &label = static_cast<LabelNode&>(stmt);
+    BB *newBB = cgFunc.StartNewBBImpl(false, label);
+    newBB->AddLabel(label.GetLabelIdx());
+    if (newBB->GetId() == 1) {
+        newBB->SetFrequency(kFreqBase);
+    }
+    cgFunc.SetLab2BBMap(newBB->GetLabIdx(), *newBB);
+    cgFunc.SetCurBB(*newBB);
+}
+
+void HandleGoto(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &gotoNode = static_cast<GotoNode&>(stmt);
+    cgFunc.SetCurBBKind(BB::kBBGoto);
+    cgFunc.SelectGoto(gotoNode);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(gotoNode));
+    DEBUG_ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNode");
+
+    if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) {
+        DEBUG_ASSERT(cgFunc.GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt");
+    }
+}
+
+void HandleIgoto(StmtNode &stmt, CGFunc &cgFunc)
+{
+    auto &igotoNode = static_cast<IgotoNode&>(stmt);
+    Operand *targetOpnd = cgFunc.HandleExpr(stmt, *igotoNode.Opnd(0));
+    cgFunc.SelectIgoto(targetOpnd);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(igotoNode));
+}
+
+void HandleCondbr(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &condGotoNode = static_cast<CondGotoNode&>(stmt);
+    BaseNode *condNode = condGotoNode.Opnd(0);
+    DEBUG_ASSERT(condNode != nullptr, "expect first operand of cond br");
+    Opcode condOp = condGotoNode.GetOpCode();
+    if (condNode->GetOpCode() == OP_constval) {
+        /* The branch condition is a constant: either it always fires (lower to an
+         * unconditional goto plus a fresh label) or it never fires (drop it). */
+        auto *constValNode = static_cast<ConstvalNode*>(condNode);
+        if ((constValNode->GetConstVal()->IsZero() && (OP_brfalse == condOp)) ||
+            (!constValNode->GetConstVal()->IsZero() && (OP_brtrue == condOp))) {
+            auto *gotoStmt = cgFunc.GetMemoryPool()->New<GotoNode>(OP_goto);
+            gotoStmt->SetOffset(condGotoNode.GetOffset());
+            HandleGoto(*gotoStmt, cgFunc);
+            auto *labelStmt = cgFunc.GetMemoryPool()->New<LabelNode>();
+            labelStmt->SetLabelIdx(cgFunc.CreateLabel());
+            HandleLabel(*labelStmt, cgFunc);
+        }
+        return;
+    }
+    cgFunc.SetCurBBKind(BB::kBBIf);
+    /* if condNode is not a cmp node, cmp it with zero. */
+    if (!kOpcodeInfo.IsCompare(condNode->GetOpCode())) {
+        Operand *opnd0 = cgFunc.HandleExpr(condGotoNode, *condNode);
+        PrimType primType = condNode->GetPrimType();
+        Operand *zeroOpnd = nullptr;
+        if (IsPrimitiveInteger(primType)) {
+            zeroOpnd = &cgFunc.CreateImmOperand(primType, 0);
+        } else {
+            DEBUG_ASSERT(((PTY_f32 == primType) || (PTY_f64 == primType)),
+                         "we don't support half-precision FP operands yet");
+            zeroOpnd = &cgFunc.CreateImmOperand(primType, 0);
+        }
+        cgFunc.SelectCondGoto(condGotoNode, *opnd0, *zeroOpnd);
+        cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode));
+        return;
+    }
+    /*
+     * Special case:
+     * bgt (cmp (op0, op1), 0) ==>
+     * bgt (op0, op1)
+     * but skip the case cmp(op0, 0)
+     */
+    BaseNode *op0 = condNode->Opnd(0);
+    DEBUG_ASSERT(op0 != nullptr, "get first opnd of a condNode failed");
+    BaseNode *op1 = condNode->Opnd(1);
+    DEBUG_ASSERT(op1 != nullptr, "get second opnd of a condNode failed");
+    if ((op0->GetOpCode() == OP_cmp) && (op1->GetOpCode() == OP_constval)) {
+        auto *constValNode = static_cast<ConstvalNode*>(op1);
+        MIRConst *mirConst = constValNode->GetConstVal();
+        auto *cmpNode = static_cast<CompareNode*>(op0);
+        bool skip = false;
+        if (cmpNode->Opnd(1)->GetOpCode() == OP_constval) {
+            auto *constVal = static_cast<ConstvalNode*>(cmpNode->Opnd(1))->GetConstVal();
+            if (constVal->IsZero()) {
+                skip = true;
+            }
+        }
+        if (!skip && mirConst->IsZero()) {
+            cgFunc.SelectCondSpecialCase1(condGotoNode, *op0);
+            cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode));
+            return;
+        }
+    }
+    /*
+     * Special case:
+     * brfalse(ge (cmpg (op0, op1), 0) ==>
+     * fcmp op1, op2
+     * blo
+     */
+    if ((condGotoNode.GetOpCode() == OP_brfalse) && (condNode->GetOpCode() == OP_ge) && (op0->GetOpCode() == OP_cmpg) &&
+        (op1->GetOpCode() == OP_constval)) {
+        auto *constValNode = static_cast<ConstvalNode*>(op1);
+        MIRConst *mirConst = constValNode->GetConstVal();
+        if (mirConst->IsZero()) {
+            cgFunc.SelectCondSpecialCase2(condGotoNode, *op0);
+            cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode));
+            return;
+        }
+    }
+    Operand *opnd0 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(0));
+    Operand *opnd1 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(1));
+    cgFunc.SelectCondGoto(condGotoNode, *opnd0, *opnd1);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode));
+}
+
+void HandleReturn(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &retNode = static_cast<NaryStmtNode&>(stmt);
+    cgFunc.HandleRetCleanup(retNode);
+    DEBUG_ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1");
+    Operand *opnd = nullptr;
+    if (retNode.NumOpnds() != 0) {
+        if (!cgFunc.GetFunction().StructReturnedInRegs()) {
+            opnd = cgFunc.HandleExpr(retNode, *retNode.Opnd(0));
+        } else {
+            cgFunc.SelectReturnSendOfStructInRegs(retNode.Opnd(0));
+        }
+    }
+    cgFunc.SelectReturn(opnd);
+    cgFunc.SetCurBBKind(BB::kBBReturn);
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(retNode));
+}
+
+void HandleCall(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &callNode = static_cast<CallNode&>(stmt);
+    cgFunc.SelectCall(callNode);
+    if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) {
+        cgFunc.SetCurBB(*cgFunc.StartNewBB(callNode));
+    }
+
+    StmtNode *prevStmt = stmt.GetPrev();
+    if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) {
+        return;
+    }
+    if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) {
+        cgFunc.SetCurBB(*cgFunc.StartNewBBImpl(true, stmt));
+    }
+    cgFunc.HandleCatch();
+}
+
+void HandleICall(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &icallNode = static_cast<IcallNode&>(stmt);
+    cgFunc.GetCurBB()->SetHasCall();
+    Operand 
*opnd0 = cgFunc.HandleExpr(stmt, *icallNode.GetNopndAt(0));
+    cgFunc.SelectIcall(icallNode, *opnd0);
+    if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) {
+        cgFunc.SetCurBB(*cgFunc.StartNewBB(icallNode));
+    }
+}
+
+void HandleIntrinCall(StmtNode &stmt, CGFunc &cgFunc)
+{
+    auto &call = static_cast<IntrinsiccallNode &>(stmt);
+    cgFunc.SelectIntrinCall(call);
+}
+
+void HandleDassign(StmtNode &stmt, CGFunc &cgFunc)
+{
+    auto &dassignNode = static_cast<DassignNode &>(stmt);
+    DEBUG_ASSERT(dassignNode.GetOpCode() == OP_dassign, "expect dassign");
+    BaseNode *rhs = dassignNode.GetRHS();
+    DEBUG_ASSERT(rhs != nullptr, "get rhs of dassignNode failed");
+    if (rhs->GetOpCode() == OP_malloc || rhs->GetOpCode() == OP_alloca) {
+        UnaryStmtNode &uNode = static_cast<UnaryStmtNode &>(stmt);
+        Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *(uNode.Opnd()));
+        cgFunc.SelectDassign(dassignNode, *opnd0);
+        return;
+    } else if (rhs->GetPrimType() == PTY_agg) {
+        cgFunc.SelectAggDassign(dassignNode);
+        return;
+    }
+    bool isSaveRetvalToLocal = false;
+    if (rhs->GetOpCode() == OP_regread) {
+        isSaveRetvalToLocal = (static_cast<RegreadNode *>(rhs)->GetRegIdx() == -kSregRetval0);
+    }
+    Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *rhs);
+    cgFunc.SelectDassign(dassignNode, *opnd0);
+    if (isSaveRetvalToLocal) {
+        cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal();
+    }
+}
+
+void HandleDassignoff(StmtNode &stmt, CGFunc &cgFunc)
+{
+    auto &dassignoffNode = static_cast<DassignoffNode &>(stmt);
+    BaseNode *rhs = dassignoffNode.GetRHS();
+    CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval");
+    Operand *opnd0 = cgFunc.HandleExpr(dassignoffNode, *rhs);
+    cgFunc.SelectDassignoff(dassignoffNode, *opnd0);
+}
+
+void HandleRegassign(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_regassign, "expect regAssign");
+    auto &regAssignNode = static_cast<RegassignNode &>(stmt);
+    bool isSaveRetvalToLocal = false;
+    BaseNode *operand = regAssignNode.Opnd(0);
+    DEBUG_ASSERT(operand != nullptr, "get operand of regassignNode failed");
+    if (operand->GetOpCode() == OP_regread) {
+        isSaveRetvalToLocal = (static_cast<RegreadNode *>(operand)->GetRegIdx() == -kSregRetval0);
+    }
+    Operand *opnd0 = cgFunc.HandleExpr(regAssignNode, *operand);
+    cgFunc.SelectRegassign(regAssignNode, *opnd0);
+    if (isSaveRetvalToLocal) {
+        cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal();
+    }
+}
+
+void HandleIassign(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassign, "expect stmt");
+    auto &iassignNode = static_cast<IassignNode &>(stmt);
+    if ((iassignNode.GetRHS() != nullptr) && iassignNode.GetRHS()->GetPrimType() != PTY_agg) {
+        cgFunc.SelectIassign(iassignNode);
+    } else {
+        BaseNode *addrNode = iassignNode.Opnd(0);
+        if (addrNode == nullptr) {
+            return;
+        }
+        cgFunc.SelectAggIassign(iassignNode, *cgFunc.HandleExpr(stmt, *addrNode));
+    }
+}
+
+void HandleIassignoff(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignoff, "expect iassignoff");
+    auto &iassignoffNode = static_cast<IassignoffNode &>(stmt);
+    cgFunc.SelectIassignoff(iassignoffNode);
+}
+
+void HandleIassignfpoff(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignfpoff, "expect iassignfpoff");
+    auto &iassignfpoffNode = static_cast<IassignFPoffNode &>(stmt);
+    cgFunc.SelectIassignfpoff(iassignfpoffNode, *cgFunc.HandleExpr(stmt, *stmt.Opnd(0)));
+}
+
+void HandleIassignspoff(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_iassignspoff, "expect iassignspoff");
+    auto &baseNode = static_cast<IassignFPoffNode &>(stmt); /* same as FP */
+    BaseNode *rhs = baseNode.GetRHS();
+    DEBUG_ASSERT(rhs != nullptr, "get rhs of iassignspoffNode failed");
+    Operand *opnd0 = cgFunc.HandleExpr(baseNode, *rhs);
+    cgFunc.SelectIassignspoff(baseNode.GetPrimType(), baseNode.GetOffset(), *opnd0);
+}
+
+void HandleBlkassignoff(StmtNode &stmt, CGFunc &cgFunc)
+{
+    DEBUG_ASSERT(stmt.GetOpCode() == OP_blkassignoff, "expect blkassignoff");
+    auto &baseNode = static_cast<BlkassignoffNode &>(stmt);
+    Operand *src = cgFunc.HandleExpr(baseNode, *baseNode.Opnd(1));
+    cgFunc.SelectBlkassignoff(baseNode, src);
+}
+
+void HandleEval(const StmtNode &stmt, CGFunc &cgFunc)
+{
+    (void)cgFunc.HandleExpr(stmt, *static_cast<const UnaryStmtNode &>(stmt).Opnd(0));
+}
+
+void HandleRangeGoto(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.UpdateFrequency(stmt);
+    auto &rangeGotoNode = static_cast<RangeGotoNode &>(stmt);
+    cgFunc.SetCurBBKind(BB::kBBRangeGoto);
+    cgFunc.SelectRangeGoto(rangeGotoNode, *cgFunc.HandleExpr(rangeGotoNode, *rangeGotoNode.Opnd(0)));
+    cgFunc.SetCurBB(*cgFunc.StartNewBB(rangeGotoNode));
+}
+
+void HandleMembar(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.SelectMembar(stmt);
+    if (stmt.GetOpCode() != OP_membarrelease) {
+        return;
+    }
+#if TARGAARCH64 || TARGRISCV64
+    if (CGOptions::UseBarriersForVolatile()) {
+        return;
+    }
+#endif
+    StmtNode *secondStmt = stmt.GetRealNext();
+    if (secondStmt == nullptr || ((secondStmt->GetOpCode() != OP_iassign) && (secondStmt->GetOpCode() != OP_dassign))) {
+        return;
+    }
+    StmtNode *thirdStmt = secondStmt->GetRealNext();
+    if (thirdStmt == nullptr || thirdStmt->GetOpCode() != OP_membarstoreload) {
+        return;
+    }
+    cgFunc.SetVolStore(true);
+    cgFunc.SetVolReleaseInsn(cgFunc.GetCurBB()->GetLastInsn());
+}
+
+void HandleComment(StmtNode &stmt, CGFunc &cgFunc)
+{
+    if (cgFunc.GetCG()->GenerateVerboseAsm() || cgFunc.GetCG()->GenerateVerboseCG()) {
+        cgFunc.SelectComment(static_cast<CommentNode &>(stmt));
+    }
+}
+
+void HandleCatchOp(const StmtNode &stmt, const CGFunc &cgFunc)
+{
+    (void)stmt;
+    (void)cgFunc;
+    DEBUG_ASSERT(stmt.GetNext()->GetOpCode() == OP_call, "The next statement of OP_catch should be OP_call.");
+}
+
+void HandleAssertNull(StmtNode &stmt, CGFunc &cgFunc)
+{
+    auto &cgAssertNode = static_cast<UnaryStmtNode &>(stmt);
+    cgFunc.SelectAssertNull(cgAssertNode);
+}
+
+void HandleAbort(const StmtNode &stmt, CGFunc &cgFunc)
+{
+    (void)stmt;
+    cgFunc.SelectAbort();
+}
+
+void HandleAsm(StmtNode &stmt, CGFunc &cgFunc)
+{
+    cgFunc.SelectAsm(static_cast<AsmNode &>(stmt));
+}
+
+using HandleStmtFactory = FunctionFactory<Opcode, void, StmtNode &, CGFunc &>;
+void InitHandleStmtFactory()
+{
+    RegisterFactoryFunction<HandleStmtFactory>(OP_label, HandleLabel);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_goto, HandleGoto);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_igoto, HandleIgoto);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_brfalse, HandleCondbr);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_brtrue, HandleCondbr);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_return, HandleReturn);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_call, HandleCall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_icall, HandleICall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_icallproto, HandleICall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccall, HandleIntrinCall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccallassigned, HandleIntrinCall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccallwithtype, HandleIntrinCall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccallwithtypeassigned, HandleIntrinCall);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_dassign, HandleDassign);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_dassignoff, HandleDassignoff);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_regassign, HandleRegassign);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_iassign, HandleIassign);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_iassignoff, HandleIassignoff);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_iassignfpoff, HandleIassignfpoff);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_iassignspoff, HandleIassignspoff);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_blkassignoff, HandleBlkassignoff);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_eval, HandleEval);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_rangegoto, HandleRangeGoto);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_membarrelease, HandleMembar);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_membaracquire, HandleMembar);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_membarstoreload, HandleMembar);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_membarstorestore, HandleMembar);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_comment, HandleComment);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_catch, HandleCatchOp);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_abort, HandleAbort);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_assertnonnull, HandleAssertNull);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_callassertnonnull, HandleAssertNull);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_assignassertnonnull, HandleAssertNull);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_returnassertnonnull, HandleAssertNull);
+    RegisterFactoryFunction<HandleStmtFactory>(OP_asm, HandleAsm);
+}
+
+CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool,
+               StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId)
+    : vRegTable(allocator.Adapter()),
+      bbVec(allocator.Adapter()),
+      vRegOperandTable(allocator.Adapter()),
+      referenceVirtualRegs(allocator.Adapter()),
+      referenceStackSlots(allocator.Adapter()),
+      pregIdx2Opnd(mirFunc.GetPregTab()->Size(), nullptr, allocator.Adapter()),
+      pRegSpillMemOperands(allocator.Adapter()),
+      spillRegMemOperands(allocator.Adapter()),
+      reuseSpillLocMem(allocator.Adapter()),
+      labelMap(std::less<LabelIdx>(), allocator.Adapter()),
+      vregsToPregsMap(std::less<regno_t>(), allocator.Adapter()),
+      stackMapInsns(allocator.Adapter()),
+      hasVLAOrAlloca(mirFunc.HasVlaOrAlloca()),
+      dbgCallFrameLocations(allocator.Adapter()),
+      cg(&cg),
+      mirModule(mod),
+      memPool(&memPool),
+      stackMp(stackMp),
+      func(mirFunc),
+      exitBBVec(allocator.Adapter()),
+      extendSet(allocator.Adapter()),
+      lab2BBMap(allocator.Adapter()),
+      beCommon(beCommon),
+      funcScopeAllocator(&allocator),
+      emitStVec(allocator.Adapter()),
+      switchLabelCnt(allocator.Adapter()),
+#if TARGARM32
+      sortedBBs(allocator.Adapter()),
+      lrVec(allocator.Adapter()),
+#endif /* TARGARM32 */
+      loops(allocator.Adapter()),
+      lmbcParamVec(allocator.Adapter()),
+      shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." + std::to_string(funcId), &memPool)
+{
+    mirModule.SetCurFunction(&func);
+    dummyBB = CreateNewBB();
+    vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size();
+    firstNonPregVRegNO = vRegCount;
+    /* the maximum register count is initialized to vRegCount increased by 1024 */
+    maxRegCount = vRegCount + 1024;
+
+    insnBuilder = memPool.New<InsnBuilder>(memPool);
+    opndBuilder = memPool.New<OperandBuilder>(memPool, func.GetPregTab()->Size());
+
+    vRegTable.resize(maxRegCount);
+    /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */
+    DEBUG_ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr");
+    for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) {
+        PrimType primType = func.GetPregTab()->PregFromPregIdx(i)->GetPrimType();
+        uint32 byteLen = GetPrimTypeSize(primType);
+        if (byteLen < k4ByteSize) {
+            byteLen = k4ByteSize;
+        }
+        if (primType == PTY_u128 || primType == PTY_i128) {
+            byteLen = k8ByteSize;
+        }
+        new (&GetVirtualRegNodeFromPseudoRegIdx(i)) VirtualRegNode(GetRegTyFromPrimTy(primType), byteLen);
+    }
+    firstCGGenLabelIdx = func.GetLabelTab()->GetLabelTableSize();
+    lSymSize = 0;
+    if (func.GetSymTab()) {
+        lSymSize = func.GetSymTab()->GetSymbolTableSize();
+    }
+    callingConventionKind = CCImpl::GetCallConvKind(mirFunc);
+}
+
+CGFunc::~CGFunc()
+{
+    mirModule.SetCurFunction(nullptr);
+}
+
+Operand *CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr)
+{
+    auto function = CreateProductFunction<HandleExprFactory>(expr.GetOpCode());
+    CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()");
+    return function(parent, expr, *this);
+}
+
+StmtNode *CGFunc::HandleFirstStmt()
+{
+    BlockNode *block = func.GetBody();
+
+    DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction");
+    StmtNode *stmt = block->GetFirst();
+    if (stmt == nullptr) {
+        return nullptr;
+    }
+    bool withFreqInfo = func.HasFreqMap() && !func.GetLastFreqMap().empty();
+    if (withFreqInfo) {
+        frequency = kFreqBase;
+    }
+    DEBUG_ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label");
+    HandleLabel(*stmt, *this);
+    firstBB = curBB;
+    stmt = stmt->GetNext();
+    if (stmt == nullptr) {
+        return nullptr;
+    }
+    curBB = StartNewBBImpl(false, *stmt);
+    curBB->SetFrequency(frequency);
+    if (JAVALANG) {
+        HandleRCCall(true);
+    }
+    return stmt;
+}
+
+bool CGFunc::CheckSkipMembarOp(const StmtNode &stmt)
+{
+    StmtNode *nextStmt = stmt.GetRealNext();
+    if (nextStmt == nullptr) {
+        return false;
+    }
+
+    Opcode opCode = stmt.GetOpCode();
+    if (((opCode == OP_membaracquire) || (opCode == OP_membarrelease)) && (nextStmt->GetOpCode() == stmt.GetOpCode())) {
+        return true;
+    }
+    if ((opCode == OP_membarstorestore) && (nextStmt->GetOpCode() == OP_membarrelease)) {
+        return true;
+    }
+    if ((opCode == OP_membarstorestore) && func.IsConstructor() && MemBarOpt(stmt)) {
+        return true;
+    }
+#if TARGAARCH64 || TARGRISCV64
+    if ((!CGOptions::UseBarriersForVolatile()) && (nextStmt->GetOpCode() == OP_membaracquire)) {
+        isVolLoad = true;
+    }
+#endif /* TARGAARCH64 */
+    return false;
+}
+
+void CGFunc::GenerateLoc(StmtNode *stmt, unsigned &lastSrcLoc, unsigned &lastMplLoc)
+{
+    /* insert Insn for .loc before cg for the stmt */
+    if (cg->GetCGOptions().WithLoc() && stmt->op != OP_label && stmt->op != OP_comment) {
+        /* if original src file location info is available for this stmt,
+         * use it and skip mpl file location info for this stmt
+         */
+        bool hasLoc = false;
+        unsigned newSrcLoc = cg->GetCGOptions().WithSrc() ? stmt->GetSrcPos().LineNum() : 0;
+        if (newSrcLoc != 0 && newSrcLoc != lastSrcLoc) {
+            /* .loc for original src file */
+            unsigned fileid = stmt->GetSrcPos().FileNum();
+            Operand *o0 = CreateDbgImmOperand(fileid);
+            Operand *o1 = CreateDbgImmOperand(newSrcLoc);
+            Insn &loc = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1);
+            curBB->AppendInsn(loc);
+            lastSrcLoc = newSrcLoc;
+            hasLoc = true;
+        }
+        /* .loc for mpl file, skip if already has .loc from src for this stmt */
+        unsigned newMplLoc = cg->GetCGOptions().WithMpl() ? stmt->GetSrcPos().MplLineNum() : 0;
+        if (newMplLoc != 0 && newMplLoc != lastMplLoc && !hasLoc) {
+            unsigned fileid = 1;
+            Operand *o0 = CreateDbgImmOperand(fileid);
+            Operand *o1 = CreateDbgImmOperand(newMplLoc);
+            Insn &loc = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1);
+            curBB->AppendInsn(loc);
+            lastMplLoc = newMplLoc;
+        }
+    }
+}
+
+int32 CGFunc::GetFreqFromStmt(uint32 stmtId)
+{
+    int32 freq = GetFunction().GetFreqFromLastStmt(stmtId);
+    if (freq != -1) {
+        return freq;
+    }
+    return GetFunction().GetFreqFromFirstStmt(stmtId);
+}
+
+LmbcFormalParamInfo *CGFunc::GetLmbcFormalParamInfo(uint32 offset)
+{
+    MapleVector<LmbcFormalParamInfo *> &paramVec = GetLmbcParamVec();
+    for (auto *param : paramVec) {
+        uint32 paramOffset = param->GetOffset();
+        uint32 paramSize = param->GetSize();
+        if (paramOffset <= offset && offset < (paramOffset + paramSize)) {
+            return param;
+        }
+    }
+    return nullptr;
+}
+
+/*
+ * For formals of lmbc, the formal list is deleted if there is no
+ * passing of aggregate by value.
+ */
+void CGFunc::CreateLmbcFormalParamInfo()
+{
+    if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) {
+        return;
+    }
+    PrimType primType;
+    uint32 offset;
+    uint32 typeSize;
+    MIRFunction &lmbcFunc = GetFunction();
+    if (lmbcFunc.GetFormalCount() > 0) {
+        /* Whenever lmbc cannot delete call type info, the prototype is available */
+        uint32 stackOffset = 0;
+        for (size_t idx = 0; idx < lmbcFunc.GetFormalCount(); ++idx) {
+            MIRSymbol *sym = lmbcFunc.GetFormal(idx);
+            MIRType *type;
+            TyIdx tyIdx;
+            if (sym) {
+                tyIdx = lmbcFunc.GetFormalDefVec()[idx].formalTyIdx;
+                type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+            } else {
+                FormalDef vec =
+                    const_cast<MIRFunction *>(GetBecommon().GetMIRModule().CurFunction())->GetFormalDefAt(idx);
+                tyIdx = vec.formalTyIdx;
+                type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+            }
+            primType = type->GetPrimType();
+            offset = stackOffset;
+            typeSize = static_cast<uint32>(GetBecommon().GetTypeSize(tyIdx));
+            stackOffset += (typeSize + 7) & (-8);
+            LmbcFormalParamInfo *info = GetMemoryPool()->New<LmbcFormalParamInfo>(primType, offset, typeSize);
+            lmbcParamVec.push_back(info);
+            if (idx == 0 && lmbcFunc.IsFirstArgReturn()) {
+                info->SetIsReturn();
+            }
+            if (type->GetKind() == kTypeStruct) {
+                MIRStructType *structType = static_cast<MIRStructType *>(type);
+                info->SetType(structType);
+                uint32 fpSize;
+                uint32 numFpRegs = FloatParamRegRequired(structType, fpSize);
+                if (numFpRegs > 0) {
+                    info->SetIsPureFloat();
+                    info->SetNumRegs(numFpRegs);
+                    info->SetFpSize(fpSize);
+                }
+            }
+        }
+    } else {
+        /* No aggregate pass by value here */
+        for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) {
+            if (stmt == nullptr) {
+                break;
+            }
+            if (stmt->GetOpCode() == OP_label) {
+                continue;
+            }
+            if (stmt->GetOpCode() != OP_regassign) {
+                break;
+            }
+            RegassignNode *regAssignNode = static_cast<RegassignNode *>(stmt);
+            BaseNode *operand = regAssignNode->Opnd(0);
+            if (operand->GetOpCode() != OP_ireadfpoff) {
break; + } + IreadFPoffNode *ireadNode = static_cast(operand); + primType = ireadNode->GetPrimType(); + if (ireadNode->GetOffset() < 0) { + continue; + } + offset = static_cast(ireadNode->GetOffset()); + typeSize = GetPrimTypeSize(primType); + CHECK_FATAL((offset % k8ByteSize) == 0, ""); /* scalar only, no struct for now */ + LmbcFormalParamInfo *info = GetMemoryPool()->New(primType, offset, typeSize); + lmbcParamVec.push_back(info); + } + } + std::sort(lmbcParamVec.begin(), lmbcParamVec.end(), [](const LmbcFormalParamInfo *x, const LmbcFormalParamInfo *y) { + return x->GetOffset() < y->GetOffset(); + }); + + /* When a scalar param address is taken, its regassign is not in the 1st block */ + for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast(operand); + if (ireadNode->GetOffset() < 0) { + continue; + } + LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(static_cast(ireadNode->GetOffset())); + ASSERT_NOT_NULL(info); + info->SetHasRegassign(); + } + + AssignLmbcFormalParams(); +} + +void CGFunc::GenerateInstruction() +{ + InitHandleExprFactory(); + InitHandleStmtFactory(); + StmtNode *secondStmt = HandleFirstStmt(); + + /* First Pass: Creates the doubly-linked list of BBs (next,prev) */ + volReleaseInsn = nullptr; + unsigned lastSrcLoc = 0; + unsigned lastMplLoc = 0; + std::set bbFreqSet; + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for .loc before cg for the stmt */ + GenerateLoc(stmt, lastSrcLoc, lastMplLoc); + BB *tmpBB = curBB; + isVolLoad = false; + if (CheckSkipMembarOp(*stmt)) { + continue; + } + bool tempLoad = isVolLoad; + auto function = CreateProductFunction(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + /* skip the membar acquire if it is just after the iread. ldr + membaraquire->ldar */ + if (tempLoad && !isVolLoad) { + stmt = stmt->GetNext(); + } + int32 freq = GetFreqFromStmt(stmt->GetStmtID()); + if (freq != -1) { + if (tmpBB != curBB) { + if (curBB->GetFirstInsn() == nullptr && curBB->GetLabIdx() == 0 && + bbFreqSet.count(tmpBB->GetId()) == 0) { + tmpBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } else if ((curBB->GetFirstInsn() != nullptr || curBB->GetLabIdx() != 0) && + bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } + } else if (bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(curBB->GetId()); + } + } + + /* + * skip the membarstoreload if there is the pattern for volatile write( membarrelease + store + membarstoreload + * ) membarrelease + store + membarstoreload -> stlr + */ + if (volReleaseInsn != nullptr) { + if ((stmt->GetOpCode() != OP_membarrelease) && (stmt->GetOpCode() != OP_comment)) { + if (!isVolStore) { + /* remove the generated membar release insn. */ + curBB->RemoveInsn(*volReleaseInsn); + /* skip the membarstoreload. 
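The rewrite finished here is the volatile-write fusion prepared by HandleMembar earlier: a membarrelease + store + membarstoreload sequence can be emitted as a single stlr on AArch64, so the barriers around the store are dropped once the pattern is confirmed. A minimal, self-contained sketch of that pattern match, with a toy opcode enum standing in for the real MIR opcodes (names are illustrative, not the compiler's):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Toy opcode set standing in for the MIR opcodes handled above.
enum class Op { MembarRelease, Store, MembarStoreLoad, Other };

// Returns the index of every "membarrelease; store; membarstoreload" triple.
// HandleMembar/GenerateInstruction implement the same idea on the real stmt
// list so the triple can be fused into one stlr.
std::vector<size_t> FindVolatileStoreTriples(const std::vector<Op> &stmts)
{
    std::vector<size_t> hits;
    for (size_t i = 0; i + 2 < stmts.size(); ++i) {
        if (stmts[i] == Op::MembarRelease && stmts[i + 1] == Op::Store &&
            stmts[i + 2] == Op::MembarStoreLoad) {
            hits.push_back(i);
        }
    }
    return hits;
}

int main()
{
    std::vector<Op> stmts = {Op::Other, Op::MembarRelease, Op::Store, Op::MembarStoreLoad, Op::Other};
    for (size_t i : FindVolatileStoreTriples(stmts)) {
        std::cout << "fusible volatile store at stmt " << i << "\n";
    }
    return 0;
}
```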
*/ + stmt = stmt->GetNext(); + } + volReleaseInsn = nullptr; + isVolStore = false; + } + } + if (curBB != tmpBB) { + lastSrcLoc = 0; + } + } + + /* Set lastbb's frequency */ + BlockNode *block = func.GetBody(); + DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + curBB->SetLastStmt(*block->GetLast()); + curBB->SetFrequency(frequency); + lastBB = curBB; + cleanupBB = lastBB->GetPrev(); + /* All stmts are handled */ + frequency = 0; +} + +LabelIdx CGFunc::CreateLabel() +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "Get func failed at CGFunc::CreateLabel"); + std::string funcName = funcSt->GetName(); + std::string labelStr = funcName.append(std::to_string(labelIdx++)); + return func.GetOrCreateLableIdxFromName(labelStr); +} + +MIRSymbol *CGFunc::GetRetRefSymbol(BaseNode &expr) +{ + Opcode opcode = expr.GetOpCode(); + if (opcode != OP_dread) { + return nullptr; + } + auto &retExpr = static_cast(expr); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(retExpr.GetStIdx()); + DEBUG_ASSERT(symbol != nullptr, "get symbol in mirmodule failed"); + if (symbol->IsRefType()) { + MIRSymbol *sym = nullptr; + for (uint32 i = 0; i < func.GetFormalCount(); i++) { + sym = func.GetFormal(i); + if (sym == symbol) { + return nullptr; + } + } + return symbol; + } + return nullptr; +} + +void CGFunc::GenerateCfiPrologEpilog() +{ + if (GenCfi() == false) { + return; + } + Insn &ipoint = GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_startproc); + /* prolog */ + if (firstBB->GetFirstInsn() != nullptr) { + firstBB->InsertInsnBefore(*firstBB->GetFirstInsn(), ipoint); + } else { + firstBB->AppendInsn(ipoint); + } + +#if !defined(TARGARM32) + /* + * always generate ".cfi_personality 155, DW.ref.__mpl_personality_v0" for Java methods. + * we depend on this to tell whether it is a java method. + */ + if (mirModule.IsJavaModule() && func.IsJava()) { + Insn &personality = GetInsnBuilder() + ->BuildCfiInsn(cfi::OP_CFI_personality_symbol) + .AddOpndChain(CreateCfiImmOperand(EHFunc::kTypeEncoding, k8BitSize)) + .AddOpndChain(CreateCfiStrOperand("DW.ref.__mpl_personality_v0")); + firstBB->InsertInsnAfter(ipoint, personality); + } +#endif + + /* epilog */ + lastBB->AppendInsn(GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_endproc)); +} + +void CGFunc::TraverseAndClearCatchMark(BB &bb) +{ + /* has bb been visited */ + if (bb.GetInternalFlag3()) { + return; + } + bb.SetIsCatch(false); + bb.SetInternalFlag3(1); + for (auto succBB : bb.GetSuccs()) { + TraverseAndClearCatchMark(*succBB); + } +} + +/* + * Two types of successor edges, normal and eh. Any bb which is not + * reachable by a normal successor edge is considered to be in a + * catch block. + * Marking it as a catch block does not automatically make it into + * a catch block. Unreachables can be marked as such too. 
+ */ +void CGFunc::MarkCatchBBs() +{ + /* First, suspect all bb to be in catch */ + FOR_ALL_BB(bb, this) { + bb->SetIsCatch(true); + bb->SetInternalFlag3(0); /* mark as not visited */ + } + /* Eliminate cleanup section from catch */ + FOR_ALL_BB(bb, this) { + if (bb->GetFirstStmt() == cleanupLabel) { + bb->SetIsCatch(false); + DEBUG_ASSERT(bb->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label"); + BB *succ = nullptr; + if (!bb->GetSuccs().empty()) { + succ = bb->GetSuccs().front(); + } else { + continue; + } + DEBUG_ASSERT(succ != nullptr, "Get front succsBB failed"); + while (1) { + DEBUG_ASSERT(succ->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label"); + succ->SetIsCatch(false); + if (!succ->GetSuccs().empty()) { + succ = succ->GetSuccs().front(); + } else { + break; + } + } + } + } + /* Unmark all normally reachable bb as NOT catch. */ + TraverseAndClearCatchMark(*firstBB); +} + +/* + * Mark CleanupEntryBB + * Note: Cleanup bbs and func body bbs are seperated, no edges between them. + * No ehSuccs or eh_prevs between cleanup bbs. + */ +void CGFunc::MarkCleanupEntryBB() +{ + BB *cleanupEntry = nullptr; + FOR_ALL_BB(bb, this) { + bb->SetIsCleanup(0); /* Use to mark cleanup bb */ + bb->SetInternalFlag3(0); /* Use to mark if visited. */ + if (bb->GetFirstStmt() == this->cleanupLabel) { + cleanupEntry = bb; + } + } + /* If a function without cleanup bb, return. */ + if (cleanupEntry == nullptr) { + return; + } + /* after merge bb, update cleanupBB. */ + if (cleanupEntry->GetSuccs().empty()) { + this->cleanupBB = cleanupEntry; + } + SetCleanupLabel(*cleanupEntry); + DEBUG_ASSERT(cleanupEntry->GetEhSuccs().empty(), "CG internal error. Cleanup bb should not have ehSuccs."); +#if DEBUG /* Please don't remove me. */ + /* Check if all of the cleanup bb is at bottom of the function. */ + bool isCleanupArea = true; + if (!mirModule.IsCModule()) { + FOR_ALL_BB_REV(bb, this) { + if (isCleanupArea) { + DEBUG_ASSERT(bb->IsCleanup(), + "CG internal error, cleanup BBs should be at the bottom of the function."); + } else { + DEBUG_ASSERT(!bb->IsCleanup(), + "CG internal error, cleanup BBs should be at the bottom of the function."); + } + + if (bb == cleanupEntry) { + isCleanupArea = false; + } + } + } +#endif /* DEBUG */ + this->cleanupEntryBB = cleanupEntry; +} + +/* Tranverse from current bb's successor and set isCleanup true. */ +void CGFunc::SetCleanupLabel(BB &cleanupEntry) +{ + /* If bb hasn't been visited, return. */ + if (cleanupEntry.GetInternalFlag3()) { + return; + } + cleanupEntry.SetInternalFlag3(1); + cleanupEntry.SetIsCleanup(1); + for (auto tmpBB : cleanupEntry.GetSuccs()) { + if (tmpBB->GetKind() != BB::kBBReturn) { + SetCleanupLabel(*tmpBB); + } else { + DEBUG_ASSERT(ExitbbNotInCleanupArea(cleanupEntry), "exitBB created in cleanupArea."); + } + } +} + +bool CGFunc::ExitbbNotInCleanupArea(const BB &bb) const +{ + for (const BB *nextBB = bb.GetNext(); nextBB != nullptr; nextBB = nextBB->GetNext()) { + if (nextBB->GetKind() == BB::kBBReturn) { + return false; + } + } + return true; +} + +/* + * Do mem barrier optimization for constructor funcs as follow: + * membarstorestore + * write field of this_ ==> write field of this_ + * membarrelease membarrelease. 
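MarkCatchBBs above computes catch blocks as the complement of normal reachability: mark everything, then clear the flag along normal successor edges from the entry. A minimal sketch of that idea, with adjacency lists standing in for BBs and their non-EH edges (the block numbering is invented for the demo):

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <vector>

int main()
{
    // succs[i] holds the normal (non-EH) successors of block i;
    // block 3 has no normal predecessor, so it stays marked.
    std::vector<std::vector<int>> succs = {{1}, {2}, {}, {2}};
    std::vector<bool> isCatch(succs.size(), true);
    std::vector<bool> visited(succs.size(), false);

    // Clear the catch mark on everything reachable from the entry via
    // normal edges, mirroring TraverseAndClearCatchMark.
    std::function<void(int)> clear = [&](int bb) {
        if (visited[bb]) {
            return;
        }
        visited[bb] = true;
        isCatch[bb] = false;
        for (int s : succs[bb]) {
            clear(s);
        }
    };
    clear(0);  // entry block

    for (size_t i = 0; i < isCatch.size(); ++i) {
        std::cout << "BB" << i << (isCatch[i] ? " catch/unreachable\n" : " normal\n");
    }
    return 0;
}
```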
+ */ +bool CGFunc::MemBarOpt(const StmtNode &membar) +{ + if (func.GetFormalCount() == 0) { + return false; + } + MIRSymbol *thisSym = func.GetFormal(0); + if (thisSym == nullptr) { + return false; + } + StmtNode *stmt = membar.GetNext(); + for (; stmt != nullptr; stmt = stmt->GetNext()) { + BaseNode *base = nullptr; + if (stmt->GetOpCode() == OP_comment) { + continue; + } else if (stmt->GetOpCode() == OP_iassign) { + base = static_cast(stmt)->Opnd(0); + } else if (stmt->GetOpCode() == OP_call) { + auto *callNode = static_cast(stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + MIRSymbol *fsym = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + DEBUG_ASSERT(fsym != nullptr, "null ptr check"); + if (fsym->GetName() == "MCC_WriteRefFieldNoDec") { + base = callNode->Opnd(0); + } + } + if (base != nullptr) { + Opcode op = base->GetOpCode(); + if (op == OP_regread && thisSym->IsPreg() && + thisSym->GetPreg()->GetPregNo() == static_cast(base)->GetRegIdx()) { + continue; + } + if ((op == OP_dread || op == OP_addrof) && !thisSym->IsPreg() && + static_cast(base)->GetStIdx() == thisSym->GetStIdx()) { + continue; + } + } + break; + } + + CHECK_NULL_FATAL(stmt); + return stmt->GetOpCode() == OP_membarrelease; +} + +void CGFunc::ProcessExitBBVec() +{ + if (exitBBVec.empty()) { + LabelIdx newLabelIdx = CreateLabel(); + BB *retBB = CreateNewBB(newLabelIdx, cleanupBB->IsUnreachable(), BB::kBBReturn, cleanupBB->GetFrequency()); + cleanupBB->PrependBB(*retBB); + exitBBVec.emplace_back(retBB); + return; + } + /* split an empty exitBB */ + BB *bb = exitBBVec[0]; + if (bb->NumInsn() > 0) { + BB *retBBPart = CreateNewBB(false, BB::kBBFallthru, bb->GetFrequency()); + DEBUG_ASSERT(retBBPart != nullptr, "retBBPart should not be nullptr"); + LabelIdx retBBPartLabelIdx = bb->GetLabIdx(); + if (retBBPartLabelIdx != MIRLabelTable::GetDummyLabel()) { + retBBPart->AddLabel(retBBPartLabelIdx); + lab2BBMap[retBBPartLabelIdx] = retBBPart; + } + Insn *insn = bb->GetFirstInsn(); + while (insn != nullptr) { + bb->RemoveInsn(*insn); + retBBPart->AppendInsn(*insn); + insn = bb->GetFirstInsn(); + } + bb->PrependBB(*retBBPart); + LabelIdx newLabelIdx = CreateLabel(); + bb->AddLabel(newLabelIdx); + lab2BBMap[newLabelIdx] = bb; + } +} + +void CGFunc::AddCommonExitBB() +{ + uint32 i = 0; + while (exitBBVec[i]->IsUnreachable() && i < exitBBVec.size()) { + i++; + } + DEBUG_ASSERT(i < exitBBVec.size(), "all exit BBs are unreachable"); + // create fake commonExitBB + commonExitBB = CreateNewBB(true, BB::kBBFallthru, 0); + DEBUG_ASSERT(commonExitBB != nullptr, "cannot create fake commonExitBB"); + for (BB *cgbb : exitBBVec) { + if (!cgbb->IsUnreachable()) { + commonExitBB->PushBackPreds(*cgbb); + } + } +} + +void CGFunc::UpdateCallBBFrequency() +{ + if (!func.HasFreqMap() || func.GetLastFreqMap().empty()) { + return; + } + FOR_ALL_BB(bb, this) { + if (bb->GetKind() != BB::kBBFallthru || !bb->HasCall()) { + continue; + } + DEBUG_ASSERT(bb->GetSuccs().size() <= 1, "fallthru BB has only one successor."); + if (!bb->GetSuccs().empty()) { + bb->SetFrequency((*(bb->GetSuccsBegin()))->GetFrequency()); + } + } +} + +void CGFunc::HandleFunction() +{ + /* select instruction */ + GenerateInstruction(); + /* merge multi return */ + if (!func.GetModule()->IsCModule() || CGOptions::DoRetMerge() || CGOptions::OptimizeForSize()) { + MergeReturn(); + } + if (func.IsJava()) { + DEBUG_ASSERT(exitBBVec.size() <= 1, "there are more than one BB_return in func"); + } + 
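AddCommonExitBB, called a few lines below, gives the CFG a single synthetic sink: every reachable return block becomes a predecessor of one fake exit, which later dataflow passes can anchor at. A toy sketch of that step, with plain ints standing in for BBs:

```cpp
#include <iostream>
#include <vector>

int main()
{
    int numBBs = 5;
    std::vector<int> exitBBs = {2, 4};             // blocks ending in a return
    std::vector<bool> unreachable(numBBs, false);
    unreachable[4] = true;                          // pretend BB4 was proven dead

    int commonExit = numBBs;                        // id of the new fake exit BB
    std::vector<int> commonExitPreds;
    for (int bb : exitBBs) {
        if (!unreachable[bb]) {
            commonExitPreds.push_back(bb);          // PushBackPreds in the real code
        }
    }

    std::cout << "common exit BB" << commonExit << " preds:";
    for (int p : commonExitPreds) {
        std::cout << " BB" << p;
    }
    std::cout << "\n";
    return 0;
}
```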
ProcessExitBBVec(); + LmbcGenSaveSpForAlloca(); + + if (func.IsJava()) { + GenerateCleanupCodeForExtEpilog(*cleanupBB); + } else if (!func.GetModule()->IsCModule()) { + GenerateCleanupCode(*cleanupBB); + } + GenSaveMethodInfoCode(*firstBB); + /* build control flow graph */ + theCFG = memPool->New(*this); + theCFG->BuildCFG(); + AddCommonExitBB(); + if (mirModule.GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupEntryBB(); + DetermineReturnTypeofCall(); + theCFG->MarkLabelTakenBB(); + theCFG->UnreachCodeAnalysis(); + EraseUnreachableStackMapInsns(); + if (mirModule.GetSrcLang() == kSrcLangC) { + theCFG->WontExitAnalysis(); + } + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + ProcessLazyBinding(); + } + if (GetCG()->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (CGOptions::DoEnableHotColdSplit()) { + theCFG->CheckCFGFreq(); + } +} + +void CGFunc::AddDIESymbolLocation(const MIRSymbol *sym, SymbolAlloc *loc) +{ + DEBUG_ASSERT(debugInfo != nullptr, "debugInfo is null!"); + DEBUG_ASSERT(loc->GetMemSegment() != nullptr, "only support those variable that locate at stack now"); + DBGDie *sdie = debugInfo->GetLocalDie(&func, sym->GetNameStrIdx()); + if (sdie == nullptr) { + return; + } + + DBGExprLoc *exprloc = sdie->GetExprLoc(); + CHECK_FATAL(exprloc != nullptr, "exprloc is null in CGFunc::AddDIESymbolLocation"); + exprloc->SetSymLoc(loc); + + GetDbgCallFrameLocations().push_back(exprloc); +} + +void CGFunc::DumpCFG() const +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CFG built by CG for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + LogInfo::MapleLogger() << "=== BB ( " << std::hex << bb << std::dec << " ) <" << bb->GetKindName() << "> ===\n"; + LogInfo::MapleLogger() << "BB id:" << bb->GetId() << "\n"; + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + const StmtNode *stmt = bb->GetFirstStmt(); + if (stmt != nullptr) { + bool done = false; + do { + done = stmt == bb->GetLastStmt(); + stmt->Dump(1); + LogInfo::MapleLogger() << "\n"; + stmt = stmt->GetNext(); + } while (!done); + } else { + LogInfo::MapleLogger() << "\n"; + } + } +} + +void CGFunc::DumpCGIR() const +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CGIR for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + if (bb->IsUnreachable()) { + continue; + } + LogInfo::MapleLogger() << "=== BB " + << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx(); + LogInfo::MapleLogger() << " ==> @" << func.GetLabelName(bb->GetLabIdx()) << "]"; + } + + LogInfo::MapleLogger() << "> <" << bb->GetId() << "> "; + if (bb->GetLoop()) { + LogInfo::MapleLogger() << "[Loop level " << bb->GetLoop()->GetLoopLevel(); + LogInfo::MapleLogger() << ", head BB " << bb->GetLoop()->GetHeader()->GetId() << "]"; + } + if (bb->IsCleanup()) { + LogInfo::MapleLogger() 
<< "[is_cleanup] "; + } + if (bb->IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb->GetFirstStmt() == cleanupLabel) { + LogInfo::MapleLogger() << "cleanup "; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << "succs: "; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << "preds: "; + for (auto *predBB : bb->GetPreds()) { + LogInfo::MapleLogger() << predBB->GetId() << " "; + } + } + if (!bb->GetEhSuccs().empty()) { + LogInfo::MapleLogger() << "eh_succs: "; + for (auto *ehSuccBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + if (!bb->GetEhPreds().empty()) { + LogInfo::MapleLogger() << "eh_preds: "; + for (auto *ehPredBB : bb->GetEhPreds()) { + LogInfo::MapleLogger() << ehPredBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "===\n"; + LogInfo::MapleLogger() << "frequency:" << bb->GetFrequency() << "\n"; + + FOR_BB_INSNS_CONST(insn, bb) { + insn->Dump(); + } + } +} + +void CGFunc::DumpLoop() const +{ + for (const auto *lp : loops) { + lp->PrintLoops(*lp); + } +} + +void CGFunc::ClearLoopInfo() +{ + loops.clear(); + loops.shrink_to_fit(); + FOR_ALL_BB(bb, this) { + bb->ClearLoopPreds(); + bb->ClearLoopSuccs(); + } +} + +void CGFunc::DumpCFGToDot(const std::string &fileNamePrefix) +{ + std::ofstream file(fileNamePrefix + GetName()); + file << "digraph {" << std::endl; + for (auto *bb : GetAllBBs()) { + if (bb == nullptr) { + continue; + } + auto &succs = bb->GetSuccs(); + if (succs.empty()) { + continue; + } + file << " " << bb->GetId() << "->{"; + for (auto *succ : succs) { + file << succ->GetId() << " "; + } + file << "};"; + } + file << "}" << std::endl; +} + +void CGFunc::PatchLongBranch() +{ + for (BB *bb = firstBB->GetNext(); bb != nullptr; bb = bb->GetNext()) { + bb->SetInternalFlag1(bb->GetInternalFlag1() + bb->GetPrev()->GetInternalFlag1()); + } + BB *next = nullptr; + for (BB *bb = firstBB; bb != nullptr; bb = next) { + next = bb->GetNext(); + if (bb->GetKind() != BB::kBBIf && bb->GetKind() != BB::kBBGoto) { + continue; + } + Insn *insn = bb->GetLastInsn(); + while (insn->IsImmaterialInsn()) { + insn = insn->GetPrev(); + } + BB *tbb = GetBBFromLab2BBMap(GetLabelInInsn(*insn)); + if ((tbb->GetInternalFlag1() - bb->GetInternalFlag1()) < MaxCondBranchDistance()) { + continue; + } + InsertJumpPad(insn); + } +} + +void CGFunc::UpdateAllRegisterVregMapping(MapleMap &newMap) +{ + vregsToPregsMap.clear(); + for (auto it : newMap) { + vregsToPregsMap[it.first] = it.second; + } +} + +bool CgHandleFunction::PhaseRun(maplebe::CGFunc &f) +{ + f.HandleFunction(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) + +bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) +{ + if (f.GetCG()->GetCGOptions().WithDwarf()) { + f.DBGFixCallFrameLocationOffsets(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFixCFLocOsft, dbgfixcallframeoffsets) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/dbg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/dbg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e2fa441e43e8cc2626e89f80998b80b0f1582b7e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/dbg.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dbg.h" +#include "emit.h" + +namespace mpldbg { +using maplebe::CG; +using maplebe::Emitter; +using maplebe::MOperator; +using maplebe::Operand; +using maplebe::OpndDesc; + +struct DbgDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store dbg instruction's operand type */ + std::array opndTypes; +}; + +static DbgDescr dbgDescrTable[kOpDbgLast + 1] = { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) {#k, n, {Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2}}, +#include "dbg.def" +#undef DBG_DEFINE + {"undef", 0, {Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef}}}; + +void DbgInsn::Dump() const +{ + MOperator mOp = GetMachineOpcode(); + DbgDescr &dbgDescr = dbgDescrTable[mOp]; + LogInfo::MapleLogger() << "DBG " << dbgDescr.name; + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + LogInfo::MapleLogger() << (i == 0 ? " : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void DbgInsn::Check() const +{ + DbgDescr &dbgDescr = dbgDescrTable[GetMachineOpcode()]; + /* dbg instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != dbgDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in debug insn"); + } + } +} +#endif + +uint32 DbgInsn::GetLoc() const +{ + if (mOp != OP_DBG_loc) { + return 0; + } + return static_cast(static_cast(opnds[0])->GetVal()); +} + +void ImmOperand::Dump() const +{ + LogInfo::MapleLogger() << " " << val; +} +void DBGOpndEmitVisitor::Visit(ImmOperand *v) +{ + emitter.Emit(v->GetVal()); +} +} // namespace mpldbg diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/ebo.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ebo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..056195e1c031ca586a1427dc61c3c6d94321ffc5 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ebo.cpp @@ -0,0 +1,1353 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
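The dbgDescrTable built above uses the X-macro pattern: dbg.def is textually included with DBG_DEFINE bound to a table-row expansion, so the opcode list and its operand metadata live in a single file. A self-contained illustration of the technique (the opcode list and fields here are invented for the demo, not the real dbg.def contents):

```cpp
#include <array>
#include <cstdint>
#include <iostream>
#include <string>

// Stand-in for dbg.def: each X(...) row describes one opcode.
#define DBG_OPCODES(X) \
    X(loc, 2)          \
    X(scope, 1)        \
    X(value, 3)

struct Descr {
    std::string name;
    uint32_t opndCount;
};

// Expanding the list once with a row-generating macro yields the table,
// exactly as dbg.cpp does with DBG_DEFINE over dbg.def.
static const std::array<Descr, 3> descrTable = {{
#define DBG_DEFINE(name, count) {#name, count},
    DBG_OPCODES(DBG_DEFINE)
#undef DBG_DEFINE
}};

int main()
{
    for (const auto &d : descrTable) {
        std::cout << d.name << " takes " << d.opndCount << " operand(s)\n";
    }
    return 0;
}
```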
+ */ + +#if TARGAARCH64 +#include "aarch64_ebo.h" +#elif TARGRISCV64 +#include "riscv64_ebo.h" +#endif +#if TARGARM32 +#include "arm32_ebo.h" +#endif +#include "securec.h" + +#include "optimize_common.h" + +/* + * The Optimizations include forward propagation, common expression elimination, constant folding, + * dead code elimination and some target optimizations. The main entry of the optimization is run. + * When the Optimization level is less than O2, it can only perform in single block. and in O2 it + * can perform it a sequence of blocks. + */ +namespace maplebe { +using namespace maple; + +#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc) +#define EBO_DUMP_NEWPM CG_DEBUG_FUNC(f) +#define TRUE_OPND cgFunc->GetTrueOpnd() + +constexpr uint32 kEboOpndHashLength = 521; +constexpr uint32 kEboMaxBBNums = 200; + +/* Return the opndInfo for the first mem operand of insn. */ +MemOpndInfo *Ebo::GetMemInfo(InsnInfo &insnInfo) +{ + Insn *insn = insnInfo.insn; + CHECK_FATAL(insn != nullptr, "insnInfo.insn is nullptr!"); + CHECK_FATAL(insn->AccessMem(), "insn is not access memory!"); + uint32 opndNum = insn->GetOperandSize(); + if (insn->IsLoad()) { + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->GetOperand(i).IsMemoryAccessOperand()) { + return static_cast(insnInfo.origOpnd[i]); + } + } + } else if (insn->IsStore()) { + int32 resId = 0; + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->OpndIsDef(i)) { + if (insn->GetOperand(i).IsMemoryAccessOperand()) { + return static_cast(insnInfo.result[resId]); + } else { + resId++; + } + } + } + } + return nullptr; +} + +void Ebo::EnlargeSpaceForLA(Insn &csetInsn) +{ + CHECK_FATAL(live != nullptr, "no live info!"); + live->EnlargeSpaceForLiveAnalysis(*csetInsn.GetBB()); +} + +bool Ebo::IsFrameReg(Operand &opnd) const +{ + if (!opnd.IsRegister()) { + return false; + } + RegOperand ® = static_cast(opnd); + return cgFunc->IsFrameReg(reg); +} + +Operand *Ebo::GetZeroOpnd(uint32 size) const +{ +#if TARGAARCH64 || TARGRISCV64 + return size > k64BitSize ? nullptr : &cgFunc->GetZeroOpnd(size); +#else + return nullptr; +#endif +} + +bool Ebo::IsSaveReg(const Operand &opnd) +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand ® = static_cast(opnd); + return cgFunc->IsSaveReg(reg, *cgFunc->GetFunction().GetReturnType(), cgFunc->GetBecommon()); +} + +bool Ebo::IsPhysicalReg(const Operand &opnd) const +{ + if (!opnd.IsRegister()) { + return false; + } + const RegOperand ® = static_cast(opnd); + return reg.IsPhysicalRegister(); +} + +bool Ebo::HasAssignedReg(const Operand &opnd) const +{ + if (!opnd.IsRegister()) { + return false; + } + const auto ® = static_cast(opnd); + return reg.IsVirtualRegister() ? (!IsInvalidReg(reg)) : true; +} + +bool Ebo::IsOfSameClass(const Operand &op0, const Operand &op1) const +{ + if (!op0.IsRegister() || !op1.IsRegister()) { + return false; + } + const auto ®0 = static_cast(op0); + const auto ®1 = static_cast(op1); + return reg0.GetRegisterType() == reg1.GetRegisterType(); +} + +/* return true if opnd of bb is available. */ +bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo *info) +{ + if (info == nullptr) { + return false; + } + if (info->opnd == nullptr) { + return false; + } + + Operand *op = info->opnd; + if (IsConstantImmOrReg(*op)) { + return true; + } + + int32 hashVal = 0; + if (op->IsRegShift() || op->IsRegister()) { + hashVal = -1; + } else { + hashVal = info->hashVal; + } + if (GetOpndInfo(*op, hashVal) != info) { + return false; + } + /* global operands aren't supported at low levels of optimization. 
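The core of OpndAvailableInBB's test is pointer identity against the latest definition record: a cached OpndInfo is stale as soon as the register's slot in vRegInfo points at a newer definition. A minimal sketch of that staleness check, assuming a vRegInfo-style map from register number to its most recent info:

```cpp
#include <iostream>
#include <unordered_map>

// Simplified OpndInfo: only the producing instruction matters here.
struct OpndInfo {
    int defInsnId;
};

int main()
{
    std::unordered_map<int, const OpndInfo *> vRegInfo;

    OpndInfo def1{10};
    vRegInfo[100] = &def1;            // vreg 100 defined by insn 10
    const OpndInfo *cached = vRegInfo[100];

    OpndInfo def2{42};
    vRegInfo[100] = &def2;            // vreg 100 redefined by insn 42

    // Availability test as in OpndAvailableInBB: the cached record must
    // still be the one the table points to.
    bool available = (vRegInfo[100] == cached);
    std::cout << "cached info still valid? " << (available ? "yes" : "no") << "\n";
    return 0;
}
```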
*/ + if ((Globals::GetInstance()->GetOptimLevel() < CGOptions::kLevel2) && (&bb != info->bb)) { + return false; + } + if (beforeRegAlloc && IsPhysicalReg(*op)) { + return false; + } + return true; +} + +bool Ebo::ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn) +{ + if (opndReplace == nullptr) { + return false; + } + if ((opndInfo.replacementInfo != nullptr) && opndInfo.replacementInfo->redefined) { + return false; + } +#if TARGARM32 + /* for arm32, disable forwardProp in strd insn. */ + if (insn.GetMachineOpcode() == MOP_strd) { + return false; + } + if (opndInfo.mayReDef) { + return false; + } +#endif + if (!(IsConstantImmOrReg(*opndReplace) || + ((OpndAvailableInBB(*insn.GetBB(), opndInfo.replacementInfo) || RegistersIdentical(opnd, *opndReplace)) && + (HasAssignedReg(opnd) == HasAssignedReg(*opndReplace))))) { + return false; + } + /* if beforeRA, replace op should not be PhysicalRe */ + return !beforeRegAlloc || !IsPhysicalReg(*opndReplace); +} + +bool Ebo::RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo) +{ + if (IsConstantImmOrReg(opnd)) { + return false; + } + if (!(!beforeRegAlloc || (HasAssignedReg(oldOpnd) == HasAssignedReg(*opndReplace)) || IsZeroRegister(opnd) || + !insn.IsMove())) { + return false; + } + std::set defRegs = insn.GetDefRegs(); + if (!(defRegs.empty() || + ((opnd.IsRegister() && !defRegs.count(static_cast(opnd).GetRegisterNumber())) || + !beforeRegAlloc))) { + return false; + } + if (!(beforeRegAlloc || !IsFrameReg(oldOpnd))) { + return false; + } + if (insn.GetBothDefUseOpnd() != kInsnMaxOpnd) { + return false; + } + if (IsPseudoRet(insn)) { + return false; + } + + return ((IsOfSameClass(oldOpnd, *opndReplace) && (oldOpnd.GetSize() <= opndReplace->GetSize())) || + ((tmpInfo != nullptr) && IsMovToSIMDVmov(insn, *tmpInfo->insn))); +} + +/* For Memory Operand, its info was stored in a hash table, this function is to compute its hash value. */ +int32 Ebo::ComputeOpndHash(const Operand &opnd) const +{ + uint64 hashIdx = reinterpret_cast(&opnd) >> k4ByteSize; + return static_cast(hashIdx % kEboOpndHashLength); +} + +/* Store the operand information. Store it to the vRegInfo if is register. otherwise put it to the hash table. */ +void Ebo::SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal) +{ + /* opnd is Register or RegShift */ + if (hashVal == -1) { + const RegOperand ® = GetRegOperand(opnd); + vRegInfo[reg.GetRegisterNumber()] = opndInfo; + return; + } + + CHECK_FATAL(static_cast(static_cast(hashVal)) < exprInfoTable.size(), + "SetOpndInfo hashval outof range!"); + opndInfo->hashVal = hashVal; + opndInfo->hashNext = exprInfoTable.at(hashVal); + exprInfoTable.at(hashVal) = opndInfo; +} + +/* Used to change the info of opnd from opndinfo to newinfo. 
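ComputeOpndHash and SetOpndInfo above implement a plain chained hash table keyed on the operand's address. A standalone sketch of the same scheme; 521 matches kEboOpndHashLength, while the shift of 4 is this demo's stand-in for k4ByteSize:

```cpp
#include <cstdint>
#include <iostream>

constexpr uint32_t kBuckets = 521;

struct Info {
    const void *opnd;
    Info *hashNext = nullptr;
};

static Info *table[kBuckets] = {nullptr};

// Drop the low alignment bits of the address, then fold into a bucket,
// mirroring ComputeOpndHash.
uint32_t Hash(const void *opnd)
{
    return static_cast<uint32_t>((reinterpret_cast<uintptr_t>(opnd) >> 4) % kBuckets);
}

// Push-front onto the bucket's chain, exactly like SetOpndInfo.
void Insert(Info &info)
{
    uint32_t h = Hash(info.opnd);
    info.hashNext = table[h];
    table[h] = &info;
}

int main()
{
    int a = 0, b = 0;
    Info ia{&a}, ib{&b};
    Insert(ia);
    Insert(ib);
    std::cout << "&a hashes to bucket " << Hash(&a) << "\n";
    return 0;
}
```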
*/ +void Ebo::UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal) +{ + if (hashVal == -1) { + const RegOperand ® = GetRegOperand(opnd); + vRegInfo[reg.GetRegisterNumber()] = newInfo; + return; + } + DEBUG_ASSERT(hashVal < exprInfoTable.size(), "SetOpndInfo hashval outof range!"); + OpndInfo *info = exprInfoTable.at(hashVal); + if (newInfo != nullptr) { + newInfo->hashNext = opndInfo.hashNext; + opndInfo.hashNext = nullptr; + if (info == &opndInfo) { + exprInfoTable.at(hashVal) = newInfo; + return; + } + while (info != nullptr) { + if (info->hashNext == &opndInfo) { + info->hashNext = newInfo; + return; + } + info = info->hashNext; + } + return; + } + if (info == &opndInfo) { + exprInfoTable.at(hashVal) = opndInfo.hashNext; + return; + } + while (info != nullptr) { + if (info->hashNext == &opndInfo) { + info->hashNext = opndInfo.next; + opndInfo.hashNext = nullptr; + return; + } + info = info->hashNext; + } +} + +/* return true if op1 op2 is equal */ +bool Ebo::OperandEqual(const Operand &op1, const Operand &op2) const +{ + if (&op1 == &op2) { + return true; + } + if (op1.GetKind() != op2.GetKind()) { + return false; + } + return OperandEqSpecial(op1, op2); +} + +OpndInfo *Ebo::GetOpndInfo(const Operand &opnd, int32 hashVal) const +{ + if (hashVal < 0) { + const RegOperand ® = GetRegOperand(opnd); + auto it = vRegInfo.find(reg.GetRegisterNumber()); + return it != vRegInfo.end() ? it->second : nullptr; + } + /* do not find prev memOpend */ + if (opnd.IsMemoryAccessOperand()) { + return nullptr; + } + DEBUG_ASSERT(hashVal < exprInfoTable.size(), "SetOpndInfo hashval outof range!"); + OpndInfo *info = exprInfoTable.at(hashVal); + while (info != nullptr) { + if (&opnd == info->opnd) { + return info; + } + info = info->hashNext; + } + return nullptr; +} + +/* Create a opndInfo for opnd. */ +OpndInfo *Ebo::GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal) +{ + OpndInfo *opndInfo = nullptr; + if (opnd.IsMemoryAccessOperand()) { + opndInfo = eboMp->New(opnd); + } else { + opndInfo = eboMp->New(opnd); + } + /* Initialize the entry. */ + opndInfo->hashVal = hashVal; + opndInfo->opnd = &opnd; + opndInfo->bb = &bb; + opndInfo->insn = insn; + opndInfo->prev = lastOpndInfo; + if (firstOpndInfo == nullptr) { + firstOpndInfo = opndInfo; + } else { + lastOpndInfo->next = opndInfo; + } + lastOpndInfo = opndInfo; + return opndInfo; +} + +/* Update the use infomation for localOpnd because of its use insn currentInsn. 
*/ +OpndInfo *Ebo::OperandInfoUse(BB ¤tBB, Operand &localOpnd) +{ + if (!(localOpnd.IsRegister() || localOpnd.IsRegShift()) && !localOpnd.IsMemoryAccessOperand()) { + return nullptr; + } + int hashVal = 0; + /* only arm32 has regShift */ + if (localOpnd.IsRegister() || localOpnd.IsRegShift()) { + hashVal = -1; + } else { + hashVal = ComputeOpndHash(localOpnd); + } + OpndInfo *opndInfo = GetOpndInfo(localOpnd, hashVal); + + if (opndInfo == nullptr) { + opndInfo = GetNewOpndInfo(currentBB, nullptr, localOpnd, hashVal); + SetOpndInfo(localOpnd, opndInfo, hashVal); + } + IncRef(*opndInfo); + return opndInfo; +} + +/* return true if op0 is identical with op1 */ +bool Ebo::RegistersIdentical(const Operand &op0, const Operand &op1) const +{ + if (&op0 == &op1) { + return true; + } + if (!(op0.IsRegister() && op1.IsRegister())) { + return false; + } + const RegOperand ®0 = static_cast(op0); + const RegOperand ®1 = static_cast(op1); + return ((reg0.IsPhysicalRegister() || !IsInvalidReg(reg0)) && (reg1.IsPhysicalRegister() || !IsInvalidReg(reg1)) && + (reg0.GetRegisterType() == reg1.GetRegisterType()) && + (reg0.GetRegisterNumber() == reg1.GetRegisterNumber())); +} + +InsnInfo *Ebo::GetNewInsnInfo(Insn &insn) +{ + InsnInfo *insnInfo = eboMp->New(*eboMp, insn); + insnInfo->prev = lastInsnInfo; + if (firstInsnInfo == nullptr) { + firstInsnInfo = insnInfo; + } else { + lastInsnInfo->next = insnInfo; + } + lastInsnInfo = insnInfo; + insnInfo->next = nullptr; + return insnInfo; +} + +uint32 Ebo::ComputeHashVal(Insn &insn, const MapleVector &opndInfos) const +{ + uint32 hashVal = 0; + if (insn.AccessMem()) { + hashVal = kEboDefaultMemHash; + if (insn.NoAlias()) { + hashVal = kEboNoAliasMemHash; + } + MemOperand *memOpnd = static_cast(insn.GetMemOpnd()); + if (memOpnd != nullptr) { + Operand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && IsFrameReg(*baseReg)) { + hashVal = kEboSpillMemHash; + } + } + } else if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + hashVal = kEboCopyInsnHash; + } else { + uint32 opndNum = insn.GetOperandSize(); + hashVal = insn.GetMachineOpcode(); + for (uint32 i = 0; i < opndNum; ++i) { + hashVal += static_cast(reinterpret_cast(opndInfos.at(i))); + } + hashVal = static_cast(kEboReservedInsnHash + EBO_EXP_INSN_HASH(hashVal)); + } + return hashVal; +} + +/* computeHashVal of insn */ +void Ebo::HashInsn(Insn &insn, const MapleVector &origInfo, const MapleVector &opndInfos) +{ + uint32 hashVal = ComputeHashVal(insn, opndInfos); + /* Create a new insnInfo entry and add the new insn to the hash table. */ + InsnInfo *insnInfo = GetNewInsnInfo(insn); + insnInfo->bb = insn.GetBB(); + insnInfo->insn = &insn; + insnInfo->hashIndex = hashVal; + insnInfo->same = insnInfoTable.at(hashVal); + + if (!beforeRegAlloc) { + if ((insn.IsCall() || insn.IsTailCall() || insn.IsAsmInsn()) && !insn.GetIsThrow()) { + DefineCallerSaveRegisters(*insnInfo); + } else if (IsClinitCheck(insn)) { + DefineClinitSpecialRegisters(*insnInfo); + } + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + /* Copy all the opndInfo entries for the operands. */ + insnInfo->origOpnd.emplace_back(origInfo.at(i)); + insnInfo->optimalOpnd.emplace_back(opndInfos.at(i)); + /* Keep the result info. 
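HashInsn's generic case, via ComputeHashVal above, folds the machine opcode and the addresses of the operands' value infos into a small bucket range, so structurally identical computations over the same values chain together and become CSE candidates. A sketch with illustrative constants (not the real kEbo* values):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint32_t kReservedHash = 4;
constexpr uint32_t kHashRange = 509;

// Opcode plus operand-info addresses, folded into the expression range.
uint32_t HashInsn(uint32_t mop, const std::vector<const void *> &opndInfos)
{
    uint32_t h = mop;
    for (const void *info : opndInfos) {
        h += static_cast<uint32_t>(reinterpret_cast<uintptr_t>(info));
    }
    return kReservedHash + h % kHashRange;
}

int main()
{
    int valueInfoA = 0, valueInfoB = 0;
    std::vector<const void *> opnds = {&valueInfoA, &valueInfoB};
    // Same opcode over the same value infos -> same bucket -> CSE candidate.
    std::cout << (HashInsn(7, opnds) == HashInsn(7, opnds) ? "same bucket\n" : "different\n");
    return 0;
}
```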
*/ + if (!insn.OpndIsDef(i)) { + continue; + } + auto genOpndInfoDef = [this, insnInfo](Operand &op) { + OpndInfo *opndInfo = nullptr; + if ((&op != TRUE_OPND) && + ((op.IsRegister() && (&op) != GetZeroOpnd(op.GetSize())) || + (op.IsMemoryAccessOperand() && (static_cast(op)).GetBaseRegister() != nullptr))) { + opndInfo = OperandInfoDef(*insnInfo->bb, *insnInfo->insn, op); + opndInfo->insnInfo = insnInfo; + } + insnInfo->result.emplace_back(opndInfo); + }; + Operand &op = insn.GetOperand(i); + if (op.IsList() && !static_cast(op).GetOperands().empty()) { + for (auto operand : static_cast(op).GetOperands()) { + genOpndInfoDef(*operand); + } + } else { + genOpndInfoDef(op); + } + } + SetInsnInfo(hashVal, *insnInfo); +} + +/* do decref of orig_info, refCount will be set to 0 */ +void Ebo::RemoveUses(uint32 opndNum, const MapleVector &origInfo) +{ + OpndInfo *info = nullptr; + for (uint32 i = 0; i < opndNum; ++i) { + info = origInfo.at(i); + if (info != nullptr) { + DecRef(*info); + if (info->opnd->IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(info); + OpndInfo *baseInfo = memInfo->GetBaseInfo(); + OpndInfo *offsetInfo = memInfo->GetOffsetInfo(); + if (baseInfo != nullptr) { + DecRef(*baseInfo); + } + if (offsetInfo != nullptr) { + DecRef(*offsetInfo); + } + } + } + } +} + +OpndInfo *Ebo::BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex) +{ + auto *memOpnd = static_cast(&opnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetOffset(); + OpndInfo *baseInfo = nullptr; + OpndInfo *offsetInfo = nullptr; + if (base != nullptr) { + if (!memOpnd->IsIntactIndexed()) { + baseInfo = OperandInfoUse(bb, *base); + baseInfo = OperandInfoDef(bb, insn, *base); + return baseInfo; + } else { + baseInfo = OperandInfoUse(bb, *base); + } + /* forward prop for base register. */ + if ((baseInfo != nullptr) && base->IsRegister()) { + auto *baseReg = static_cast(base); + Operand *replaceOpnd = baseInfo->replacementOpnd; + OpndInfo *replaceInfo = baseInfo->replacementInfo; + if ((replaceInfo != nullptr) && (replaceOpnd != nullptr) && !cgFunc->IsSPOrFP(*baseReg) && + (!beforeRegAlloc || (!IsPhysicalReg(*replaceOpnd) && !IsPhysicalReg(*base))) && + IsOfSameClass(*base, *replaceOpnd) && memOpnd->IsIntactIndexed() && + (base->GetSize() <= replaceOpnd->GetSize()) && + /* In case that replace opnd was redefined. 
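The guard list above boils down to one rule: only substitute the cached base register into the memory operand if nothing has invalidated the cached value since it was recorded. A minimal model of that redefinition check:

```cpp
#include <iostream>

struct RegInfo {
    int reg;
    bool redefined;  // set when the value was overwritten after caching
};

struct MemOpnd {
    int baseReg;
    int offset;
};

// Returns true if the rewrite happened.
bool TryReplaceBase(MemOpnd &mem, const RegInfo &replacement)
{
    if (replacement.redefined) {
        return false;  // stale value: keep the original base register
    }
    mem.baseReg = replacement.reg;
    return true;
}

int main()
{
    MemOpnd mem{5, 16};
    RegInfo stale{7, true};
    RegInfo fresh{8, false};
    std::cout << "stale: " << TryReplaceBase(mem, stale) << ", base=" << mem.baseReg << "\n";
    std::cout << "fresh: " << TryReplaceBase(mem, fresh) << ", base=" << mem.baseReg << "\n";
    return 0;
}
```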
*/ + !replaceInfo->redefined) { + MemOperand *newMem = static_cast(memOpnd->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "newMem is null in Ebo::BuildAllInfo(BB *bb)"); + newMem->SetBaseRegister(*static_cast(replaceOpnd)); + insn.SetOperand(opndIndex, *newMem); + DecRef(*baseInfo); + IncRef(*replaceInfo); + baseInfo = replaceInfo; + } + } + } + if ((offset != nullptr) && offset->IsRegister()) { + offsetInfo = OperandInfoUse(bb, *offset); + } + OpndInfo *opndInfo = OperandInfoUse(bb, insn.GetOperand(opndIndex)); + CHECK_FATAL(opndInfo != nullptr, "opndInfo should not be null ptr"); + MemOpndInfo *memInfo = static_cast(opndInfo); + if (baseInfo != nullptr) { + memInfo->SetBaseInfo(*baseInfo); + } + if (offsetInfo != nullptr) { + memInfo->SetOffsetInfo(*offsetInfo); + } + return memInfo; +} + +OpndInfo *Ebo::BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector &origInfos) +{ + if (opnd.IsList()) { + ListOperand *listOpnd = static_cast(&opnd); + for (auto op : listOpnd->GetOperands()) { + OperandInfoUse(bb, *op); + } + return nullptr; + } + DEBUG_ASSERT(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (opnd.IsConditionCode()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + /* if operand is Opnd_cond, the orig_info store the info of rFlag. */ + OpndInfo *tempOpndInfo = GetOpndInfo(rFlag, -1); + origInfos.at(opndIndex) = tempOpndInfo; + return nullptr; + } + + if (!(opnd.IsRegister() || opnd.IsRegShift()) && !opnd.IsMemoryAccessOperand()) { + return nullptr; + } + + if (opnd.IsMemoryAccessOperand()) { + OpndInfo *memInfo = BuildMemOpndInfo(bb, insn, opnd, opndIndex); + CHECK_FATAL(memInfo != nullptr, "build memopnd info failed in Ebo::BuildAllInfo"); + origInfos.at(opndIndex) = memInfo; + return nullptr; + } + OpndInfo *opndInfo = OperandInfoUse(bb, opnd); + origInfos.at(opndIndex) = opndInfo; + return opndInfo; +} + +bool Ebo::ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo, + MapleVector &origInfos) +{ + CHECK_FATAL(opnd != nullptr, "nullptr check"); + Operand *opndReplace = opndInfo->replacementOpnd; + /* Don't propagate physical registers before register allocation. */ + if (beforeRegAlloc && (opndReplace != nullptr) && (IsPhysicalReg(*opndReplace) || IsPhysicalReg(*opnd))) { + return false; + } + + /* forward propagation of constants */ + CHECK_FATAL(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (!ForwardPropCheck(opndReplace, *opndInfo, *opnd, insn)) { + return false; + } + Operand *oldOpnd = opnd; + opnd = opndInfo->replacementOpnd; + opndInfo = opndInfo->replacementInfo; + + /* constant prop. */ + if (opnd->IsIntImmediate() && oldOpnd->IsRegister()) { + if (DoConstProp(insn, opndIndex, *opnd)) { + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + } + } + /* move reg, wzr, store vreg, mem ==> store wzr, mem */ +#if TARGAARCH64 || TARGRISCV64 + if (IsZeroRegister(*opnd) && opndIndex == 0 && + (insn.GetMachineOpcode() == MOP_wstr || insn.GetMachineOpcode() == MOP_xstr)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + insn.SetOperand(opndIndex, *opnd); + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. 
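The wstr/xstr rewrite above is a store-zero peephole: once forward propagation proves the stored value is the constant zero, the stored register can be replaced with the architectural zero register (wzr/xzr on AArch64), leaving the producing mov dead. A toy string-based sketch of the same rewrite:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Insn {
    std::string op;
    std::string src;
    std::string dst;
};

int main()
{
    std::vector<Insn> bb = {{"mov", "#0", "v1"}, {"str", "v1", "[sp,#8]"}};
    // v1 is a copy of zero, so rewrite the store's source register; the
    // mov becomes dead and is cleaned up by the later removal pass.
    if (bb[0].op == "mov" && bb[0].src == "#0" && bb[1].src == bb[0].dst) {
        bb[1].src = "wzr";
    }
    for (const auto &i : bb) {
        std::cout << i.op << " " << i.src << " " << i.dst << "\n";
    }
    return 0;
}
```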
*/ + origInfos.at(opndIndex) = opndInfo; + if (EBO_DUMP) { + insn.Dump(); + } + } +#endif + /* forward prop for registers. */ + if (!RegForwardCheck(insn, *opnd, opndReplace, *oldOpnd, origInfos.at(opndIndex))) { + return false; + } + /* Copies to and from the same register are not needed. */ + if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn) && + (opndIndex == kInsnSecondOpnd) && RegistersIdentical(*opnd, insn.GetOperand(kInsnFirstOpnd))) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "===Remove the new insn because Copies to and from the same register. \n"; + } + return true; + } + if (static_cast(opnd)->GetRegisterNumber() == RSP) { + /* Disallow optimization with stack pointer */ + return false; + } + + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + DecRef(*origInfos.at(opndIndex)); + insn.SetOperand(opndIndex, *opnd); + + if (EBO_DUMP) { + insn.Dump(); + } + IncRef(*opndInfo); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + /* extend the live range of the replacement operand. */ + if ((opndInfo->bb != insn.GetBB()) && opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opnd, *insn.GetBB(), *opndInfo->bb); + } + return false; +} + +/* + * this func do only one of the following optimization: + * 1. Remove DupInsns + * 2. SpecialSequence OPT + * 3. Remove Redundant "Load" + * 4. Constant Fold + */ +void Ebo::SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, const MapleVector &opnds, + const MapleVector &opndInfos, const MapleVector &origInfos) +{ + if (insn.AccessMem()) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + return; + } + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, opndInfos); + } + return; + } + if (!insnReplaced && !insn.HasSideEffects()) { + uint32 opndNum = insn.GetOperandSize(); + if (opndsConstant && (opndNum > 1)) { + if (!insn.GetDefRegs().empty()) { + insnReplaced = Csel2Cset(insn, opnds); + } + } + if (insnReplaced) { + return; + } + if (opndNum >= 2) { + /* special case */ + if (!insn.GetDefRegs().empty() && ResIsNotDefAndUse(insn)) { + if ((opndNum == 3) && (insn.GetDefRegs().size() == 1) && + (((kInsnSecondOpnd < opnds.size()) && (opnds[kInsnSecondOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnSecondOpnd])) || + ((kInsnThirdOpnd < opnds.size()) && (opnds[kInsnThirdOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnThirdOpnd])))) { + insnReplaced = SimplifyConstOperand(insn, opnds, opndInfos); + } + } + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + } + } +} + +/* + * this func do: + * 1. delete DupInsn if SimplifyInsn failed. + * 2. buildInsnInfo if delete DupInsn failed(func HashInsn do this). + * 3. update replaceInfo. + */ +void Ebo::FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, MapleVector &opnds, + MapleVector &opndInfos, const MapleVector &origInfos) +{ + CHECK_FATAL(insn != nullptr, "nullptr check"); + if (!insnReplaced) { + CHECK_FATAL(origInfos.size() != 0, "null ptr check"); + CHECK_FATAL(opndInfos.size() != 0, "null ptr check"); + HashInsn(*insn, origInfos, opndInfos); + /* Processing the result of the insn. 
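The replacement bookkeeping recorded in this region is what later enables copy propagation: each surviving copy's destination remembers its source operand and info. A deliberately simplified sketch, collapsing the replacementOpnd/replacementInfo pair into one map:

```cpp
#include <iostream>
#include <unordered_map>

int main()
{
    // copyOf[dst] = src for every surviving "mov dst, src".
    std::unordered_map<int, int> copyOf;
    copyOf[3] = 1;  // v3 = v1

    int usedReg = 3;
    auto it = copyOf.find(usedReg);
    if (it != copyOf.end()) {
        usedReg = it->second;  // forward propagation: use v1 instead of v3
    }
    std::cout << "operand rewritten to v" << usedReg << "\n";
    return 0;
}
```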
*/ + if ((Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) || !insn->GetDefRegs().empty()) && + !insn->IsSpecialIntrinsic()) { + Operand *res = &insn->GetOperand(kInsnFirstOpnd); + if ((res != nullptr) && (res != TRUE_OPND) && (res != GetZeroOpnd(res->GetSize()))) { + CHECK_FATAL(lastInsnInfo != nullptr, "lastInsnInfo is null!"); + OpndInfo *opndInfo = lastInsnInfo->result[0]; + /* Don't propagate for fmov insns. */ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && (opndInfo != nullptr) && + !IsFmov(*insn)) { + CHECK_FATAL(!opnds.empty(), "null container!"); + opndInfo->replacementOpnd = opnds[kInsnSecondOpnd]; + opndInfo->replacementInfo = opndInfos[kInsnSecondOpnd]; + } else if (insn->GetBothDefUseOpnd() != kInsnMaxOpnd && (opndInfo != nullptr)) { + opndInfo->replacementOpnd = nullptr; + opndInfo->replacementInfo = nullptr; + } + } + } + insn = insn->GetNext(); + } else { + uint32 opndNum = insn->GetOperandSize(); + RemoveUses(opndNum, origInfos); + /* If insn is replaced, reanalyze the new insn to have more opportunities. */ + insn = (prev == nullptr ? bb.GetFirstInsn() : prev->GetNext()); + } +} + +void Ebo::PreProcessSpecialInsn(Insn &insn) +{ + DefineReturnUseRegister(insn); + + if (insn.IsCall() || insn.IsClinit()) { + DefineCallUseSpecialRegister(insn); + } +} + +/* + * this func do : + * 1.build opereand info of bb; + * 2.do Forward propagation after regalloc; + * 3.simplify the insn,include Constant folding,redundant insns elimination. + */ +void Ebo::BuildAllInfo(BB &bb) +{ + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===Enter BuildOperandinfo of bb:" << bb.GetId() << "===\n"; + } + Insn *insn = bb.GetFirstInsn(); + while ((insn != nullptr) && (insn != bb.GetLastInsn()->GetNext())) { + if (!insn->IsTargetInsn()) { + insn = insn->GetNext(); + continue; + } + PreProcessSpecialInsn(*insn); + uint32 opndNum = insn->GetOperandSize(); + if (!insn->IsMachineInstruction() || opndNum == 0) { + insn = insn->GetNext(); + continue; + } + MapleVector opnds(eboAllocator.Adapter()); + MapleVector opndInfos(eboAllocator.Adapter()); + MapleVector origInfos(eboAllocator.Adapter()); + Insn *prev = insn->GetPrev(); + bool insnReplaced = false; + bool opndsConstant = true; + /* start : Process all the operands. */ + for (uint32 i = 0; i < opndNum; ++i) { + if (!insn->OpndIsUse(i)) { + opnds.emplace_back(nullptr); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + continue; + } + Operand *opnd = &(insn->GetOperand(i)); + opnds.emplace_back(opnd); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + if (IsConstantImmOrReg(*opnd)) { + continue; + } + OpndInfo *opndInfo = BuildOperandInfo(bb, *insn, *opnd, i, origInfos); + if (opndInfo == nullptr) { + continue; + } + + /* Don't do propagation for special intrinsic insn. */ + if (!insn->IsSpecialIntrinsic()) { + insnReplaced = ForwardPropagateOpnd(*insn, opnd, i, opndInfo, origInfos); + } + if (insnReplaced) { + continue; + } + opnds.at(i) = opnd; + opndInfos.at(i) = opndInfo; + if (!IsConstantImmOrReg(*opnd)) { + opndsConstant = false; + } + } /* End : Process all the operands. */ +#if TARGARM32 + Arm32Insn *currArm32Insn = static_cast(insn); + if (currArm32Insn->IsCondExecution()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + } +#endif + + if (insnReplaced) { + RemoveUses(opndNum, origInfos); + Insn *temp = insn->GetNext(); + bb.RemoveInsn(*insn); + insn = temp; + continue; + } + + /* simplify the insn. 
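Note the iteration discipline in FindRedundantInsns above: when an instruction is replaced, analysis resumes at the rewritten instruction (prev->GetNext()) rather than advancing past it, so one simplification can expose the next. The same discipline on a plain list:

```cpp
#include <iostream>
#include <list>

int main()
{
    std::list<int> insns = {1, -2, 3};  // negative = "needs rewriting"
    for (auto it = insns.begin(); it != insns.end();) {
        if (*it < 0) {
            *it = -*it;  // "replace" the insn in place
            // do not ++it: reanalyze the rewritten instruction next
            continue;
        }
        std::cout << "kept insn " << *it << "\n";
        ++it;
    }
    return 0;
}
```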
+
+void Ebo::PreProcessSpecialInsn(Insn &insn)
+{
+    DefineReturnUseRegister(insn);
+
+    if (insn.IsCall() || insn.IsClinit()) {
+        DefineCallUseSpecialRegister(insn);
+    }
+}
+
+/*
+ * this function does the following:
+ * 1. build the operand info of bb;
+ * 2. do forward propagation after register allocation;
+ * 3. simplify the insns, including constant folding and redundant insn elimination.
+ */
+void Ebo::BuildAllInfo(BB &bb)
+{
+    if (EBO_DUMP) {
+        LogInfo::MapleLogger() << "===Enter BuildOperandinfo of bb:" << bb.GetId() << "===\n";
+    }
+    Insn *insn = bb.GetFirstInsn();
+    while ((insn != nullptr) && (insn != bb.GetLastInsn()->GetNext())) {
+        if (!insn->IsTargetInsn()) {
+            insn = insn->GetNext();
+            continue;
+        }
+        PreProcessSpecialInsn(*insn);
+        uint32 opndNum = insn->GetOperandSize();
+        if (!insn->IsMachineInstruction() || opndNum == 0) {
+            insn = insn->GetNext();
+            continue;
+        }
+        MapleVector<Operand*> opnds(eboAllocator.Adapter());
+        MapleVector<OpndInfo*> opndInfos(eboAllocator.Adapter());
+        MapleVector<OpndInfo*> origInfos(eboAllocator.Adapter());
+        Insn *prev = insn->GetPrev();
+        bool insnReplaced = false;
+        bool opndsConstant = true;
+        /* start : Process all the operands. */
+        for (uint32 i = 0; i < opndNum; ++i) {
+            if (!insn->OpndIsUse(i)) {
+                opnds.emplace_back(nullptr);
+                opndInfos.emplace_back(nullptr);
+                origInfos.emplace_back(nullptr);
+                continue;
+            }
+            Operand *opnd = &(insn->GetOperand(i));
+            opnds.emplace_back(opnd);
+            opndInfos.emplace_back(nullptr);
+            origInfos.emplace_back(nullptr);
+            if (IsConstantImmOrReg(*opnd)) {
+                continue;
+            }
+            OpndInfo *opndInfo = BuildOperandInfo(bb, *insn, *opnd, i, origInfos);
+            if (opndInfo == nullptr) {
+                continue;
+            }
+
+            /* Don't do propagation for special intrinsic insns. */
+            if (!insn->IsSpecialIntrinsic()) {
+                insnReplaced = ForwardPropagateOpnd(*insn, opnd, i, opndInfo, origInfos);
+            }
+            if (insnReplaced) {
+                continue;
+            }
+            opnds.at(i) = opnd;
+            opndInfos.at(i) = opndInfo;
+            if (!IsConstantImmOrReg(*opnd)) {
+                opndsConstant = false;
+            }
+        } /* End : Process all the operands. */
+#if TARGARM32
+        Arm32Insn *currArm32Insn = static_cast<Arm32Insn*>(insn);
+        if (currArm32Insn->IsCondExecution()) {
+            Operand &rFlag = cgFunc->GetOrCreateRflag();
+            OperandInfoUse(bb, rFlag);
+        }
+#endif
+
+        if (insnReplaced) {
+            RemoveUses(opndNum, origInfos);
+            Insn *temp = insn->GetNext();
+            bb.RemoveInsn(*insn);
+            insn = temp;
+            continue;
+        }
+
+        /* simplify the insn. */
+        if (!insn->IsSpecialIntrinsic()) {
+            SimplifyInsn(*insn, insnReplaced, opndsConstant, opnds, opndInfos, origInfos);
+        }
+        FindRedundantInsns(bb, insn, prev, insnReplaced, opnds, opndInfos, origInfos);
+    }
+}
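BuildAllInfo leans on BuildOperandInfo to keep, for every register, a chain of operand-info records with the newest definition at the head and older ones reachable through a `same` link; each use pins its definition by bumping a reference count. A minimal model of that bookkeeping (ToyOpndInfo and friends are invented names):

```cpp
#include <memory>
#include <unordered_map>
#include <vector>

struct ToyOpndInfo {
    int reg = 0;
    int defIndex = 0;             // which instruction produced this value
    ToyOpndInfo *same = nullptr;  // previous info for the same register
    int refCount = 0;
};

struct ToyInfoTable {
    std::unordered_map<int, ToyOpndInfo*> current;
    std::vector<std::unique_ptr<ToyOpndInfo>> storage;

    ToyOpndInfo *Define(int reg, int defIndex)
    {
        storage.push_back(std::make_unique<ToyOpndInfo>());
        ToyOpndInfo *info = storage.back().get();
        info->reg = reg;
        info->defIndex = defIndex;
        auto it = current.find(reg);
        info->same = (it != current.end()) ? it->second : nullptr;
        current[reg] = info;      // newest definition becomes the head
        return info;
    }

    ToyOpndInfo *Use(int reg)
    {
        auto it = current.find(reg);
        if (it == current.end()) {
            return nullptr;
        }
        ++it->second->refCount;   // like IncRef: a live use pins the def
        return it->second;
    }
};
```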
+
+/* Decrement the use counts for the actual operands of an insnInfo. */
+void Ebo::RemoveInsn(InsnInfo &info)
+{
+    Insn *insn = info.insn;
+    CHECK_FATAL(insn != nullptr, "get insn in info failed in Ebo::RemoveInsn");
+    uint32 opndNum = insn->GetOperandSize();
+    OpndInfo *opndInfo = nullptr;
+    for (uint32 i = 0; i < opndNum; i++) {
+        if (!insn->OpndIsUse(i)) {
+            continue;
+        }
+        opndInfo = info.origOpnd[i];
+        if (opndInfo != nullptr) {
+            DecRef(*opndInfo);
+            Operand *opndTemp = opndInfo->opnd;
+            if (opndTemp == nullptr) {
+                continue;
+            }
+            if (opndTemp->IsMemoryAccessOperand()) {
+                MemOpndInfo *memInfo = static_cast<MemOpndInfo*>(opndInfo);
+                OpndInfo *baseInfo = memInfo->GetBaseInfo();
+                OpndInfo *offInfo = memInfo->GetOffsetInfo();
+                if (baseInfo != nullptr) {
+                    DecRef(*baseInfo);
+                }
+                if (offInfo != nullptr) {
+                    DecRef(*offInfo);
+                }
+            }
+        }
+    }
+#if TARGARM32
+    Arm32CGFunc *a32CGFunc = static_cast<Arm32CGFunc*>(cgFunc);
+    auto &gotInfosMap = a32CGFunc->GetGotInfosMap();
+    for (auto it = gotInfosMap.begin(); it != gotInfosMap.end();) {
+        if (it->first == insn) {
+            it = gotInfosMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+    auto &constInfosMap = a32CGFunc->GetConstInfosMap();
+    for (auto it = constInfosMap.begin(); it != constInfosMap.end();) {
+        if (it->first == insn) {
+            it = constInfosMap.erase(it);
+        } else {
+            ++it;
+        }
+    }
+#endif
+}
+
+/* Mark opnd as live between the defining bb and the using bb. */
+void Ebo::MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const
+{
+    if (live == nullptr) {
+        return;
+    }
+    if (&into == &def) {
+        return;
+    }
+    CHECK_FATAL(opnd.IsRegister(), "expect register here.");
+    const RegOperand &reg = static_cast<const RegOperand&>(opnd);
+    into.SetLiveInBit(reg.GetRegisterNumber());
+    def.SetLiveOutBit(reg.GetRegisterNumber());
+}
+
+/* Return the insn information if insnInfo is present; otherwise search back from lastInsnInfo. */
+InsnInfo *Ebo::LocateInsnInfo(const OpndInfo &info)
+{
+    if (info.insn != nullptr) {
+        if (info.insnInfo != nullptr) {
+            return info.insnInfo;
+        } else {
+            InsnInfo *insnInfo = lastInsnInfo;
+            int32 limit = 50;
+            for (; (insnInfo != nullptr) && (limit != 0); insnInfo = insnInfo->prev, limit--) {
+                if (insnInfo->insn == info.insn) {
+                    return insnInfo;
+                }
+            }
+        }
+    }
+    return nullptr;
+}
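MarkOpndLiveIntoBB widens liveness conservatively: once a replacement operand defined in an earlier block is used here, its register must be recorded as live-out of the defining block and live-in to the using block. Sketched with plain bitsets and an invented register bound:

```cpp
#include <bitset>
#include <cstddef>

constexpr std::size_t kMaxRegs = 128;  // invented bound for the sketch

struct ToyBB {
    std::bitset<kMaxRegs> liveIn;
    std::bitset<kMaxRegs> liveOut;
};

// When a value defined in `def` is now used in `into`, record the register
// as live across the gap so later passes keep the definition alive.
void MarkLiveInto(std::size_t reg, ToyBB &into, ToyBB &def)
{
    if (&into == &def) {
        return;  // same block: nothing to extend
    }
    into.liveIn.set(reg);
    def.liveOut.set(reg);
}
```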
+
+/* redundant insns elimination */
+void Ebo::RemoveUnusedInsns(BB &bb, bool normal)
+{
+    OpndInfo *opndInfo = nullptr;
+    Operand *opnd = nullptr;
+
+    if (firstInsnInfo == nullptr) {
+        return;
+    }
+
+    for (InsnInfo *insnInfo = lastInsnInfo; insnInfo != nullptr; insnInfo = insnInfo->prev) {
+        Insn *insn = insnInfo->insn;
+        if ((insn == nullptr) || (insn->GetBB() == nullptr)) {
+            continue;
+        }
+        /* stop looking at insns once they go out of bb. */
+        if (insn->GetBB() != &bb) {
+            break;
+        }
+
+        uint32 resNum = insn->GetDefRegs().size();
+        if (IsLastAndBranch(bb, *insn)) {
+            goto insn_is_needed;
+        }
+
+        if (insn->IsClinit()) {
+            goto insn_is_needed;
+        }
+
+        if ((resNum == 0) || IsGlobalNeeded(*insn) || insn->IsStore() || IsDecoupleStaticOp(*insn) ||
+            insn->GetBothDefUseOpnd() != kInsnMaxOpnd) {
+            goto insn_is_needed;
+        }
+
+        /* the last insn of a 64x1 (oneelem_simd) function is a float and must be kept */
+        if (cgFunc->GetFunction().GetAttr(FUNCATTR_oneelem_simd) && insnInfo == lastInsnInfo) {
+            goto insn_is_needed;
+        }
+
+        if (insn->GetMachineOpcode() == MOP_asm || insn->IsAtomic()) {
+            goto insn_is_needed;
+        }
+
+        /* Check all results that can be removed. */
+        for (uint32 i = 0; i < resNum; ++i) {
+            opndInfo = insnInfo->result[i];
+            /* A couple of checks. */
+            if (opndInfo == nullptr) {
+                continue;
+            }
+            if ((opndInfo->bb != &bb) || (opndInfo->insn == nullptr)) {
+                goto insn_is_needed;
+            }
+            opnd = opndInfo->opnd;
+            if (opnd == GetZeroOpnd(opnd->GetSize())) {
+                continue;
+            }
+            /* this part optimizes some special cases after RA. */
+            if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && opndInfo &&
+                insn->GetOperand(kInsnSecondOpnd).IsImmediate() && IsSameRedefine(bb, *insn, *opndInfo)) {
+                goto can_be_removed;
+            }
+            /* end special case optimization */
+            if ((beforeRegAlloc && IsPhysicalReg(*opnd)) || (IsSaveReg(*opnd) && !opndInfo->redefinedInBB)) {
+                goto insn_is_needed;
+            }
+            /* Copies to and from the same register are not needed. */
+            if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn)) {
+                if (HasAssignedReg(*opnd) && HasAssignedReg(insn->GetOperand(kInsnSecondOpnd)) &&
+                    RegistersIdentical(*opnd, insn->GetOperand(kInsnSecondOpnd))) {
+                    /* We may be able to get rid of the copy, but be sure that the operand is marked live into this
+                     * block. */
+                    if ((insnInfo->origOpnd[kInsnSecondOpnd] != nullptr) &&
+                        (&bb != insnInfo->origOpnd[kInsnSecondOpnd]->bb)) {
+                        MarkOpndLiveIntoBB(*opnd, bb, *insnInfo->origOpnd[kInsnSecondOpnd]->bb);
+                    }
+                    /* propagate the use count for this opnd to its input operand. */
+                    if (opndInfo->same != nullptr) {
+                        opndInfo->same->refCount += opndInfo->refCount;
+                    }
+
+                    /* removing the copy lets the previous def reach the end of the block. */
+                    if (!opndInfo->redefined && (opndInfo->same != nullptr)) {
+                        opndInfo->same->redefined = false;
+                        opndInfo->same->redefinedInBB = false;
+                    }
+                    goto can_be_removed;
+                }
+            }
+            /* there must be no direct references to the operand. */
+            if (!normal || (opndInfo->refCount != 0)) {
+                goto insn_is_needed;
+            }
+            /*
+             * At O1, a vreg that lives out of the bb must be recognized.
+             * The regs for clinit are also marked so they cannot be deleted; extend this to O2.
+             */
+            if (opnd->IsRegister()) {
+                RegOperand *reg = static_cast<RegOperand*>(opnd);
+                if (beforeRegAlloc && !reg->IsBBLocalVReg()) {
+                    goto insn_is_needed;
+                }
+            }
+            /* Volatile || sideeffect */
+            if (opndInfo->insn->IsVolatile() || opndInfo->insn->HasSideEffects()) {
+                goto insn_is_needed;
+            }
+
+            if (!opndInfo->redefinedInBB && LiveOutOfBB(*opnd, *opndInfo->bb)) {
+                goto insn_is_needed;
+            }
+
+            if (opndInfo->redefinedInBB && opndInfo->redefinedInsn != nullptr &&
+                opndInfo->redefinedInsn->GetBothDefUseOpnd() != kInsnMaxOpnd) {
+                goto insn_is_needed;
+            }
+        }
+
+        if (!normal || insnInfo->mustNotBeRemoved || insn->GetDoNotRemove()) {
+            goto insn_is_needed;
+        }
+    can_be_removed:
+        if (EBO_DUMP) {
+            LogInfo::MapleLogger() << "< ==== Remove Unused insn in bb:" << bb.GetId() << "====\n";
+            insn->Dump();
+        }
+        RemoveInsn(*insnInfo);
+        bb.RemoveInsn(*insn);
+        insnInfo->insn = nullptr;
+        insnInfo->bb = nullptr;
+        for (uint32 i = 0; i < resNum; i++) {
+            opndInfo = insnInfo->result[i];
+            if (opndInfo == nullptr) {
+                continue;
+            }
+            if (opndInfo->redefined && (opndInfo->same != nullptr)) {
+                OpndInfo *next = opndInfo->same;
+                next->redefined = true;
+                if (opndInfo->redefinedInBB && (opndInfo->same->bb == &bb)) {
+                    next->redefinedInBB = true;
+                }
+            }
+            if (!opndInfo->redefinedInBB && (opndInfo->same != nullptr) && (opndInfo->same->bb == &bb)) {
+                opndInfo->same->redefinedInBB = false;
+            }
+            if (!opndInfo->redefined && (opndInfo->same != nullptr)) {
+                opndInfo->same->redefined = false;
+                opndInfo->same->redefinedInBB = false;
+            }
+        }
+        optSuccess = true;
+        continue;
+    insn_is_needed:
+        if (!bb.GetEhSuccs().empty()) {
+            for (uint32 i = 0; i < resNum; i++) {
+                opndInfo = insnInfo->result[i];
+                if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) {
+                    UpdateNextInfo(*opndInfo);
+                }
+            }
+        }
+
+        if (!bb.GetEhPreds().empty()) {
+            for (uint32 i = 0; i < insn->GetOperandSize(); ++i) {
+                opndInfo = insnInfo->origOpnd[i];
+                if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) {
+                    UpdateNextInfo(*opndInfo);
+                }
+                if ((opndInfo != nullptr) && opndInfo->opnd && (&bb != opndInfo->bb) && opndInfo->opnd->IsRegister()) {
+                    MarkOpndLiveIntoBB(*opndInfo->opnd, bb, *opndInfo->bb);
+                }
+            }
+        }
+    } /* end processing insnInfo in currBB */
+}
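RemoveUnusedInsns is, at its core, a backward dead-code sweep over the insn-info list: a definition is droppable only when nothing references it, its value does not escape the block, and no side effects are involved. Reduced to those three checks (field names invented):

```cpp
#include <cstddef>
#include <vector>

struct ToyDef {
    int refCount = 0;
    bool sideEffects = false;
    bool liveOut = false;
    bool dead = false;
    std::vector<size_t> usedDefs;  // indices of the defs this insn consumes
};

// Sweep backwards: a def with no uses, no side effects, and no value that
// escapes the block can go, which may strand its own operands in turn.
void SweepDead(std::vector<ToyDef> &defs)
{
    for (size_t i = defs.size(); i-- > 0;) {
        ToyDef &d = defs[i];
        if (d.refCount == 0 && !d.sideEffects && !d.liveOut) {
            d.dead = true;
            for (size_t u : d.usedDefs) {
                --defs[u].refCount;  // the DecRef step of RemoveInsn
            }
        }
    }
}
```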
+
+void Ebo::UpdateNextInfo(const OpndInfo &opndInfo)
+{
+    OpndInfo *nextInfo = opndInfo.same;
+    while (nextInfo != nullptr) {
+        if (nextInfo->insn != nullptr) {
+            InsnInfo *info = LocateInsnInfo(*nextInfo);
+            if (info != nullptr) {
+                info->mustNotBeRemoved = true;
+            } else {
+                /*
+                 * Couldn't find the insnInfo entry. Make sure that the operand has
+                 * a use count so that the defining insn will not be deleted.
+                 */
+                nextInfo->refCount += opndInfo.refCount;
+            }
+        }
+        nextInfo = nextInfo->same;
+    }
+}
+
+/* back up to the last saved OpndInfo */
+void Ebo::BackupOpndInfoList(OpndInfo *saveLast)
+{
+    if (lastOpndInfo == saveLast) {
+        return;
+    }
+    OpndInfo *opndInfo = lastOpndInfo;
+    while (opndInfo != saveLast) {
+        int32 hashVal = 0;
+        if (opndInfo->opnd->IsRegister() || opndInfo->opnd->IsRegShift()) {
+            hashVal = -1;
+        } else {
+            hashVal = opndInfo->hashVal;
+        }
+        UpdateOpndInfo(*opndInfo->opnd, *opndInfo, opndInfo->same, hashVal);
+        opndInfo = opndInfo->prev;
+    }
+    if (saveLast != nullptr) {
+        saveLast->next = nullptr;
+        lastOpndInfo = saveLast;
+    } else {
+        firstOpndInfo = nullptr;
+        lastOpndInfo = nullptr;
+    }
+}
+
+/* back up to the last saved insn */
+void Ebo::BackupInsnInfoList(InsnInfo *saveLast)
+{
+    if (lastInsnInfo == saveLast) {
+        return;
+    }
+    InsnInfo *insnInfo = lastInsnInfo;
+    while (insnInfo != saveLast) {
+        SetInsnInfo(insnInfo->hashIndex, *(insnInfo->same));
+        insnInfo = insnInfo->prev;
+    }
+    if (saveLast != nullptr) {
+        saveLast->next = nullptr;
+        lastInsnInfo = saveLast;
+    } else {
+        firstInsnInfo = nullptr;
+        lastInsnInfo = nullptr;
+    }
+}
+
+/* add bb to the current eb, and build the operand info of bb */
+void Ebo::AddBB2EB(BB &bb)
+{
+    OpndInfo *saveLastOpndInfo = lastOpndInfo;
+    InsnInfo *saveLastInsnInfo = lastInsnInfo;
+    SetBBVisited(bb);
+    bbNum++;
+    BuildAllInfo(bb);
+    /* Stop adding BBs to the EB once the number of bbs in the current EB exceeds kEboMaxBBNums */
+    if (bbNum < kEboMaxBBNums) {
+        for (auto *bbSucc : bb.GetSuccs()) {
+            if ((bbSucc->GetPreds().size() == 1) && IsNotVisited(*bbSucc)) {
+                AddBB2EB(*bbSucc);
+            }
+        }
+    }
+
+    RemoveUnusedInsns(bb, true);
+    /* Remove information about Operand's and Insn's in this block. */
+    BackupOpndInfoList(saveLastOpndInfo);
+    BackupInsnInfoList(saveLastInsnInfo);
+    bbNum--;
+}
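AddBB2EB shows the extended-block discipline: absorb successors that have exactly one predecessor, process the region, then restore the info lists so sibling regions start from the same state. The recursion shape, with an explicit stack standing in for the opnd/insn info lists (all names invented):

```cpp
#include <cstddef>
#include <vector>

struct ToyBlock {
    std::vector<ToyBlock*> succs;
    std::vector<ToyBlock*> preds;
    bool visited = false;
};

// Grow an extended block: keep absorbing successors that have exactly one
// predecessor, up to a depth cap, then roll per-block state back.
void GrowExtendedBlock(ToyBlock &bb, size_t depth, size_t maxDepth, std::vector<int> &infoStack)
{
    bb.visited = true;
    size_t mark = infoStack.size();  // like saving lastOpndInfo/lastInsnInfo
    infoStack.push_back(0);          // stand-in for BuildAllInfo's records
    if (depth < maxDepth) {
        for (ToyBlock *succ : bb.succs) {
            if (succ->preds.size() == 1 && !succ->visited) {
                GrowExtendedBlock(*succ, depth + 1, maxDepth, infoStack);
            }
        }
    }
    // RemoveUnusedInsns would run here, while the info is still available.
    infoStack.resize(mark);          // like BackupOpndInfoList/BackupInsnInfoList
}
```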
+
+/* Perform EBO */
+void Ebo::EboProcess()
+{
+    FOR_ALL_BB(bb, cgFunc) {
+        if (IsNotVisited(*bb)) {
+            bbNum = 0;
+            AddBB2EB(*bb);
+        }
+    }
+}
+
+/* Perform EBO at O1, where the optimization is confined to a single block. */
+void Ebo::EboProcessSingleBB()
+{
+    FOR_ALL_BB(bb, cgFunc) {
+        OpndInfo *saveLastOpndInfo = lastOpndInfo;
+        InsnInfo *saveLastInsnInfo = lastInsnInfo;
+        BuildAllInfo(*bb);
+        RemoveUnusedInsns(*bb, true);
+        /* Remove information about Operand's and Insn's in this block. */
+        BackupOpndInfoList(saveLastOpndInfo);
+        BackupInsnInfoList(saveLastInsnInfo);
+    }
+}
+
+void Ebo::EboInit()
+{
+    visitedBBs.resize(cgFunc->NumBBs());
+    for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) {
+        visitedBBs[i] = false;
+    }
+    exprInfoTable.resize(kEboMaxOpndHash);
+    for (uint32 i = 0; i < kEboMaxOpndHash; ++i) {
+        exprInfoTable.at(i) = nullptr;
+    }
+    insnInfoTable.resize(kEboMaxInsnHash);
+    for (uint32 i = 0; i < kEboMaxInsnHash; ++i) {
+        insnInfoTable.at(i) = nullptr;
+    }
+    if (!beforeRegAlloc) {
+        BuildCallerSaveRegisters();
+    }
+    optSuccess = false;
+}
+
+/* perform EB optimizations right after instruction selection. */
+void Ebo::Run()
+{
+    EboInit();
+    if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+        EboProcess();
+    } else {
+        EboProcessSingleBB(); /* Perform single-BB optimization at -O1. */
+    }
+    if (optSuccess && cgFunc->GetMirModule().IsCModule()) {
+        Run();
+    }
+}
+
+/* === new pm === */
+bool CgEbo0::PhaseRun(maplebe::CGFunc &f)
+{
+    if (EBO_DUMP_NEWPM) {
+        DotGenerator::GenerateDot("ebo0", f, f.GetMirModule());
+    }
+    LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    MemPool *eboMp = GetPhaseMemPool();
+    Ebo *ebo = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    ebo = eboMp->New<AArch64Ebo>(f, *eboMp, live, true, PhaseName());
+#endif
+#if TARGARM32
+    ebo = eboMp->New<Arm32Ebo>(f, *eboMp, live, true, "ebo0");
+#endif
+    ebo->Run();
+    /* the live range info may have changed, so invalidate it. */
+    if (live != nullptr) {
+        live->ClearInOutDataInfo();
+    }
+    return true;
+}
+
+void CgEbo0::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.AddPreserved<CgLoopAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo0, ebo)
+
+bool CgEbo1::PhaseRun(maplebe::CGFunc &f)
+{
+    if (EBO_DUMP_NEWPM) {
+        DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule(), true);
+    }
+    LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    MemPool *eboMp = GetPhaseMemPool();
+    Ebo *ebo = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    ebo = eboMp->New<AArch64Ebo>(f, *eboMp, live, true, PhaseName());
+#endif
+#if TARGARM32
+    ebo = eboMp->New<Arm32Ebo>(f, *eboMp, live, true, PhaseName());
+#endif
+    ebo->Run();
+    /* the live range info may have changed, so invalidate it. */
+    if (live != nullptr) {
+        live->ClearInOutDataInfo();
+    }
+    return true;
+}
+
+void CgEbo1::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.AddPreserved<CgLoopAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo1, ebo1)
+
+bool CgPostEbo::PhaseRun(maplebe::CGFunc &f)
+{
+    if (EBO_DUMP_NEWPM) {
+        DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule());
+    }
+    LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    MemPool *eboMp = GetPhaseMemPool();
+    Ebo *ebo = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    ebo = eboMp->New<AArch64Ebo>(f, *eboMp, live, false, PhaseName());
+#endif
+#if TARGARM32
+    ebo = eboMp->New<Arm32Ebo>(f, *eboMp, live, false, PhaseName());
+#endif
+    ebo->Run();
+    /* the live range info may have changed, so invalidate it. */
+    if (live != nullptr) {
+        live->ClearInOutDataInfo();
+    }
+    return true;
+}
+
+void CgPostEbo::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.AddPreserved<CgLoopAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostEbo, postebo)
+} /* namespace maplebe */
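The three PhaseRun wrappers share one contract: require liveness, run EBO, then drop the cached live-range data because deleted instructions invalidate it. A generic sketch of that contract (not the maple phase-manager API; all names below are invented):

```cpp
#include <optional>

struct Liveness {};  // stand-in for per-register live-in/live-out data

class PhaseContext {
public:
    Liveness &GetLiveness()
    {
        if (!cache.has_value()) {
            cache.emplace();  // recompute on demand, like GET_ANALYSIS
        }
        return *cache;
    }

    template <typename Transform>
    void RunAndInvalidate(Transform transform)
    {
        transform(GetLiveness());
        cache.reset();  // mirrors live->ClearInOutDataInfo() after Ebo::Run()
    }

private:
    std::optional<Liveness> cache;
};
```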
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/eh_func.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/eh_func.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..88badb7e8385eb2f1f363e50262fb8b7d09f50e4
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/eh_func.cpp
@@ -0,0 +1,772 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "eh_func.h"
+#include "cgfunc.h"
+#include "cg.h"
+#include "mir_builder.h"
+#include "switch_lowerer.h"
+
+namespace maplebe {
+using namespace maple;
+
+void EHFunc::CollectEHInformation(std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec)
+{
+    MIRFunction &mirFunc = cgFunc->GetFunction();
+    MIRModule *mirModule = mirFunc.GetModule();
+    CHECK_FATAL(mirModule != nullptr, "mirModule is nullptr in CGFunc::BuildEHFunc");
+    BlockNode *blkNode = mirFunc.GetBody();
+    CHECK_FATAL(blkNode != nullptr, "current function body is nullptr in CGFunc::BuildEHFunc");
+    EHTry *lastTry = nullptr; /* record the last try */
+    /*
+     * curTry: records the current try wrapping the current statement;
+     * reset to null on endtry
+     */
+    EHTry *curTry = nullptr;
+    StmtNode *nextStmt = nullptr;
+
+    /* collect all try-catch blocks */
+    for (StmtNode *stmt = blkNode->GetFirst(); stmt != nullptr; stmt = nextStmt) {
+        nextStmt = stmt->GetNext();
+        Opcode op = stmt->GetOpCode();
+        switch (op) {
+            case OP_try: {
+                TryNode *tryNode = static_cast<TryNode*>(stmt);
+                EHTry *ehTry = cgFunc->GetMemoryPool()->New<EHTry>(*(cgFunc->GetFuncScopeAllocator()), *tryNode);
+                lastTry = ehTry;
+                curTry = ehTry;
+                AddTry(*ehTry);
+                break;
+            }
+            case OP_endtry: {
+                DEBUG_ASSERT(lastTry != nullptr, "lastTry is nullptr when current node is endtry");
+                lastTry->SetEndtryNode(*stmt);
+                lastTry = nullptr;
+                curTry = nullptr;
+                break;
+            }
+            case OP_catch: {
+                CatchNode *catchNode = static_cast<CatchNode*>(stmt);
+                DEBUG_ASSERT(stmt->GetPrev()->GetOpCode() == OP_label, "catch's previous node is not a label");
+                LabelNode *labelStmt = static_cast<LabelNode*>(stmt->GetPrev());
+                catchVec.emplace_back(std::pair<LabelIdx, CatchNode*>(labelStmt->GetLabelIdx(), catchNode));
+                /* rename the type of <*void> to <*Throwable> */
+                for (uint32 i = 0; i < catchNode->Size(); i++) {
+                    MIRType *ehType =
+                        GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(i));
+                    DEBUG_ASSERT(ehType->GetKind() == kTypePointer, "ehType must be kTypePointer.");
+                    MIRPtrType *ehPointedTy = static_cast<MIRPtrType*>(ehType);
+                    if (ehPointedTy->GetPointedTyIdx() == static_cast<TyIdx>(PTY_void)) {
+                        DEBUG_ASSERT(mirModule->GetThrowableTyIdx() != 0, "throwable type id is 0");
+                        const MIRType *throwType =
+                            GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirModule->GetThrowableTyIdx());
+                        MIRType *pointerType = cgFunc->GetBecommon().BeGetOrCreatePointerType(*throwType);
+                        catchNode->SetExceptionTyIdxVecElement(pointerType->GetTypeIndex(), i);
+                    }
+                }
+                break;
+            }
+            case OP_throw: {
+                if (!cgFunc->GetCG()->GetCGOptions().GenerateExceptionHandlingCode() ||
+                    (cgFunc->GetCG()->IsExclusiveEH() && cgFunc->GetCG()->IsExclusiveFunc(mirFunc))) {
+                    /* remove the statement */
+                    BlockNode *bodyNode = mirFunc.GetBody();
+                    bodyNode->RemoveStmt(stmt);
+                    break;
+                }
+                UnaryStmtNode *throwNode = static_cast<UnaryStmtNode*>(stmt);
+                EHThrow *ehReThrow = cgFunc->GetMemoryPool()->New<EHThrow>(*throwNode);
+                ehReThrow->SetJavaTry(curTry);
+                AddRethrow(*ehReThrow);
+                break;
+            }
+            case OP_block:
+                CHECK_FATAL(false, "should've lowered earlier");
+            default:
+                break;
+        }
+    }
+}
+
+void EHTry::DumpEHTry(const MIRModule &mirModule)
+{
+    if (tryNode != nullptr) {
+        tryNode->Dump();
+    }
+
+    if (endTryNode != nullptr) {
+        endTryNode->Dump();
+    }
+
+    for (const auto *currCatch : catchVec) {
+        if (currCatch == nullptr) {
+            continue;
+        }
+        currCatch->Dump();
+    }
+}
+
+void EHThrow::ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg)
+{
+    MIRFunction &mirFunc = cgFunc.GetFunction();
+    MIRModule *mirModule = mirFunc.GetModule();
+    MIRFunction *calleeFunc =
+
mirModule->GetMIRBuilder()->GetOrCreateFunction("MCC_ThrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + calleeFunc->SetNoReturn(); + MapleVector args(mirModule->GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(&arg); + CallNode *callAssign = mirModule->GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callAssign); +} + +void EHThrow::ConvertThrowToRethrow(CGFunc &cgFunc) +{ + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + MIRFunction *unFunc = mirBuilder->GetOrCreateFunction("MCC_RethrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*unFunc->GetMIRFuncType()); + unFunc->SetNoReturn(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(rethrow->Opnd(0)); + CallNode *callNode = mirBuilder->CreateStmtCall(unFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callNode); +} + +void EHThrow::Lower(CGFunc &cgFunc) +{ + BaseNode *opnd0 = rethrow->Opnd(0); + DEBUG_ASSERT(((opnd0->GetPrimType() == GetLoweredPtrType()) || (opnd0->GetPrimType() == PTY_ref)), + "except a dread of a pointer to get its type"); + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + DEBUG_ASSERT(mirBuilder != nullptr, "get mirBuilder failed in EHThrow::Lower"); + MIRSymbol *mirSymbol = nullptr; + BaseNode *arg = nullptr; + MIRType *pstType = nullptr; + switch (opnd0->GetOpCode()) { + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + mirSymbol = mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx()); + DEBUG_ASSERT(mirSymbol != nullptr, "get symbol failed in EHThrow::Lower"); + pstType = mirSymbol->GetType(); + arg = drNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_iread: { + IreadNode *irNode = static_cast(opnd0); + MIRPtrType *pointerTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + if (irNode->GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + DEBUG_ASSERT(structTy != nullptr, "structTy is nullptr in EHThrow::Lower "); + pstType = structTy->GetFieldType(irNode->GetFieldID()); + } else { + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + } + arg = irNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_regread: { + RegreadNode *rrNode = static_cast(opnd0); + MIRPreg *pReg = mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx()); + DEBUG_ASSERT(pReg->GetPrimType() == GetLoweredPtrType(), "must be a pointer type"); + pstType = pReg->GetMIRType(); + arg = rrNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx()); + arg = retypeNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_cvt: { + TypeCvtNode *cvtNode = static_cast(opnd0); + PrimType prmType = cvtNode->GetPrimType(); + // prmType supposed to be Pointer. + if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) { + ConvertThrowToRethrow(cgFunc); + } + return; + } + default: + DEBUG_ASSERT(false, " NYI throw something"); + } + CHECK_FATAL(pstType != nullptr, "pstType is null in EHThrow::Lower"); + if (pstType->GetKind() != kTypePointer) { + LogInfo::MapleLogger() << "Error in function " << mirFunc.GetName() << "\n"; + rethrow->Dump(); + LogInfo::MapleLogger() << "pstType is supposed to be Pointer, but is not"; + pstType->Dump(0); + CHECK_FATAL(false, "throw operand type kind must be kTypePointer"); + } + + MIRType *stType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pstType)->GetPointedTyIdx()); + if (!IsUnderTry()) { + /* + * in this case the throw happens without a try...endtry wrapping it, need to generate lsda. 
+ * insert 2 labels before and after throw + */ + LabelNode *throwBeginLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + LabelNode *throwEndLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + BlockNode *bodyNode = mirFunc.GetBody(); + bodyNode->InsertBefore(rethrow, throwBeginLbl); + bodyNode->InsertAfter(rethrow, throwEndLbl); + startLabel = throwBeginLbl; + endLabel = throwEndLbl; + } + + if (stType->GetKind() == kTypeClass) { + ConvertThrowToRuntime(cgFunc, *arg); + } else { + ConvertThrowToRethrow(cgFunc); + } +} + +EHFunc::EHFunc(CGFunc &func) + : cgFunc(&func), + tryVec(func.GetFuncScopeAllocator()->Adapter()), + ehTyTable(func.GetFuncScopeAllocator()->Adapter()), + ty2IndexTable(std::less(), func.GetFuncScopeAllocator()->Adapter()), + rethrowVec(func.GetFuncScopeAllocator()->Adapter()) +{ +} + +EHFunc *CGFunc::BuildEHFunc() +{ + EHFunc *newEHFunc = GetMemoryPool()->New(*this); + SetEHFunc(*newEHFunc); + std::vector> catchVec; + newEHFunc->CollectEHInformation(catchVec); + newEHFunc->MergeCatchToTry(catchVec); + newEHFunc->BuildEHTypeTable(catchVec); + newEHFunc->InsertEHSwitchTable(); + newEHFunc->InsertCxaAfterEachCatch(catchVec); + newEHFunc->GenerateCleanupLabel(); + + GetBecommon().BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetVoid()); + if (newEHFunc->NeedFullLSDA()) { + newEHFunc->CreateLSDA(); + } else if (newEHFunc->HasThrow()) { + newEHFunc->LowerThrow(); + } + if (GetCG()->GetCGOptions().GenerateExceptionHandlingCode()) { + newEHFunc->CreateTypeInfoSt(); + } + + return newEHFunc; +} + +bool EHFunc::NeedFullLSDA() const +{ + if (cgFunc->GetFunction().IsJava()) { + return HasTry(); + } else { + return false; + } +} + +bool EHFunc::NeedFastLSDA() const +{ + if (cgFunc->GetFunction().IsJava()) { + return !HasTry(); + } else { + return false; + } +} + +bool EHFunc::HasTry() const +{ + return !tryVec.empty(); +} + +void EHFunc::CreateTypeInfoSt() +{ + MIRFunction &mirFunc = cgFunc->GetFunction(); + bool ctorDefined = false; + if (mirFunc.GetAttr(FUNCATTR_constructor) && !mirFunc.GetAttr(FUNCATTR_static) && (mirFunc.GetBody() != nullptr)) { + ctorDefined = true; + } + + if (!ctorDefined) { + return; + } + + const auto *classType = static_cast(mirFunc.GetClassType()); + if (cgFunc->GetMirModule().IsCModule() && classType == nullptr) { + return; + } + DEBUG_ASSERT(classType != nullptr, ""); + if (classType->GetMethods().empty() && (classType->GetFieldsSize() == 0)) { + return; + } + + if (classType->GetExceptionRootType() == nullptr) { + return; /* not a exception type */ + } +} + +void EHFunc::LowerThrow() +{ + MIRFunction &mirFunc = cgFunc->GetFunction(); + /* just lower without building LSDA */ + for (EHThrow *rethrow : rethrowVec) { + BaseNode *opnd0 = rethrow->GetRethrow()->Opnd(0); + /* except a dread of a point to get its type */ + switch (opnd0->GetOpCode()) { + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + DEBUG_ASSERT(GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx())->GetKind() == + kTypePointer, + "expecting a pointer type"); + rethrow->ConvertThrowToRuntime( + *cgFunc, *retypeNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + DEBUG_ASSERT(mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx())->GetType()->GetKind() == kTypePointer, + "expect pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, + *drNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case 
OP_iread: { + IreadNode *irNode = static_cast(opnd0); + MIRPtrType *receiverPtrType = nullptr; + if (irNode->GetFieldID() != 0) { + MIRPtrType *pointerTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + DEBUG_ASSERT(structTy != nullptr, "structTy is nullptr in EHFunc::LowerThrow"); + receiverPtrType = static_cast(structTy->GetFieldType(irNode->GetFieldID())); + } else { + receiverPtrType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + receiverPtrType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(receiverPtrType->GetPointedTyIdx())); + } + DEBUG_ASSERT(receiverPtrType->GetKind() == kTypePointer, "expecting a pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, + *irNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_regread: { + RegreadNode *rrNode = static_cast(opnd0); + DEBUG_ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetPrimType() == + GetLoweredPtrType(), + "expect GetLoweredPtrType()"); + DEBUG_ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetMIRType()->GetKind() == + kTypePointer, + "expect pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, + *rrNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_constval: { + ConstvalNode *constValNode = static_cast(opnd0); + BaseNode *newNode = constValNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator()); + DEBUG_ASSERT(newNode != nullptr, "nullptr check"); + rethrow->ConvertThrowToRuntime(*cgFunc, *newNode); + break; + } + case OP_cvt: { + TypeCvtNode *cvtNode = static_cast(opnd0); + PrimType prmType = cvtNode->GetPrimType(); + // prmType supposed to be Pointer. 
+                if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) {
+                    BaseNode *newNode = cvtNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator());
+                    rethrow->ConvertThrowToRuntime(*cgFunc, *newNode);
+                }
+                break;
+            }
+            default:
+                DEBUG_ASSERT(false, "unexpected or NYI");
+        }
+    }
+}
+
+/*
+ * merge each catch into its try
+ */
+void EHFunc::MergeCatchToTry(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec)
+{
+    size_t tryOffsetCount;
+    for (auto *ehTry : tryVec) {
+        tryOffsetCount = ehTry->GetTryNode()->GetOffsetsCount();
+        for (size_t i = 0; i < tryOffsetCount; i++) {
+            auto offset = ehTry->GetTryNode()->GetOffset(i);
+            for (const auto &catchVecPair : catchVec) {
+                LabelIdx lbIdx = catchVecPair.first;
+                if (lbIdx == offset) {
+                    ehTry->PushBackCatchVec(*catchVecPair.second);
+                    break;
+                }
+            }
+        }
+        CHECK_FATAL(ehTry->GetCatchVecSize() == tryOffsetCount,
+                    "EHTry instance offset does not equal catch node amount.");
+    }
+}
+
+/* catchVec is going to be released by the caller */
+void EHFunc::BuildEHTypeTable(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec)
+{
+    if (!catchVec.empty()) {
+        /* the first entry is assumed to be <*void> */
+        TyIdx voidTyIdx(PTY_void);
+        ehTyTable.emplace_back(voidTyIdx);
+        ty2IndexTable[voidTyIdx] = 0;
+        /* create the void pointer and update becommon's size table */
+        cgFunc->GetBecommon().UpdateTypeTable(*GlobalTables::GetTypeTable().GetVoidPtr());
+    }
+
+    /* create the type table for this function, just iterate each catch */
+    CatchNode *jCatchNode = nullptr;
+    size_t catchNodeSize;
+    for (const auto &catchVecPair : catchVec) {
+        jCatchNode = catchVecPair.second;
+        catchNodeSize = jCatchNode->Size();
+        for (size_t i = 0; i < catchNodeSize; i++) {
+            MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jCatchNode->GetExceptionTyIdxVecElement(i));
+            DEBUG_ASSERT(mirTy->GetKind() == kTypePointer, "mirTy is not pointer type");
+            TyIdx ehTyIdx = static_cast<MIRPtrType*>(mirTy)->GetPointedTyIdx();
+            if (ty2IndexTable.find(ehTyIdx) != ty2IndexTable.end()) {
+                continue;
+            }
+
+            ty2IndexTable[ehTyIdx] = ehTyTable.size();
+            ehTyTable.emplace_back(ehTyIdx);
+            MIRClassType *catchType =
+                static_cast<MIRClassType*>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyIdx));
+            MIRClassType *rootType = catchType->GetExceptionRootType();
+            if (rootType == nullptr) {
+                rootType = static_cast<MIRClassType*>(GlobalTables::GetTypeTable().GetOrCreateClassType(
+                    "Ljava_2Flang_2FThrowable_3B", *GlobalTables::GetGsymTable().GetModule()));
+                catchType->SetParentTyIdx(rootType->GetTypeIndex());
+            }
+        }
+    }
+}
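BuildEHTypeTable is an interning pass: each distinct exception type gets one slot in the ordered LSDA type table plus a reverse index, with slot 0 reserved for the <*void> catch-all. In miniature (plain integers stand in for TyIdx; all names invented):

```cpp
#include <cstdint>
#include <map>
#include <vector>

// The ordered table becomes the LSDA type table, and the map gives a type
// its stable index. Slot 0 is the catch-all entry.
struct ToyEHTypeTable {
    std::vector<std::uint32_t> table{0};
    std::map<std::uint32_t, std::uint32_t> index{{0, 0}};

    std::uint32_t Intern(std::uint32_t tyIdx)
    {
        auto it = index.find(tyIdx);
        if (it != index.end()) {
            return it->second;  // already present, reuse the existing slot
        }
        auto slot = static_cast<std::uint32_t>(table.size());
        table.push_back(tyIdx);
        index.emplace(tyIdx, slot);
        return slot;
    }
};
```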
LogInfo::MapleLogger() << "========== end type-index map ========\n"; +} + +/* + * cleanup_label is an LabelNode, and placed just before endLabel. + * cleanup_label is the first statement of cleanupbb. + * the layout of clean up code is: + * //return bb + * ... + * //cleanup bb = lastbb->prev; cleanupbb->PrependBB(retbb) + * cleanup_label: + * ... + * //lastbb + * endLabel: + * .cfi_endproc + * .Label.xx.end: + * .size + */ +void EHFunc::GenerateCleanupLabel() +{ + MIRModule *mirModule = cgFunc->GetFunction().GetModule(); + cgFunc->SetCleanupLabel(*mirModule->GetMIRBuilder()->CreateStmtLabel(CreateLabel(".LCLEANUP"))); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + blockNode->InsertBefore(cgFunc->GetEndLabel(), cgFunc->GetCleanupLabel()); +} + +void EHFunc::InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, const StmtNode &beforeEndLabel) +{ + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + LabelIdx dfLabIdx = cgFunc->GetFunction().GetLabelTab()->CreateLabel(); + cgFunc->GetFunction().GetLabelTab()->AddToStringLabelMap(dfLabIdx); + StmtNode *dfLabStmt = mirModule.GetMIRBuilder()->CreateStmtLabel(dfLabIdx); + blkNode.InsertAfter(&beforeEndLabel, dfLabStmt); + MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("abort", static_cast(PTY_void)); + cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + CallNode *callExit = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + blkNode.InsertAfter(dfLabStmt, callExit); + switchNode.SetDefaultLabel(dfLabIdx); +} + +void EHFunc::FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry) +{ + CatchNode *catchNode = nullptr; + MIRType *exceptionType = nullptr; + MIRPtrType *ptType = nullptr; + size_t catchVecSize = ehTry.GetCatchVecSize(); + /* update switch node's cases */ + for (size_t i = 0; i < catchVecSize; i++) { + catchNode = ehTry.GetCatchNodeAt(i); + for (size_t j = 0; j < catchNode->Size(); j++) { + exceptionType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(j)); + ptType = static_cast(exceptionType); + MapleMap::iterator ty2IdxIt = ty2IndexTable.find(ptType->GetPointedTyIdx()); + DEBUG_ASSERT(ty2IdxIt != ty2IndexTable.end(), "find tyIdx failed!"); + uint32 tableIdx = ty2IdxIt->second; + LabelNode *catchLabelNode = static_cast(catchNode->GetPrev()); + CasePair p(tableIdx, catchLabelNode->GetLabelIdx()); + bool inserted = false; + for (auto x : switchNode.GetSwitchTable()) { + if (x == p) { + inserted = true; + break; + } + } + if (!inserted) { + switchNode.InsertCasePair(p); + } + } + } +} + +/* this is also the landing pad code. */ +void EHFunc::InsertEHSwitchTable() +{ + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + CHECK_FATAL(blockNode != nullptr, "get function body failed in EHThrow::InsertEHSwitchTable"); + StmtNode *endLabelPrevNode = nullptr; + SwitchNode *switchNode = nullptr; + for (auto *ehTry : tryVec) { + endLabelPrevNode = cgFunc->GetEndLabel()->GetPrev(); + /* + * get the next statement of the trynode. 
+
+/* this is also the landing pad code. */
+void EHFunc::InsertEHSwitchTable()
+{
+    MIRModule &mirModule = *cgFunc->GetFunction().GetModule();
+    BlockNode *blockNode = cgFunc->GetFunction().GetBody();
+    CHECK_FATAL(blockNode != nullptr, "get function body failed in EHThrow::InsertEHSwitchTable");
+    StmtNode *endLabelPrevNode = nullptr;
+    SwitchNode *switchNode = nullptr;
+    for (auto *ehTry : tryVec) {
+        endLabelPrevNode = cgFunc->GetEndLabel()->GetPrev();
+        /*
+         * get the next statement of the trynode; when no throw happens in the try block,
+         * jump to that statement directly.
+         * create a switch statement and insert it after tryend;
+         */
+        switchNode = mirModule.CurFuncCodeMemPool()->New<SwitchNode>(mirModule);
+        /* create a new label as the default target; if the program ever executes it, abort */
+        InsertDefaultLabelAndAbortFunc(*blockNode, *switchNode, *endLabelPrevNode);
+        /* create a special symbol that uses the second return value of __builtin_eh_return() */
+        MIRSymbol *mirSymbol = mirModule.GetMIRBuilder()->CreateSymbol(TyIdx(PTY_i32), "__eh_index__", kStVar, kScAuto,
+                                                                       &cgFunc->GetFunction(), kScopeLocal);
+        switchNode->SetSwitchOpnd(mirModule.GetMIRBuilder()->CreateExprDread(*mirSymbol));
+        FillSwitchTable(*switchNode, *ehTry);
+        SwitchLowerer switchLower(mirModule, *switchNode, *cgFunc->GetFuncScopeAllocator());
+        blockNode->InsertBlockAfter(*switchLower.LowerSwitch(), endLabelPrevNode);
+        ehTry->SetFallthruGoto(endLabelPrevNode->GetNext());
+    }
+    if (!CGOptions::IsQuiet()) {
+        cgFunc->GetFunction().Dump();
+    }
+}
+
+LabelIdx EHFunc::CreateLabel(const std::string &cstr)
+{
+    MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx());
+    CHECK_FATAL(mirSymbol != nullptr, "get function symbol failed in EHFunc::CreateLabel");
+    std::string funcName = mirSymbol->GetName();
+    std::string labStr = funcName.append(cstr).append(std::to_string(labelIdx++));
+    return cgFunc->GetFunction().GetOrCreateLableIdxFromName(labStr);
+}
+
+/* think about moving this to BELowerer where LowerThrownval is already written */
+void EHFunc::InsertCxaAfterEachCatch(const std::vector<std::pair<LabelIdx, CatchNode*>> &catchVec)
+{
+    MIRModule &mirModule = *cgFunc->GetFunction().GetModule();
+    BlockNode *funcBody = cgFunc->GetFunction().GetBody();
+    CatchNode *jCatchNode = nullptr;
+    TyIdx voidPTy = GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex();
+    for (const auto &catchVecPair : catchVec) {
+        jCatchNode = catchVecPair.second;
+        MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_JavaBeginCatch", voidPTy);
+        cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType());
+        RegreadNode *retRegRead0 = mirModule.CurFuncCodeMemPool()->New<RegreadNode>();
+        retRegRead0->SetRegIdx(-kSregRetval0);
+        retRegRead0->SetPrimType(GetLoweredPtrType());
+        MapleVector<BaseNode*> args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter());
+        args.emplace_back(retRegRead0);
+        CallNode *callAssign = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args);
+        funcBody->InsertAfter(jCatchNode, callAssign);
+    }
+}
+
+void EHFunc::CreateLSDAHeader()
+{
+    constexpr uint8 startEncoding = 0xff;
+    constexpr uint8 typeEncoding = 0x9b;
+    constexpr uint8 callSiteEncoding = 0x1;
+    MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder();
+
+    LSDAHeader *lsdaHeaders = cgFunc->GetMemoryPool()->New<LSDAHeader>();
+    LabelIdx lsdaHdLblIdx = CreateLabel("LSDAHD"); /* LSDA head */
+    LabelNode *lsdaHdLblNode = mirBuilder->CreateStmtLabel(lsdaHdLblIdx);
+    lsdaHeaders->SetLSDALabel(*lsdaHdLblNode);
+
+    LabelIdx lsdaTTStartIdx = CreateLabel("LSDAALLS"); /* LSDA all start; */
+    LabelNode *lsdaTTLblNode = mirBuilder->CreateStmtLabel(lsdaTTStartIdx);
+    LabelIdx lsdaTTEndIdx = CreateLabel("LSDAALLE"); /* LSDA all end; */
+    LabelNode *lsdaCSTELblNode = mirBuilder->CreateStmtLabel(lsdaTTEndIdx);
+    lsdaHeaders->SetTTypeOffset(lsdaTTLblNode, lsdaCSTELblNode);
+
+    lsdaHeaders->SetLPStartEncoding(startEncoding);
+    lsdaHeaders->SetTTypeEncoding(typeEncoding);
+    lsdaHeaders->SetCallSiteEncoding(callSiteEncoding);
+    lsdaHeader
= lsdaHeaders; +} + +void EHFunc::FillLSDACallSiteTable() +{ + constexpr uint8 callSiteFirstAction = 0x1; + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + BlockNode *bodyNode = cgFunc->GetFunction().GetBody(); + + lsdaCallSiteTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + LabelIdx lsdaCSTStartIdx = CreateLabel("LSDACSTS"); /* LSDA callsite table start; */ + LabelNode *lsdaCSTStartLabel = mirBuilder->CreateStmtLabel(lsdaCSTStartIdx); + LabelIdx lsdaCSTEndIdx = CreateLabel("LSDACSTE"); /* LSDA callsite table end; */ + LabelNode *lsdaCSTEndLabel = mirBuilder->CreateStmtLabel(lsdaCSTEndIdx); + lsdaCallSiteTable->SetCSTable(lsdaCSTStartLabel, lsdaCSTEndLabel); + + /* create LDSACallSite for each EHTry instance */ + for (auto *ehTry : tryVec) { + DEBUG_ASSERT(ehTry != nullptr, "null ptr check"); + /* replace try with a label which is the callsite_start */ + LabelIdx csStartLblIdx = CreateLabel("LSDACS"); + LabelNode *csLblNode = mirBuilder->CreateStmtLabel(csStartLblIdx); + LabelIdx csEndLblIdx = CreateLabel("LSDACE"); + LabelNode *ceLblNode = mirBuilder->CreateStmtLabel(csEndLblIdx); + TryNode *tryNode = ehTry->GetTryNode(); + bodyNode->ReplaceStmt1WithStmt2(tryNode, csLblNode); + StmtNode *endTryNode = ehTry->GetEndtryNode(); + bodyNode->ReplaceStmt1WithStmt2(endTryNode, ceLblNode); + + LabelNode *ladpadEndLabel = nullptr; + if (ehTry->GetFallthruGoto()) { + ladpadEndLabel = mirBuilder->CreateStmtLabel(CreateLabel("LSDALPE")); + bodyNode->InsertBefore(ehTry->GetFallthruGoto(), ladpadEndLabel); + } else { + ladpadEndLabel = ceLblNode; + } + /* When there is only one catch, the exception table is optimized. */ + if (ehTry->GetCatchVecSize() == 1) { + ladpadEndLabel = static_cast(ehTry->GetCatchNodeAt(0)->GetPrev()); + } + + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), csLblNode); + LabelPair csLength(csLblNode, ceLblNode); + LabelPair csLandingPad(cgFunc->GetStartLabel(), ladpadEndLabel); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteFirstAction); + ehTry->SetLSDACallSite(*lsdaCallSite); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } +} + +void EHFunc::CreateLSDA() +{ + constexpr uint8 callSiteCleanUpAction = 0x0; + /* create header */ + CreateLSDAHeader(); + /* create and fill callsite table */ + FillLSDACallSiteTable(); + + for (auto *rethrow : rethrowVec) { + DEBUG_ASSERT(rethrow != nullptr, "null ptr check"); + /* replace throw (void * obj) with call __java_rethrow and unwind resume */ + rethrow->Lower(*cgFunc); + if (rethrow->HasLSDA()) { + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), rethrow->GetStartLabel()); + LabelPair csLength(rethrow->GetStartLabel(), rethrow->GetEndLabel()); + LabelPair csLandingPad(nullptr, nullptr); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteCleanUpAction); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } + } + + /* LSDAAction table */ + CreateLSDAAction(); +} + +void EHFunc::CreateLSDAAction() +{ + constexpr uint8 actionTableNextEncoding = 0x7d; + /* iterate each try and its corresponding catch */ + LSDAActionTable *actionTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + lsdaActionTable = actionTable; + + for (auto *ehTry : tryVec) { + LSDAAction *lastAction = nullptr; + for (int32 j = static_cast(ehTry->GetCatchVecSize()) - 1; j >= 0; --j) { + CatchNode *catchNode = ehTry->GetCatchNodeAt(j); + DEBUG_ASSERT(catchNode != 
nullptr, "null ptr check"); + for (uint32 idx = 0; idx < catchNode->Size(); ++idx) { + MIRPtrType *ptType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(idx))); + uint32 tyIndex = ty2IndexTable[ptType->GetPointedTyIdx()]; /* get the index of ptType of ehTyTable; */ + DEBUG_ASSERT(tyIndex != 0, "exception type index not allow equal zero"); + LSDAAction *lsdaAction = cgFunc->GetMemoryPool()->New( + tyIndex, lastAction == nullptr ? 0 : actionTableNextEncoding); + lastAction = lsdaAction; + actionTable->PushBack(*lsdaAction); + } + } + + /* record actionTable group offset, per LSDAAction object in actionTable occupy 2 bytes */ + ehTry->SetCSAction((actionTable->Size() - 1) * 2 + 1); + } +} + +bool CgBuildEHFunc::PhaseRun(maplebe::CGFunc &f) +{ + f.BuildEHFunc(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgBuildEHFunc, buildehfunc) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/emit.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/emit.cpp new file mode 100644 index 0000000000000000000000000000000000000000..39ead08f23ebad241e9d79a4f0eb2264575b9df3 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/emit.cpp @@ -0,0 +1,3759 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "emit.h" +#include +#ifdef _WIN32 +#include +#endif +#include "reflection_analysis.h" +#include "muid_replacement.h" +#include "metadata_layout.h" +#include "string_utils.h" +using namespace namemangler; + +namespace { +using namespace maple; +constexpr uint32 kSizeOfHugesoRoutine = 3; +constexpr uint32 kFromDefIndexMask32Mod = 0x40000000; + +int32 GetPrimitiveTypeSize(const std::string &name) +{ + if (name.length() != 1) { + return -1; + } + char typeName = name[0]; + switch (typeName) { + case 'Z': + return static_cast(GetPrimTypeSize(PTY_u1)); + case 'B': + return static_cast(GetPrimTypeSize(PTY_i8)); + case 'S': + return static_cast(GetPrimTypeSize(PTY_i16)); + case 'C': + return static_cast(GetPrimTypeSize(PTY_u16)); + case 'I': + return static_cast(GetPrimTypeSize(PTY_i32)); + case 'J': + return static_cast(GetPrimTypeSize(PTY_i64)); + case 'F': + return static_cast(GetPrimTypeSize(PTY_f32)); + case 'D': + return static_cast(GetPrimTypeSize(PTY_f64)); + case 'V': + return static_cast(GetPrimTypeSize(PTY_void)); + default: + return -1; + } +} +DBGDieAttr *LFindAttribute(MapleVector &vec, DwAt key) +{ + for (DBGDieAttr *at : vec) + if (at->GetDwAt() == key) { + return at; + } + return nullptr; +} + +DBGAbbrevEntry *LFindAbbrevEntry(MapleVector &abbvec, unsigned int key) +{ + for (DBGAbbrevEntry *daie : abbvec) { + if (!daie) { + continue; + } + if (daie->GetAbbrevId() == key) { + return daie; + } + } + DEBUG_ASSERT(0, ""); + return nullptr; +} + +bool LShouldEmit(unsigned int dwform) +{ + return dwform != DW_FORM_flag_present; +} + +DBGDie *LFindChildDieWithName(DBGDie *die, DwTag tag, const GStrIdx key) +{ + for (DBGDie *c : die->GetSubDieVec()) { + if (c->GetTag() == tag) { + for (DBGDieAttr *a : c->GetAttrVec()) { + if (a->GetDwAt() == DW_AT_name) { + if ((a->GetDwForm() == DW_FORM_string || a->GetDwForm() == DW_FORM_strp) && + a->GetId() == key.GetIdx()) { + return c; + } else { + break; + } + } + } + } + } + return nullptr; +} + +DBGDieAttr *LFindDieAttr(DBGDie *die, DwAt attrname) +{ + for (DBGDieAttr *attr : die->GetAttrVec()) { + if (attr->GetDwAt() == attrname) { + return attr; + } + } + return nullptr; +} + +static void LUpdateAttrValue(DBGDieAttr *attr, int64_t newval) +{ + attr->SetI(int32_t(newval)); +} +} // namespace + +namespace maplebe { +using namespace maple; +using namespace cfi; + +void Emitter::EmitLabelRef(LabelIdx labIdx) +{ + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + fileStream << ".L." << idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +void Emitter::EmitStmtLabel(LabelIdx labIdx) +{ + EmitLabelRef(labIdx); + fileStream << ":\n"; +} + +void Emitter::EmitLabelPair(const LabelPair &pairLabel) +{ + DEBUG_ASSERT(pairLabel.GetEndOffset() || pairLabel.GetStartOffset(), "NYI"); + EmitLabelRef(pairLabel.GetEndOffset()->GetLabelIdx()); + fileStream << " - "; + EmitLabelRef(pairLabel.GetStartOffset()->GetLabelIdx()); + fileStream << "\n"; +} + +void Emitter::EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx) +{ + char *idx = strdup(std::to_string(func->GetPuidx()).c_str()); + fileStream << ".L." 
<< idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +AsmLabel Emitter::GetTypeAsmInfoName(PrimType primType) const +{ + uint32 size = GetPrimTypeSize(primType); + /* case x : x occupies bytes of pty */ + switch (size) { + case k1ByteSize: + return kAsmByte; + case k2ByteSize: +#if TARGAARCH64 || TARGRISCV64 + return kAsmShort; +#else + return kAsmValue; +#endif + case k4ByteSize: + return kAsmLong; + case k8ByteSize: + return kAsmQuad; + default: + DEBUG_ASSERT(false, "NYI"); + break; + } + return kAsmLong; +} + +void Emitter::EmitFileInfo(const std::string &fileName) +{ +#if defined(_WIN32) || defined(DARWIN) + char *curDirName = getcwd(nullptr, 0); +#else + char *curDirName = get_current_dir_name(); +#endif + CHECK_FATAL(curDirName != nullptr, "null ptr check "); + Emit(asmInfo->GetCmnt()); + std::string path(curDirName); +#ifdef _WIN32 + std::string cgFile(path.append("\\mplcg")); +#else + std::string cgFile(path.append("/mplcg")); +#endif + Emit(cgFile); + Emit("\n"); + + std::string compile("Compiling "); + Emit(asmInfo->GetCmnt()); + Emit(compile); + Emit("\n"); + + std::string beOptions("Be options"); + Emit(asmInfo->GetCmnt()); + Emit(beOptions); + Emit("\n"); + + path = curDirName; + path.append("/").append(fileName); + /* strip path before out/ */ + std::string out = "/out/"; + size_t pos = path.find(out.c_str(), 0, out.length()); + if (pos != std::string::npos) { + path.erase(0, pos + 1); + } + std::string irFile("\""); + irFile.append(path).append("\""); + Emit(asmInfo->GetFile()); + Emit(irFile); + Emit("\n"); + + /* save directory path in index 8 */ + SetFileMapValue(0, path); + + /* .file #num src_file_name */ + if (cg->GetCGOptions().WithLoc()) { + /* .file 1 mpl_file_name */ + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit("1 "); + Emit(irFile); + Emit("\n"); + SetFileMapValue(1, irFile); /* save ir file in 1 */ + if (cg->GetCGOptions().WithSrc()) { + /* insert a list of src files */ + uint32 i = 2; + for (auto it : cg->GetMIRModule()->GetSrcFileInfo()) { + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit(it.second).Emit(" \""); + std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + Emit(kStr); + Emit("\"\n"); + SetFileMapValue(i++, kStr); + } + } + } + free(curDirName); + + EmitInlineAsmSection(); +#if TARGARM32 + Emit("\t.syntax unified\n"); + /* + * "The arm instruction set is a subset of + * the most commonly used 32-bit ARM instructions." 
+ * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0210c/CACBCAAE.html + */ + Emit("\t.arm\n"); + Emit("\t.fpu vfpv4\n"); + Emit("\t.arch armv7-a\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RW_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RO_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_GOT_use, 2\n"); + if (CGOptions::GetABIType() == CGOptions::kABIHard) { + Emit("\t.eabi_attribute Tag_ABI_VFP_args, 1\n"); + } + Emit("\t.eabi_attribute Tag_ABI_FP_denormal, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_exceptions, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_number_model, 3\n"); + Emit("\t.eabi_attribute Tag_ABI_align_needed, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_align_preserved, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_enum_size, 2\n"); + Emit("\t.eabi_attribute 30, 6\n"); + Emit("\t.eabi_attribute Tag_CPU_unaligned_access, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_wchar_t, 4\n"); +#endif /* TARGARM32 */ +} + +void Emitter::EmitInlineAsmSection() +{ + MapleVector &asmSections = cg->GetMIRModule()->GetAsmDecls(); + if (!asmSections.empty()) { + Emit("#APP\n"); + for (auto &singleSection : asmSections) { + Emit("\t"); + Emit(singleSection); + Emit("\n"); + } + Emit("#NO_APP\n"); + } +} +void Emitter::EmitAsmLabel(AsmLabel label) +{ + switch (label) { + case kAsmData: { + (void)Emit(asmInfo->GetData()); + (void)Emit("\n"); + return; + } + case kAsmText: { + (void)Emit(asmInfo->GetText()); + (void)Emit("\n"); + return; + } + case kAsmType: { + (void)Emit(asmInfo->GetType()); + return; + } + case kAsmByte: { + (void)Emit(asmInfo->GetByte()); + return; + } + case kAsmShort: { + (void)Emit(asmInfo->GetShort()); + return; + } + case kAsmValue: { + (void)Emit(asmInfo->GetValue()); + return; + } + case kAsmLong: { + (void)Emit(asmInfo->GetLong()); + return; + } + case kAsmQuad: { + (void)Emit(asmInfo->GetQuad()); + return; + } + case kAsmZero: + (void)Emit(asmInfo->GetZero()); + return; + default: + DEBUG_ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) +{ + MIRType *mirType = mirSymbol.GetType(); + std::string symName; + if (mirSymbol.GetStorageClass() == kScPstatic && mirSymbol.IsLocal()) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + symName = mirSymbol.GetName() + std::to_string(pIdx); + } else { + symName = mirSymbol.GetName(); + } + if (mirSymbol.GetAsmAttr() != UStrIdx(0) && + (mirSymbol.GetStorageClass() == kScPstatic || mirSymbol.GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol.GetAsmAttr()); + symName = asmSection; + } + if (Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeAlignTable()) { + DEBUG_ASSERT(false, "container empty check"); + } + + switch (label) { + case kAsmGlbl: { + Emit(asmInfo->GetGlobal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmHidden: { + Emit(asmInfo->GetHidden()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmLocal: { + Emit(asmInfo->GetLocal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmWeak: { + Emit(asmInfo->GetWeak()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmZero: { + uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + EmitNullConstant(size); + return; + } + case kAsmComm: { + std::string size; + if (isFlexibleArray) { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + + arraySize); + } else { + size = 
std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + (void)Emit(asmInfo->GetComm()).Emit(symName).Emit(", ").Emit(size).Emit(", "); +#if PECOFF +#if TARGARM || TARGAARCH64 || TARGARK || TARGRISCV64 + std::string align = std::to_string( + static_cast(log2(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())))); +#else + std::string align = + std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())); +#endif + emit(align.c_str()); +#else /* ELF */ + /* output align, symbol name begin with "classInitProtectRegion" align is 4096 */ + MIRTypeKind kind = mirSymbol.GetType()->GetKind(); + MIRStorageClass storage = mirSymbol.GetStorageClass(); + if (symName.find("classInitProtectRegion") == 0) { + Emit(4096); + } else if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || + (kind == kTypeUnion)) && + ((storage == kScGlobal) || (storage == kScPstatic) || (storage == kScFstatic))) { + int32 align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()); + if (GetPointerSize() < align) { + (void)Emit(std::to_string(align)); + } else { + (void)Emit(std::to_string(k8ByteSize)); + } + } else { + (void)Emit( + std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()))); + } +#endif + Emit("\n"); + return; + } + case kAsmAlign: { + uint8 align = mirSymbol.GetAttrs().GetAlignValue(); + if (align == 0) { + if (mirSymbol.GetType()->GetKind() == kTypeStruct || mirSymbol.GetType()->GetKind() == kTypeClass || + mirSymbol.GetType()->GetKind() == kTypeArray || mirSymbol.GetType()->GetKind() == kTypeUnion) { +#if TARGX86 || TARGX86_64 + return; +#else + align = kAlignOfU8; +#endif + } else { + align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex()); +#if TARGARM32 || TARGAARCH64 || TARGARK || TARGRISCV64 + if (CGOptions::IsArm64ilp32() && mirSymbol.GetType()->GetPrimType() == PTY_a32) { + align = kAlignOfU8; + } else { + align = static_cast(log2(align)); + } +#endif + } + } + Emit(asmInfo->GetAlign()); + Emit(std::to_string(align)); + Emit("\n"); + return; + } + case kAsmSyname: { + Emit(symName); + Emit(":\n"); + return; + } + case kAsmSize: { + Emit(asmInfo->GetSize()); + Emit(symName); + Emit(", "); +#if TARGX86 || TARGX86_64 + Emit(".-"); + Emit(symName); +#else + std::string size; + if (isFlexibleArray) { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + + arraySize); + } else { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + Emit(size); +#endif + Emit("\n"); + return; + } + case kAsmType: { + Emit(asmInfo->GetType()); + if (GetCG()->GetMIRModule()->IsCModule() && (symName == "sys_nerr" || symName == "sys_errlist")) { + /* eliminate warning from deprecated C name */ + Emit("strerror"); + } else { + Emit(symName); + } + Emit(","); + Emit(asmInfo->GetAtobt()); + Emit("\n"); + return; + } + default: + DEBUG_ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitNullConstant(uint64 size) +{ + EmitAsmLabel(kAsmZero); + Emit(std::to_string(size)); + Emit("\n"); +} + +void Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo) +{ + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + auto emitBfldValue = [&structEmitInfo, charBitWidth, this](bool flag) { + while (structEmitInfo.GetCombineBitFieldWidth() > charBitWidth) { + uint8 shift = flag ? 
(structEmitInfo.GetCombineBitFieldWidth() - charBitWidth) : 0U; + uint64 tmp = (structEmitInfo.GetCombineBitFieldValue() >> shift) & 0x00000000000000ffUL; + EmitAsmLabel(kAsmByte); + Emit(std::to_string(tmp)); + Emit("\n"); + structEmitInfo.DecreaseCombineBitFieldWidth(charBitWidth); + uint64 value = + flag ? structEmitInfo.GetCombineBitFieldValue() - (tmp << structEmitInfo.GetCombineBitFieldWidth()) + : structEmitInfo.GetCombineBitFieldValue() >> charBitWidth; + structEmitInfo.SetCombineBitFieldValue(value); + } + }; + if (CGOptions::IsBigEndian()) { + /* + * If the total number of bits in the bit field is not a multiple of 8, + * the bits must be aligned to 8 bits to prevent errors in the emit. + */ + auto width = static_cast(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); + if (structEmitInfo.GetCombineBitFieldWidth() < width) { + structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() + << (width - structEmitInfo.GetCombineBitFieldWidth())); + structEmitInfo.IncreaseCombineBitFieldWidth( + static_cast(width - structEmitInfo.GetCombineBitFieldWidth())); + } + emitBfldValue(true); + } else { + emitBfldValue(false); + } + if (structEmitInfo.GetCombineBitFieldWidth() != 0) { + EmitAsmLabel(kAsmByte); + uint64 value = structEmitInfo.GetCombineBitFieldValue() & 0x00000000000000ffUL; + Emit(std::to_string(value)); + Emit("\n"); + } + CHECK_FATAL(charBitWidth != 0, "divide by zero"); + if ((structEmitInfo.GetNextFieldOffset() % charBitWidth) != 0) { + uint8 value = charBitWidth - (structEmitInfo.GetNextFieldOffset() % charBitWidth); + structEmitInfo.IncreaseNextFieldOffset(value); + } + structEmitInfo.SetTotalSize(structEmitInfo.GetNextFieldOffset() / charBitWidth); + structEmitInfo.SetCombineBitFieldValue(0); + structEmitInfo.SetCombineBitFieldWidth(0); +} + +void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset) +{ + MIRType &mirType = mirConst.GetType(); + if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { + uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); + structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + EmitCombineBfldValue(structEmitInfo); + DEBUG_ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, + "structEmitInfo's nextFieldOffset should be <= fieldOffset"); + structEmitInfo.SetNextFieldOffset(fieldOffset); + } + uint32 fieldSize = static_cast(mirType).GetFieldSize(); + MIRIntConst &fieldValue = static_cast(mirConst); + /* Truncate the size of FieldValue to the bit field size. 
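+ * For instance, a 5-bit field whose initializer has an actual width of 7
+ * bits (0x7F) is truncated to its low five bits (0x1F) before being merged
+ * into combineBitFieldValue.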
*/ + if (fieldSize < fieldValue.GetActualBitWidth()) { + fieldValue.Trunc(fieldSize); + } + /* Clear higher Bits for signed value */ + if (structEmitInfo.GetCombineBitFieldValue() != 0) { + structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & + structEmitInfo.GetCombineBitFieldValue()); + } + if (CGOptions::IsBigEndian()) { + uint64 beValue = fieldValue.GetExtValue(); + if (fieldValue.IsNegative()) { + beValue = beValue - ((beValue >> fieldSize) << fieldSize); + } + structEmitInfo.SetCombineBitFieldValue((structEmitInfo.GetCombineBitFieldValue() << fieldSize) + beValue); + } else { + structEmitInfo.SetCombineBitFieldValue((fieldValue.GetExtValue() << structEmitInfo.GetCombineBitFieldWidth()) + + structEmitInfo.GetCombineBitFieldValue()); + } + structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); + structEmitInfo.IncreaseNextFieldOffset(fieldSize); + if ((nextType == nullptr) || (kTypeBitField != nextType->GetKind())) { + /* emit structEmitInfo->combineBitFieldValue */ + EmitCombineBfldValue(structEmitInfo); + } +} + +void Emitter::EmitStr(const std::string &mplStr, bool emitAscii, bool emitNewline) +{ + const char *str = mplStr.c_str(); + size_t len = mplStr.size(); + + if (emitAscii) { + Emit("\t.ascii\t\""); /* Do not terminate with \0 */ + } else { + Emit("\t.string\t\""); + } + + /* + * don't expand special character in a writeout to .s, + * convert all \s to \\s in string for storing in .string + */ + for (size_t i = 0; i < len; i++) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + Emit(buf); + } else if (*str == '\b') { + Emit("\\b"); + } else if (*str == '\n') { + Emit("\\n"); + } else if (*str == '\r') { + Emit("\\r"); + } else if (*str == '\t') { + Emit("\\t"); + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + Emit(buf); + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), k4BitSize, "\\%03o", (*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + Emit(buf); + } + str++; + } + + Emit("\""); + if (emitNewline) { + Emit("\n"); + } +} + +void Emitter::EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect) +{ + if (isIndirect) { + uint32 strId = mirStrConst.GetValue().GetIdx(); + + if (stringPtr.find(mirStrConst.GetValue()) == stringPtr.end()) { + stringPtr.insert(mirStrConst.GetValue()); + } + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t").Emit(".LSTR__").Emit(std::to_string(strId).c_str()); + } else { + EmitAsmLabel(kAsmQuad); + (void)Emit(".LSTR__").Emit(std::to_string(strId).c_str()); + } + return; + } + + const std::string ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirStrConst.GetValue()); + size_t len = ustr.size(); + if (isFlexibleArray) { + arraySize += static_cast(len) + 1; + } + EmitStr(ustr, false, false); +} + +void Emitter::EmitStr16Constant(const MIRStr16Const &mirStr16Const) +{ + Emit("\t.byte "); + /* note: for now, u16string is emitted 2 bytes without any \u indication */ + const std::u16string &str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16Const.GetValue()); + 
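+ /*
+ * Each char16_t is printed as two decimal byte values, high byte first:
+ * u"AB" (0x0041, 0x0042) comes out as ".byte 0,65,0,66". An odd-length
+ * string gets a trailing ",0,0" below, padding the emitted data to a
+ * four-byte multiple.
+ */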
constexpr int bufSize = 9; + char buf[bufSize]; + char16_t c = str16[0]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret1 = snprintf_s(buf, sizeof(buf), bufSize - 1, "%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret1 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + for (uint32 i = 1; i < str16.length(); ++i) { + c = str16[i]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret2 = snprintf_s(buf, sizeof(buf), bufSize - 1, ",%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret2 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + } + if ((str16.length() & 0x1) == 1) { + Emit(",0,0"); + } +} + +void Emitter::EmitScalarConstant(MIRConst &mirConst, bool newLine, bool flag32, bool isIndirect) +{ + MIRType &mirType = mirConst.GetType(); + AsmLabel asmName = GetTypeAsmInfoName(mirType.GetPrimType()); + switch (mirConst.GetKind()) { + case kConstInt: { + MIRIntConst &intCt = static_cast(mirConst); + uint32 sizeInBits = GetPrimTypeBitSize(mirType.GetPrimType()); + if (intCt.GetActualBitWidth() > sizeInBits) { + intCt.Trunc(sizeInBits); + } + if (flag32) { + EmitAsmLabel(AsmLabel::kAsmLong); + } else { + EmitAsmLabel(asmName); + } + Emit(intCt.GetValue()); + if (isFlexibleArray) { + arraySize += (sizeInBits / kBitsPerByte); + } + break; + } + case kConstFloatConst: { + MIRFloatConst &floatCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(floatCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k4ByteFloatSize; + } + break; + } + case kConstDoubleConst: { + MIRDoubleConst &doubleCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(doubleCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k8ByteDoubleSize; + } + break; + } + case kConstStrConst: { + MIRStrConst &strCt = static_cast(mirConst); + if (cg->GetMIRModule()->IsCModule()) { + EmitStrConstant(strCt, isIndirect); + } else { + EmitStrConstant(strCt); + } + break; + } + case kConstStr16Const: { + MIRStr16Const &str16Ct = static_cast(mirConst); + EmitStr16Constant(str16Ct); + break; + } + case kConstAddrof: { + MIRAddrofConst &symAddr = static_cast(mirConst); + StIdx stIdx = symAddr.GetSymbolIndex(); + MIRSymbol *symAddrSym = + stIdx.IsGlobal() + ? 
GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + DEBUG_ASSERT(symAddrSym != nullptr, "null ptr check"); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + if (stIdx.IsGlobal() == false && symAddrSym->GetStorageClass() == kScPstatic) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)Emit("\t" + str + "\t" + symAddrSym->GetName() + std::to_string(pIdx)); + } else { + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + } + if (symAddr.GetOffset() != 0) { + (void)Emit(" + ").Emit(symAddr.GetOffset()); + } + if (symAddr.GetFieldID() > 1) { + MIRStructType *structType = static_cast(symAddrSym->GetType()); + DEBUG_ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); + (void)Emit(" + ").Emit( + Globals::GetInstance()->GetBECommon()->GetFieldOffset(*structType, symAddr.GetFieldID()).first); + } + break; + } + case kConstAddrofFunc: { + MIRAddroffuncConst &funcAddr = static_cast(mirConst); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFuncTable().at(funcAddr.GetValue()); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + break; + } + case kConstLblConst: { + MIRLblConst &lbl = static_cast(mirConst); + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t"); + } else { + EmitAsmLabel(kAsmQuad); + } + EmitLabelRef(lbl.GetValue()); + break; + } + default: + DEBUG_ASSERT(false, "NYI"); + break; + } + if (newLine) { + Emit("\n"); + } +} + +void Emitter::EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) +{ + MIRAddroffuncConst &funcAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr.GetValue()); + const std::string &funcName = func->GetName(); + if ((idx == kFuncDefNameIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.name."); + Emit(funcName + " - ."); + Emit("\n"); + return; + } + if ((idx == kFuncDefSizeIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.end."); + Emit(funcName + " - "); + Emit(funcName + "\n"); + return; + } + if ((idx == static_cast(MethodProperty::kPaddrData)) && mirSymbol.IsReflectionMethodsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + if (((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) || + ((idx == static_cast(ClassRO::kClinitAddr)) && mirSymbol.IsReflectionClassInfoRO())) { + Emit("\t.long\t"); + Emit(funcName + " - .\n"); + return; + } + + if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); 
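+ /*
+ * This #if ladder selects the table-slot width: 64-bit targets emit
+ * ".quad" (8 bytes) via kAsmQuad, other targets emit ".word" (4 bytes),
+ * and USE_32BIT_REF builds emit ".long".
+ */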
+#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 6 means kBindingStateMethodDef:6 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x6\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodDef:6. */ +#else + Emit("__BindingProtectRegion__ + 6\n"); +#endif /* USE_32BIT_REF */ + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(funcName); + if ((stName.find(VTAB_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0) || + (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0)) { + Emit(" - .\n"); + return; + } + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +void Emitter::EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) +{ + MIRAddrofConst &symAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr.GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + + if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + mirSymbol.IsReflectionFieldOffsetData()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(FieldPropertyCompact::kPOffset)) && mirSymbol.IsReflectionFieldsInfoCompact()) || + ((idx == static_cast(MethodProperty::kSigName)) && mirSymbol.IsReflectionMethodsInfo()) || + ((idx == static_cast(MethodSignatureProperty::kParameterTypes)) && + mirSymbol.IsReflectionMethodSignature())) { + Emit("\t.long\t"); + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(MethodProperty::kDeclarclass)) || + (idx == static_cast(MethodProperty::kPaddrData))) && + mirSymbol.IsReflectionMethodsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (idx == static_cast(MethodProperty::kDeclarclass)) { + Emit(symAddrName + " - .\n"); + } else { + Emit(symAddrName + " - . 
+ 2\n"); + } + return; + } + + if ((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) { + Emit("\t.long\t"); + Emit(symAddrName + " - . + 2\n"); + return; + } + + if ((idx == static_cast(FieldProperty::kDeclarclass)) && mirSymbol.IsReflectionFieldsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if ((idx == kDataDefAddrIndex) && (mirSymbol.IsMuidDataUndefTab() || mirSymbol.IsMuidDataDefTab())) { + if (symAddrSym->IsReflectionClassInfo()) { + Emit(".LDW.ref." + symAddrName + ":\n"); + } + Emit(kPtrPrefixStr + symAddrName + ":\n"); +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsMuidDataUndefTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 1 means kBindingStateCinfUndef:1 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x1\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfUndef:1. */ +#else + Emit("__BindingProtectRegion__ + 1\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 3 means kBindingStateDataUndef:3 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x3\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataUndef:3. */ +#else + Emit("__BindingProtectRegion__ + 3\n"); +#endif /* USE_32BIT_REF */ + } + } else { + Emit("0\n"); + } + } else { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 2 means kBindingStateCinfDef:2 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x2\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfDef:2. */ +#else + Emit("__BindingProtectRegion__ + 2\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 4 means kBindingStateDataDef:4 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x4\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataDef:4. 
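+ * The same lazy-binding scheme applies throughout this file: each slot is
+ * pre-filled with a small BindingState constant relative to
+ * __BindingProtectRegion__ (cinf undef/def = 1/2, data undef/def = 3/4,
+ * method undef/def = 5/6) and is patched with the real address at runtime.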
*/ +#else + Emit("__BindingProtectRegion__ + 4\n"); +#endif /* USE_32BIT_REF */ + } + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + } + return; + } + + if (idx == kDataDefAddrIndex && mirSymbol.IsMuidDataDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (StringUtils::StartsWith(stName, kLocalClassInfoStr)) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(symAddrName); + Emit(" - . + ").Emit(kDataRefIsOffset); + Emit("\n"); + return; + } +#ifdef USE_32BIT_REF + if (mirSymbol.IsReflectionHashTabBucket() || (stName.find(ITAB_PREFIX_STR) == 0) || + (mirSymbol.IsReflectionClassInfo() && (idx == static_cast(ClassProperty::kInfoRo)))) { + Emit("\t.word\t"); + } else { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + } +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + + if ((stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0)) { + Emit(symAddrName + " - .\n"); + return; + } + if (mirSymbol.IsMuidRangeTab()) { + if (idx == kRangeBeginIndex) { + Emit(symAddrSym->GetMuidTabName() + "_begin\n"); + } else { + Emit(symAddrSym->GetMuidTabName() + "_end\n"); + } + return; + } + + if (symAddrName.find(GCTIB_PREFIX_STR) == 0) { + Emit(cg->FindGCTIBPatternName(symAddrName)); + } else { + Emit(symAddrName); + } + + if ((((idx == static_cast(ClassRO::kIfields)) || (idx == static_cast(ClassRO::kMethods))) && + mirSymbol.IsReflectionClassInfoRO()) || + mirSymbol.IsReflectionHashTabBucket()) { + Emit(" - ."); + if (symAddrSym->IsReflectionFieldsInfoCompact() || symAddrSym->IsReflectionMethodsInfoCompact()) { + /* Mark the least significant bit as 1 for compact fieldinfo */ + Emit(" + ").Emit(MethodFieldRef::kMethodFieldRefIsCompact); + } + } else if (mirSymbol.IsReflectionClassInfo()) { + if ((idx == static_cast(ClassProperty::kItab)) || (idx == static_cast(ClassProperty::kVtab)) || + (idx == static_cast(ClassProperty::kInfoRo))) { + Emit(" - . + ").Emit(kDataRefIsOffset); + } else if (idx == static_cast(ClassProperty::kGctib)) { + if (cg->FindGCTIBPatternName(symAddrName).find(REF_PREFIX_STR) == 0) { + Emit(" - . + ").Emit(kGctibRefIsIndirect); + } else { + Emit(" - ."); + } + } + } else if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kSuperclass)) { + Emit(" - . 
+ ").Emit(kDataRefIsOffset); + } + } + + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +MIRAddroffuncConst *Emitter::GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst) +{ + MIRAddroffuncConst *innerFuncAddr = nullptr; + size_t addrIndex = mirSymbol.IsReflectionMethodsInfo() ? static_cast(MethodProperty::kPaddrData) + : static_cast(MethodInfoCompact::kPaddrData); + MIRConst *pAddrConst = aggConst.GetConstVecItem(addrIndex); + if (pAddrConst->GetKind() == kConstAddrof) { + /* point addr data. */ + MIRAddrofConst *pAddr = safe_cast(pAddrConst); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(pAddr->GetSymbolIndex().Idx()); + MIRAggConst *methodAddrAggConst = safe_cast(symAddrSym->GetKonst()); + MIRAggConst *addrAggConst = safe_cast(methodAddrAggConst->GetConstVecItem(0)); + MIRConst *funcAddrConst = addrAggConst->GetConstVecItem(0); + if (funcAddrConst->GetKind() == kConstAddrofFunc) { + /* func sybmol. */ + innerFuncAddr = safe_cast(funcAddrConst); + } else if (funcAddrConst->GetKind() == kConstInt) { + /* def table index, replaced by def table for lazybinding. */ + std::string funcDefTabName = + namemangler::kMuidFuncDefTabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + MIRSymbol *funDefTabSy = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(funcDefTabName)); + MIRAggConst &funDefTabAggConst = static_cast(*funDefTabSy->GetKonst()); + MIRIntConst *funcAddrIndexConst = safe_cast(funcAddrConst); + uint64 indexDefTab = funcAddrIndexConst->GetExtValue(); + MIRAggConst *defTabAggConst = safe_cast(funDefTabAggConst.GetConstVecItem(indexDefTab)); + MIRConst *funcConst = defTabAggConst->GetConstVecItem(0); + if (funcConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(funcConst); + } + } + } else if (pAddrConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(pAddrConst); + } + return innerFuncAddr; +} + +int64 Emitter::GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst, + const std::map &strIdx2Type) +{ + uint64 idx = intConst.GetExtValue(); + bool isDefTabIndex = idx & 0x1; + int64 fieldIdx = idx >> 1; + if (isDefTabIndex) { + /* it's def table index. */ + return fieldIdx; + } else { + /* really offset. 
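+ * Worked example: idx = 0x7 has its low bit set, so it denotes def-table
+ * index 3 (0x7 >> 1) and that index is returned as-is; idx = 0x6 has the
+ * low bit clear, so field index 3 is resolved below to its real byte
+ * offset via GetFieldOffset().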
+         */
+        uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte;
+        GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className);
+        auto it = strIdx2Type.find(strIdx);
+        DEBUG_ASSERT(it != strIdx2Type.end(), "Can not find type");
+        CHECK_FATAL(it->second != nullptr, "valid iterator check");
+        MIRType &ty = *it->second;
+        MIRStructType &structType = static_cast<MIRStructType&>(ty);
+        std::pair<int32, int32> fieldOffsetPair =
+            Globals::GetInstance()->GetBECommon()->GetFieldOffset(structType, fieldIdx);
+        int64 fieldOffset = fieldOffsetPair.first * static_cast<int64>(charBitWidth) + fieldOffsetPair.second;
+        return fieldOffset;
+    }
+}
+
+void Emitter::InitRangeIdx2PerfixStr()
+{
+    rangeIdx2PrefixStr[RangeIdx::kVtabAndItab] = kMuidVtabAndItabPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kItabConflict] = kMuidItabConflictPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kVtabOffset] = kMuidVtabOffsetPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kFieldOffset] = kMuidFieldOffsetPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kValueOffset] = kMuidValueOffsetPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kLocalClassInfo] = kMuidLocalClassInfoStr;
+    rangeIdx2PrefixStr[RangeIdx::kConststr] = kMuidConststrPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kSuperclass] = kMuidSuperclassPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kGlobalRootlist] = kMuidGlobalRootlistPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kClassmetaData] = kMuidClassMetadataPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kClassBucket] = kMuidClassMetadataBucketPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kJavatext] = kMuidJavatextPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kDataSection] = kMuidDataSectionStr;
+    rangeIdx2PrefixStr[RangeIdx::kJavajni] = kRegJNITabPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kJavajniFunc] = kRegJNIFuncTabPrefixStr;
+    rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticKey] = kDecoupleStaticKeyStr;
+    rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticValue] = kDecoupleStaticValueStr;
+    rangeIdx2PrefixStr[RangeIdx::kBssStart] = kBssSectionStr;
+    rangeIdx2PrefixStr[RangeIdx::kLinkerSoHash] = kLinkerHashSoStr;
+    rangeIdx2PrefixStr[RangeIdx::kArrayClassCache] = kArrayClassCacheTable;
+    rangeIdx2PrefixStr[RangeIdx::kArrayClassCacheName] = kArrayClassCacheNameTable;
+}
+
+void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex,
+                           const std::map<GStrIdx, MIRType*> &strIdx2Type, size_t idx)
+{
+    MIRConst *elemConst = aggConst.GetConstVecItem(idx);
+    const std::string stName = mirSymbol.GetName();
+
+    MIRIntConst *intConst = safe_cast<MIRIntConst>(elemConst);
+    DEBUG_ASSERT(intConst != nullptr, "Unexpected const type");
+
+    /* ignore abstract function addr */
+    if ((idx == static_cast<size_t>(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) {
+        return;
+    }
+
+    if (((idx == static_cast<size_t>(MethodProperty::kVtabIndex)) && (mirSymbol.IsReflectionMethodsInfo())) ||
+        ((idx == static_cast<size_t>(MethodInfoCompact::kVtabIndex)) && mirSymbol.IsReflectionMethodsInfoCompact())) {
+        MIRAddroffuncConst *innerFuncAddr = GetAddroffuncConst(mirSymbol, aggConst);
+        if (innerFuncAddr != nullptr) {
+            Emit(".Label.name."
+ + GlobalTables::GetFunctionTable().GetFunctionFromPuidx(innerFuncAddr->GetValue())->GetName()); + Emit(":\n"); + } + } + /* refer to DeCouple::GenOffsetTableType */ + constexpr int fieldTypeIdx = 2; + constexpr int methodTypeIdx = 2; + bool isClassInfo = + (idx == static_cast(ClassRO::kClassName) || idx == static_cast(ClassRO::kAnnotation)) && + mirSymbol.IsReflectionClassInfoRO(); + bool isMethodsInfo = (idx == static_cast(MethodProperty::kMethodName) || + idx == static_cast(MethodProperty::kSigName) || + idx == static_cast(MethodProperty::kAnnotation)) && + mirSymbol.IsReflectionMethodsInfo(); + bool isFieldsInfo = + (idx == static_cast(FieldProperty::kTypeName) || idx == static_cast(FieldProperty::kName) || + idx == static_cast(FieldProperty::kAnnotation)) && + mirSymbol.IsReflectionFieldsInfo(); + bool isMethodSignature = (idx == static_cast(MethodSignatureProperty::kSignatureOffset)) && + mirSymbol.IsReflectionMethodSignature(); + /* RegisterTable has been Int Array, visit element instead of field. */ + bool isInOffsetTab = (idx == 1 || idx == methodTypeIdx) && (StringUtils::StartsWith(stName, kVtabOffsetTabStr) || + StringUtils::StartsWith(stName, kFieldOffsetTabStr)); + /* The 1 && 2 of Decouple static struct is the string name */ + bool isStaticStr = (idx == 1 || idx == 2) && aggConst.GetConstVec().size() == kSizeOfDecoupleStaticStruct && + StringUtils::StartsWith(stName, kDecoupleStaticKeyStr); + /* process conflict table index larger than itabConflictIndex * 2 + 2 element */ + bool isConflictPerfix = (idx >= (static_cast(itabConflictIndex) * 2 + 2)) && (idx % 2 == 0) && + StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR); + bool isArrayClassCacheName = mirSymbol.IsArrayClassCacheName(); + if (isClassInfo || isMethodsInfo || isFieldsInfo || mirSymbol.IsRegJNITab() || isInOffsetTab || isStaticStr || + isConflictPerfix || isArrayClassCacheName || isMethodSignature) { + /* compare with all 1s */ + uint32 index = static_cast((safe_cast(elemConst))->GetExtValue()) & 0xFFFFFFFF; + bool isHotReflectStr = (index & 0x00000003) != 0; /* use the last two bits of index in this expression */ + std::string hotStr; + if (isHotReflectStr) { + uint32 tag = (index & 0x00000003) - kCStringShift; /* use the last two bits of index in this expression */ + if (tag == kLayoutBootHot) { + hotStr = kReflectionStartHotStrtabPrefixStr; + } else if (tag == kLayoutBothHot) { + hotStr = kReflectionBothHotStrTabPrefixStr; + } else { + hotStr = kReflectionRunHotStrtabPrefixStr; + } + } + std::string reflectStrTabPrefix = isHotReflectStr ? 
hotStr : kReflectionStrtabPrefixStr; + std::string strTabName = reflectStrTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + /* left shift 2 bit to get low 30 bit data for MIRIntConst */ + elemConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(index >> 2, elemConst->GetType()); + intConst = safe_cast(elemConst); + aggConst.SetItem(static_cast(idx), intConst, aggConst.GetFieldIdItem(idx)); +#ifdef USE_32BIT_REF + if (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("+" + strTabName); + if (mirSymbol.IsRegJNITab() || mirSymbol.IsReflectionMethodsInfo() || mirSymbol.IsReflectionFieldsInfo() || + mirSymbol.IsArrayClassCacheName() || mirSymbol.IsReflectionMethodSignature()) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, kDecoupleStaticKeyStr)) { + Emit("-."); + } + if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kAnnotation)) { + Emit("-."); + } else if (idx == static_cast(ClassRO::kClassName)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + } + if (StringUtils::StartsWith(stName, ITAB_PREFIX_STR)) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef32::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + if ((idx == 1 || idx == methodTypeIdx) && StringUtils::StartsWith(stName, kVtabOffsetTabStr)) { + Emit("-."); + } + if ((idx == 1 || idx == fieldTypeIdx) && StringUtils::StartsWith(stName, kFieldOffsetTabStr)) { + Emit("-."); + } + Emit("\n"); + } else if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncUndefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 5 means kBindingStateMethodUndef:5 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x5\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodUndef:5. */ +#else + Emit("__BindingProtectRegion__ + 5\n"); +#endif /* USE_32BIT_REF */ + } else { + Emit("0\n"); + } + } else if (idx == static_cast(FieldProperty::kPClassType) && mirSymbol.IsReflectionFieldsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); + const int width = 4; +#else + EmitAsmLabel(kAsmQuad); + const int width = 8; +#endif /* USE_32BIT_REF */ + uint32 muidDataTabAddr = static_cast((safe_cast(elemConst))->GetExtValue()); + if (muidDataTabAddr != 0) { + bool isDefTabIndex = (muidDataTabAddr & kFromDefIndexMask32Mod) == kFromDefIndexMask32Mod; + std::string muidDataTabPrefix = isDefTabIndex ? 
kMuidDataDefTabPrefixStr : kMuidDataUndefTabPrefixStr; + std::string muidDataTabName = muidDataTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + (void)Emit(muidDataTabName + "+"); + uint32 muidDataTabIndex = muidDataTabAddr & 0x3FFFFFFF; /* high 2 bit is the mask of muid tab */ + (void)Emit(std::to_string(muidDataTabIndex * width)); + (void)Emit("-.\n"); + } else { + (void)Emit(muidDataTabAddr); + Emit("\n"); + } + return; + } else if (mirSymbol.IsRegJNIFuncTab()) { + std::string strTabName = kRegJNITabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + EmitScalarConstant(*elemConst, false); +#ifdef TARGARM32 + (void)Emit("+" + strTabName).Emit("+").Emit(MByteRef::kPositiveOffsetBias).Emit("-.\n"); +#else + Emit("+" + strTabName + "\n"); +#endif + } else if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + Emit(intConst->GetValue()); + Emit("\n"); + } else if (mirSymbol.IsReflectionFieldOffsetData()) { + /* Figure out instance field offset now. */ + size_t prefixStrLen = strlen(kFieldOffsetDataPrefixStr); + size_t pos = stName.find("_FieldID_"); + std::string typeName = stName.substr(prefixStrLen, pos - prefixStrLen); +#ifdef USE_32BIT_REF + std::string widthFlag = ".long"; +#else + std::string widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + uint64 fieldIdx = intConst->GetExtValue(); + bool isDefTabIndex = fieldIdx & 0x1; + if (isDefTabIndex) { + /* it's def table index. */ + Emit("\t// " + typeName + " static field, data def table index " + std::to_string(fieldOffset) + "\n"); + } else { + /* really offset. */ + fieldIdx >>= 1; + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + } + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } else if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + ((idx == static_cast(FieldPropertyCompact::kPOffset)) && + mirSymbol.IsReflectionFieldsInfoCompact())) { + std::string typeName; + std::string widthFlag; +#ifdef USE_32BIT_REF + const int width = 4; +#else + const int width = 8; +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsReflectionFieldsInfo()) { + typeName = stName.substr(strlen(kFieldsInfoPrefixStr)); +#ifdef USE_32BIT_REF + widthFlag = ".long"; +#else + widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + } else { + size_t prefixStrLen = strlen(kFieldsInfoCompactPrefixStr); + typeName = stName.substr(prefixStrLen); + widthFlag = ".long"; + } + int64 fieldIdx = intConst->GetExtValue(); + MIRSymbol *pOffsetData = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldOffsetDataPrefixStr + typeName)); + if (pOffsetData != nullptr) { + fieldIdx *= width; + std::string fieldOffset = kFieldOffsetDataPrefixStr + typeName; + Emit("\t" + widthFlag + "\t" + std::to_string(fieldIdx) + " + " + fieldOffset + " - .\n"); + } else { + /* pOffsetData null, means FieldMeta.offset is really offset */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } + } else if ((idx == static_cast(ClassProperty::kObjsize)) && mirSymbol.IsReflectionClassInfo()) { + std::string delimiter = "$$"; + std::string typeName = + stName.substr(strlen(CLASSINFO_PREFIX_STR), stName.find(delimiter) - 
strlen(CLASSINFO_PREFIX_STR)); + uint32 objSize = 0; + std::string comments; + + if (typeName.size() > 1 && typeName[0] == '$') { + /* fill element size for array class; */ + std::string newTypeName = typeName.substr(1); + /* another $(arraysplitter) */ + if (newTypeName.find("$") == std::string::npos) { + CHECK_FATAL(false, "can not find $ in std::string"); + } + typeName = newTypeName.substr(newTypeName.find("$") + 1); + int32 pTypeSize; + + /* we only need to calculate primitive type in arrays. */ + if ((pTypeSize = GetPrimitiveTypeSize(typeName)) != -1) { + objSize = static_cast(pTypeSize); + } + comments = "// elemobjsize"; + } else { + comments = "// objsize"; + } + + if (!objSize) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + auto it = strIdx2Type.find(strIdx); + DEBUG_ASSERT(it != strIdx2Type.end(), "Can not find type"); + MIRType *mirType = it->second; + ASSERT_NOT_NULL(mirType); + objSize = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + } + /* objSize should not exceed 16 bits */ + CHECK_FATAL(objSize <= 0xffff, "Error:the objSize is too large"); + Emit("\t.short\t" + std::to_string(objSize) + comments + "\n"); + } else if (mirSymbol.IsMuidRangeTab()) { + MIRIntConst *subIntCt = safe_cast(elemConst); + int flag = subIntCt->GetExtValue(); + InitRangeIdx2PerfixStr(); + if (rangeIdx2PrefixStr.find(flag) == rangeIdx2PrefixStr.end()) { + EmitScalarConstant(*elemConst, false); + Emit("\n"); + return; + } + std::string prefix = rangeIdx2PrefixStr[flag]; +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + (void)Emit("\t.word\t"); +#endif + if (idx == kRangeBeginIndex) { + Emit(prefix + "_begin\n"); + } else { + Emit(prefix + "_end\n"); + } + } else { +#ifdef USE_32BIT_REF + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR) || + StringUtils::StartsWith(stName, ITAB_PREFIX_STR) || StringUtils::StartsWith(stName, VTAB_PREFIX_STR)) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("\n"); + } +} + +void Emitter::EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst, + const std::map &strIdx2Type) +{ + const std::string stName = mirSymbol.GetName(); + MIRAggConst &aggConst = static_cast(mirConst); + uint32 itabConflictIndex = 0; + for (size_t i = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst *elemConst = aggConst.GetConstVecItem(i); + if (i == 0 && StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { +#ifdef USE_32BIT_REF + itabConflictIndex = static_cast((safe_cast(elemConst))->GetValue()) & 0xffff; +#else + itabConflictIndex = safe_cast(elemConst)->GetExtValue() & 0xffffffff; +#endif + } + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (elemConst->GetKind() == kConstAddrofFunc) { /* addroffunc const */ + EmitAddrofFuncConst(mirSymbol, *elemConst, i); + } else if (elemConst->GetKind() == kConstAddrof) { /* addrof symbol const */ + EmitAddrofSymbolConst(mirSymbol, *elemConst, i); + } else { /* intconst */ + EmitIntConst(mirSymbol, aggConst, itabConflictIndex, strIdx2Type, i); + } + } else if (elemConst->GetType().GetKind() == kTypeArray || elemConst->GetType().GetKind() == kTypeStruct) { + if (StringUtils::StartsWith(mirSymbol.GetName(), namemangler::kOffsetTabStr) && (i == 0 || i == 1)) { + /* EmitOffsetValueTable */ +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); 
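+ /*
+ * The offset table starts with two PC-relative entries ("symbol - .")
+ * pointing at the vtab-offset and field-offset tables emitted elsewhere
+ * in this module.
+ */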
+#endif + if (i == 0) { + (void)Emit(namemangler::kVtabOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } else { + (void)Emit(namemangler::kFieldOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } + } else { + EmitConstantTable(mirSymbol, *elemConst, strIdx2Type); + } + } + } +} + +void Emitter::EmitArrayConstant(MIRConst &mirConst) +{ + MIRType &mirType = mirConst.GetType(); + MIRAggConst &arrayCt = static_cast(mirConst); + MIRArrayType &arrayType = static_cast(mirType); + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx scalarIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + if (uNum == 0 && dim) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + scalarIdx = aSubTy->GetElemTyIdx(); + subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (cg->GetMIRModule()->IsCModule()) { + bool strLiteral = false; + if (arrayType.GetDim() == 1) { + MIRType *ety = arrayType.GetElemType(); + if (ety->GetPrimType() == PTY_i8 || ety->GetPrimType() == PTY_u8) { + strLiteral = true; + } + } + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + EmitScalarConstant(*elemConst); + } + } else if (elemConst->GetType().GetKind() == kTypeArray) { + EmitArrayConstant(*elemConst); + } else if (elemConst->GetType().GetKind() == kTypeStruct || elemConst->GetType().GetKind() == kTypeClass || + elemConst->GetType().GetKind() == kTypeUnion) { + EmitStructConstant(*elemConst); + } else if (elemConst->GetKind() == kConstAddrofFunc) { + EmitScalarConstant(*elemConst); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + int64 iNum = (arrayType.GetSizeArrayItem(0) > 0) ? 
(static_cast(arrayType.GetSizeArrayItem(0))) - uNum : 0; + if (iNum > 0) { + if (!cg->GetMIRModule()->IsCModule()) { + CHECK_FATAL(!Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeSizeTable(), "container empty check"); + CHECK_FATAL(!arrayCt.GetConstVec().empty(), "container empty check"); + } + if (uNum > 0) { + uint64 unInSizeInByte = + static_cast(iNum) * static_cast(Globals::GetInstance()->GetBECommon()->GetTypeSize( + arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + if (unInSizeInByte != 0) { + EmitNullConstant(unInSizeInByte); + } + } else { + uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(scalarIdx.GetIdx()) * dim; + Emit("\t.zero\t").Emit(static_cast(size)).Emit("\n"); + } + } +} + +void Emitter::EmitVectorConstant(MIRConst &mirConst) +{ + MIRType &mirType = mirConst.GetType(); + MIRAggConst &vecCt = static_cast(mirConst); + size_t uNum = vecCt.GetConstVec().size(); + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = vecCt.GetConstVecItem(i); + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + bool strLiteral = false; + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + size_t lanes = GetVecLanes(mirType.GetPrimType()); + if (lanes > uNum) { + MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + for (size_t i = uNum; i < lanes; i++) { + EmitScalarConstant(zConst, true, false, false); + } + } +} + +void Emitter::EmitStructConstant(MIRConst &mirConst) +{ + uint32_t subStructFieldCounts = 0; + EmitStructConstant(mirConst, subStructFieldCounts); +} + +void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts) +{ + StructEmitInfo *sEmitInfo = cg->GetMIRModule()->GetMemPool()->New(); + CHECK_FATAL(sEmitInfo != nullptr, "create a new struct emit info failed in Emitter::EmitStructConstant"); + MIRType &mirType = mirConst.GetType(); + MIRAggConst &structCt = static_cast(mirConst); + MIRStructType &structType = static_cast(mirType); + auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); + /* all elements of struct. */ + uint8 num; + if (structType.GetKind() == kTypeUnion) { + num = 1; + } else { + num = static_cast(structType.GetFieldsSize()); + } + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + /* total size of emitted elements size. 
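+ * sEmitInfo accumulates the bytes written so far; any gap left between that
+ * running total and the type's full size at the end of the field walk
+ * (e.g. the 3 trailing padding bytes of struct { int i; char c; }) is
+ * zero-filled by the final EmitNullConstant(opSize) call.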
*/ + uint32 size = beCommon->GetTypeSize(structType.GetTypeIndex()); + uint32 fieldIdx = 1; + if (structType.GetKind() == kTypeUnion) { + fieldIdx = structCt.GetFieldIdItem(0); + } + for (uint32 i = 0; i < num; ++i) { + if (((i + 1) == num) && cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + isFlexibleArray = beCommon->GetHasFlexibleArray(mirType.GetTypeIndex().GetIdx()); + arraySize = 0; + } + MIRConst *elemConst; + if (structType.GetKind() == kTypeStruct) { + elemConst = structCt.GetAggConstElement(i + 1); + } else { + elemConst = structCt.GetAggConstElement(fieldIdx); + } + MIRType *elemType = structType.GetElemType(i); + if (structType.GetKind() == kTypeUnion) { + elemType = &(elemConst->GetType()); + } + MIRType *nextElemType = nullptr; + if (i != static_cast(num - 1)) { + nextElemType = structType.GetElemType(i + 1); + } + uint64 elemSize = beCommon->GetTypeSize(elemType->GetTypeIndex()); + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + if (elemType->GetKind() == kTypeBitField) { + if (elemConst == nullptr) { + MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); + elemConst = zeroFill; + } + std::pair fieldOffsetPair = beCommon->GetFieldOffset(structType, fieldIdx); + uint64_t fieldOffset = static_cast(static_cast(fieldOffsetPair.first)) * + static_cast(charBitWidth) + + static_cast(static_cast(fieldOffsetPair.second)); + EmitBitFieldConstant(*sEmitInfo, *elemConst, nextElemType, fieldOffset); + } else { + if (elemConst != nullptr) { + if (IsPrimitiveVector(elemType->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemType->GetPrimType())) { + EmitScalarConstant(*elemConst, true, false, true); + } else if (elemType->GetKind() == kTypeArray) { + if (elemType->GetSize() != 0) { + EmitArrayConstant(*elemConst); + } + } else if ((elemType->GetKind() == kTypeStruct) || (elemType->GetKind() == kTypeClass) || + (elemType->GetKind() == kTypeUnion)) { + EmitStructConstant(*elemConst, subStructFieldCounts); + fieldIdx += subStructFieldCounts; + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } else { + EmitNullConstant(elemSize); + } + sEmitInfo->IncreaseTotalSize(elemSize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + + if (nextElemType != nullptr && kTypeBitField != nextElemType->GetKind()) { + DEBUG_ASSERT(i < static_cast(num - 1), "NYI"); + uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + auto fieldAttr = structType.GetFields()[i + 1].second.second; + nextAlign = fieldAttr.IsPacked() ? 1 : std::min(nextAlign, structPack); + DEBUG_ASSERT(nextAlign != 0, "expect non-zero"); + /* append size, append 0 when align need. */ + uint64 totalSize = sEmitInfo->GetTotalSize(); + uint64 psize = (totalSize % nextAlign == 0) ? 0 : (nextAlign - (totalSize % nextAlign)); + if (psize != 0) { + EmitNullConstant(psize); + sEmitInfo->IncreaseTotalSize(psize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + /* element is uninitialized, emit null constant. */ + } + fieldIdx++; + } + if (structType.GetKind() == kTypeStruct) { + /* The reason of subtracting one is that fieldIdx adds one at the end of the cycle. 
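+ * For example, a struct with three scalar fields enters the loop with
+ * fieldIdx = 1 and leaves it with fieldIdx = 4, so subStructFieldCounts
+ * becomes 3.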
+         */
+        subStructFieldCounts = fieldIdx - 1;
+    } else if (structType.GetKind() == kTypeUnion) {
+        subStructFieldCounts = static_cast<uint32>(beCommon->GetStructFieldCount(structType.GetTypeIndex()));
+    }
+
+    isFlexibleArray = false;
+    uint64 opSize = size - sEmitInfo->GetTotalSize();
+    if (opSize != 0) {
+        EmitNullConstant(opSize);
+    }
+}
+
+/* BlockMarker is for Debugging/Profiling */
+void Emitter::EmitBlockMarker(const std::string &markerName, const std::string &sectionName, bool withAddr,
+                              const std::string &addrName)
+{
+    /*
+     * .type $marker_name$, %object
+     * .global $marker_name$
+     * .data
+     * .align 3
+     * $marker_name$:
+     * .quad 0xdeadbeefdeadbeef
+     * .size $marker_name$, 8
+     */
+    Emit(asmInfo->GetType());
+    Emit(markerName);
+    Emit(", %object\n");
+    if (CGOptions::IsEmitBlockMarker()) { /* exposed as a global symbol, for profiling */
+        Emit(asmInfo->GetGlobal());
+    } else { /* exposed as a local symbol, for release */
+        Emit(asmInfo->GetLocal());
+    }
+    Emit(markerName);
+    Emit("\n");
+
+    if (!sectionName.empty()) {
+        Emit("\t.section ." + sectionName);
+        if (sectionName.find("ro") == 0) {
+            Emit(",\"a\",%progbits\n");
+        } else {
+            Emit(",\"aw\",%progbits\n");
+        }
+    } else {
+        EmitAsmLabel(kAsmData);
+    }
+    Emit(asmInfo->GetAlign());
+#if TARGX86 || TARGX86_64
+    Emit("8\n" + markerName + ":\n");
+#else
+    Emit("3\n" + markerName + ":\n");
+#endif
+    EmitAsmLabel(kAsmQuad);
+    if (withAddr) {
+        Emit(addrName + "\n");
+    } else {
+        Emit("0xdeadbeefdeadbeef\n"); /* hexspeak marking a crash or deadlock on aarch64 */
+    }
+    Emit(asmInfo->GetSize());
+    Emit(markerName + ", 8\n");
+}
+
+void Emitter::EmitLiteral(const MIRSymbol &literal, const std::map<GStrIdx, MIRType*> &strIdx2Type)
+{
+    /*
+     * .type _C_STR_xxxx, %object
+     * .local _C_STR_xxxx
+     * .data
+     * .align 3
+     * _C_STR_xxxx:
+     * .quad __cinf_Ljava_2Flang_2FString_3B
+     * ....
+     * .size _C_STR_xxxx, 40
+     */
+    if (literal.GetStorageClass() == kScUnused) {
+        return;
+    }
+    EmitAsmLabel(literal, kAsmType);
+    /* literal should always be fstatic and readonly? */
+    EmitAsmLabel(literal, kAsmLocal); /* always fstatic */
+    (void)Emit("\t.section\t."
+ std::string(kMapleLiteralString) + ",\"aw\", %progbits\n"); + EmitAsmLabel(literal, kAsmAlign); + EmitAsmLabel(literal, kAsmSyname); + /* literal is an array */ + MIRConst *mirConst = literal.GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitLiteral"); + if (literal.HasAddrOfValues()) { + EmitConstantTable(literal, *mirConst, strIdx2Type); + } else { + EmitArrayConstant(*mirConst); + } + EmitAsmLabel(literal, kAsmSize); +} + +void Emitter::EmitFuncLayoutInfo(const MIRSymbol &layout) +{ + /* + * .type $marker_name$, %object + * .global $marker_name$ + * .data + * .align 3 + * $marker_name$: + * .quad funcaddr + * .size $marker_name$, 8 + */ + MIRConst *mirConst = layout.GetKonst(); + MIRAggConst *aggConst = safe_cast(mirConst); + DEBUG_ASSERT(aggConst != nullptr, "null ptr check"); + if (aggConst->GetConstVec().size() != static_cast(LayoutType::kLayoutTypeCount)) { + maple::LogInfo::MapleLogger(kLlErr) << "something wrong happen in funclayoutsym\t" + << "constVec size\t" << aggConst->GetConstVec().size() << "\n"; + return; + } + for (size_t i = 0; i < static_cast(LayoutType::kLayoutTypeCount); ++i) { + std::string markerName = "__MBlock_" + GetLayoutTypeString(i) + "_func_start"; + CHECK_FATAL(aggConst->GetConstVecItem(i)->GetKind() == kConstAddrofFunc, "expect kConstAddrofFunc type"); + MIRAddroffuncConst *funcAddr = safe_cast(aggConst->GetConstVecItem(i)); + DEBUG_ASSERT(funcAddr != nullptr, "null ptr check"); + Emit(asmInfo->GetType()); + Emit(markerName + ", %object\n"); + Emit(asmInfo->GetGlobal()); + Emit(markerName + "\n"); + EmitAsmLabel(kAsmData); +#if TARGX86 || TARGX86_64 + EmitAsmLabel(layout, kAsmAlign); + Emit(markerName + ":\n"); +#else + Emit(asmInfo->GetAlign()); + Emit("3\n" + markerName + ":\n"); +#endif + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word "); +#endif + Emit(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr->GetValue())->GetName()); + Emit("\n"); + Emit(asmInfo->GetSize()); + Emit(markerName + ", 8\n"); + } +} + +void Emitter::EmitStaticFields(const std::vector &fields) +{ + for (auto *itSymbol : fields) { + EmitAsmLabel(*itSymbol, kAsmType); + /* literal should always be fstatic and readonly? 
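+ * (They are emitted with local binding either way; see the kAsmLocal label
+ * emitted just below.)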
+         */
+        EmitAsmLabel(*itSymbol, kAsmLocal); /* always fstatic */
+        EmitAsmLabel(kAsmData);
+        EmitAsmLabel(*itSymbol, kAsmAlign);
+        EmitAsmLabel(*itSymbol, kAsmSyname);
+        /* literal is an array */
+        MIRConst *mirConst = itSymbol->GetKonst();
+        EmitArrayConstant(*mirConst);
+    }
+}
+
+void Emitter::EmitLiterals(std::vector<std::pair<MIRSymbol*, bool>> &literals,
+                           const std::map<GStrIdx, MIRType*> &strIdx2Type)
+{
+    /*
+     * load literals profile
+     * currently only used here, so declare it as local
+     */
+    if (!cg->GetMIRModule()->GetProfile().GetLiteralProfileSize()) {
+        for (const auto &literalPair : literals) {
+            EmitLiteral(*(literalPair.first), strIdx2Type);
+        }
+        return;
+    }
+    /* emit hot literal start symbol */
+    EmitBlockMarker("__MBlock_literal_hot_begin", "", false);
+    /*
+     * emit literals into .data section
+     * emit literals in the profile first
+     */
+    for (auto &literalPair : literals) {
+        if (cg->GetMIRModule()->GetProfile().CheckLiteralHot(literalPair.first->GetName())) {
+            /* it's in the literal profiling data, which means it's "hot" */
+            EmitLiteral(*(literalPair.first), strIdx2Type);
+            literalPair.second = true;
+        }
+    }
+    /* emit hot literal end symbol */
+    EmitBlockMarker("__MBlock_literal_hot_end", "", false);
+
+    /* emit cold literal start symbol */
+    EmitBlockMarker("__MBlock_literal_cold_begin", "", false);
+    /* emit the other literals (not in the profile) next */
+    for (const auto &literalPair : literals) {
+        if (!literalPair.second) {
+            /* not emitted yet */
+            EmitLiteral(*(literalPair.first), strIdx2Type);
+        }
+    }
+    /* emit cold literal end symbol */
+    EmitBlockMarker("__MBlock_literal_cold_end", "", false);
+}
+
+void Emitter::GetHotAndColdMetaSymbolInfo(const std::vector<MIRSymbol*> &mirSymbolVec,
+                                          std::vector<MIRSymbol*> &hotFieldInfoSymbolVec,
+                                          std::vector<MIRSymbol*> &coldFieldInfoSymbolVec,
+                                          const std::string &prefixStr, bool forceCold)
+{
+    bool isHot = false;
+    for (auto mirSymbol : mirSymbolVec) {
+        CHECK_FATAL(prefixStr.length() < mirSymbol->GetName().length(), "string length check");
+        std::string name = mirSymbol->GetName().substr(prefixStr.length());
+        std::string klassJavaDescriptor;
+        namemangler::DecodeMapleNameToJavaDescriptor(name, klassJavaDescriptor);
+        if (prefixStr == kFieldsInfoPrefixStr) {
+            isHot = cg->GetMIRModule()->GetProfile().CheckFieldHot(klassJavaDescriptor);
+        } else if (prefixStr == kMethodsInfoPrefixStr) {
+            isHot = cg->GetMIRModule()->GetProfile().CheckMethodHot(klassJavaDescriptor);
+        } else {
+            isHot = cg->GetMIRModule()->GetProfile().CheckClassHot(klassJavaDescriptor);
+        }
+        if (isHot && !forceCold) {
+            hotFieldInfoSymbolVec.emplace_back(mirSymbol);
+        } else {
+            coldFieldInfoSymbolVec.emplace_back(mirSymbol);
+        }
+    }
+}
+
+void Emitter::EmitMetaDataSymbolWithMarkFlag(const std::vector<MIRSymbol*> &mirSymbolVec,
+                                             const std::map<GStrIdx, MIRType*> &strIdx2Type,
+                                             const std::string &prefixStr, const std::string &sectionName,
+                                             bool isHotFlag)
+{
+    if (cg->GetMIRModule()->IsCModule()) {
+        return;
+    }
+    if (mirSymbolVec.empty()) {
+        return;
+    }
+    const std::string &markString = "__MBlock" + prefixStr;
+    const std::string &hotOrCold = isHotFlag ?
"hot" : "cold"; + EmitBlockMarker((markString + hotOrCold + "_begin"), sectionName, false); + if (prefixStr == kFieldsInfoCompactPrefixStr || prefixStr == kMethodsInfoCompactPrefixStr || + prefixStr == kFieldOffsetDataPrefixStr || prefixStr == kMethodAddrDataPrefixStr) { + for (auto s : mirSymbolVec) { + EmitMethodFieldSequential(*s, strIdx2Type, sectionName); + } + } else { + for (auto s : mirSymbolVec) { + EmitClassInfoSequential(*s, strIdx2Type, sectionName); + } + } + EmitBlockMarker((markString + hotOrCold + "_end"), sectionName, false); +} + +void Emitter::MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec) +{ + for (auto mirSymbol : mirSymbolVec) { + auto *aggConst = safe_cast(mirSymbol->GetKonst()); + if ((aggConst == nullptr) || (aggConst->GetConstVec().empty())) { + continue; + } + size_t size = aggConst->GetConstVec().size(); + MIRConst *elemConst = aggConst->GetConstVecItem(size - 1); + DEBUG_ASSERT(elemConst != nullptr, "null ptr check"); + if (elemConst->GetKind() == kConstAddrofFunc) { + maple::LogInfo::MapleLogger(kLlErr) << "ERROR: the last vtab/itab content should not be funcAddr\n"; + } else { + if (elemConst->GetKind() != kConstInt) { + CHECK_FATAL(elemConst->GetKind() == kConstAddrof, "must be"); + continue; + } + MIRIntConst *tabConst = static_cast(elemConst); +#ifdef USE_32BIT_REF + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(tabConst->GetValue()) | 0X40000000, tabConst->GetType()); +#else + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + tabConst->GetExtValue() | 0X4000000000000000, tabConst->GetType()); +#endif + aggConst->SetItem(static_cast(size) - 1, tabConst, aggConst->GetFieldIdItem(size - 1)); + } + } +} + +void Emitter::EmitStringPointers() +{ + if (CGOptions::OptimizeForSize()) { + (void)Emit(asmInfo->GetSection()).Emit(".rodata,\"aMS\",@progbits,1").Emit("\n"); +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } else { + (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); + } + for (auto idx : localStrPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); + (void)Emit(".LUstr_").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } + for (auto idx : stringPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + Emit(".LSTR__").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } +} + +void Emitter::EmitLocalVariable(const CGFunc &cgFunc) +{ + /* function local pstatic initialization */ + if (cg->GetMIRModule()->IsCModule()) { + MIRSymbolTable *lSymTab = cgFunc.GetMirModule().CurFunction()->GetSymTab(); + if (lSymTab != nullptr) { + size_t lsize = lSymTab->GetSymbolTableSize(); + for (size_t i = 0; i < lsize; i++) { + if (i < cgFunc.GetLSymSize() && !cg->GetMIRModule()->IsCModule()) { + continue; + } + MIRSymbol *st = 
lSymTab->GetSymbolFromStIdx(static_cast(i)); + if (st != nullptr && st->GetStorageClass() == kScPstatic) { + /* + * Local static names can repeat. + * Append the current program unit index to the name. + */ + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + std::string localname = st->GetName() + std::to_string(pIdx); + static std::vector emittedLocalSym; + bool found = false; + for (auto name : emittedLocalSym) { + if (name == localname) { + found = true; + break; + } + } + if (found) { + continue; + } + emittedLocalSym.push_back(localname); + + /* cg created data should be located in .text */ + /* [cgFunc.GetLSymSize(), lSymTab->GetSymbolTableSize()) -> cg created symbol */ + if (i < cgFunc.GetLSymSize()) { + if (st->IsThreadLocal()) { + (void)Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + } else { + Emit(asmInfo->GetSection()); + Emit(asmInfo->GetData()); + Emit("\n"); + } + } else { + CHECK_FATAL(st->GetStorageClass() == kScPstatic && st->GetSKind() == kStConst, "cg should create constant!"); + /* cg created data should be located in .text */ + (void)Emit("\t.section\t.text\n"); + } + EmitAsmLabel(*st, kAsmAlign); + EmitAsmLabel(*st, kAsmLocal); + MIRType *ty = st->GetType(); + MIRConst *ct = st->GetKonst(); + if (ct == nullptr) { + EmitAsmLabel(*st, kAsmComm); + } else if (kTypeStruct == ty->GetKind() || kTypeUnion == ty->GetKind() || + kTypeClass == ty->GetKind()) { + EmitAsmLabel(*st, kAsmSyname); + EmitStructConstant(*ct); + } else if (kTypeArray == ty->GetKind()) { + if (ty->GetSize() != 0) { + EmitAsmLabel(*st, kAsmSyname); + EmitArrayConstant(*ct); + } + } else { + EmitAsmLabel(*st, kAsmSyname); + EmitScalarConstant(*ct, true, false, true /* isIndirect */); + } + } + } + } + } +} + +void Emitter::EmitGlobalVar(const MIRSymbol &globalVar) +{ + EmitAsmLabel(globalVar, kAsmType); + if (globalVar.sectionAttr != UStrIdx(0)) { /* check section info if it is from inline asm */ + Emit("\t.section\t"); + Emit(GlobalTables::GetUStrTable().GetStringFromStrIdx(globalVar.sectionAttr)); + Emit(",\"aw\",%progbits\n"); + } else { + EmitAsmLabel(globalVar, kAsmLocal); + } + EmitAsmLabel(globalVar, kAsmComm); +} + +void Emitter::EmitGlobalVars(std::vector> &globalVars) +{ + if (GetCG()->IsLmbc() && GetCG()->GetGP() != nullptr) { + (void)Emit(asmInfo->GetLocal()).Emit("\t").Emit(GetCG()->GetGP()->GetName()).Emit("\n"); + (void)Emit(asmInfo->GetComm()).Emit("\t").Emit(GetCG()->GetGP()->GetName()); + (void)Emit(", ").Emit(GetCG()->GetMIRModule()->GetGlobalMemSize()).Emit(", ").Emit("8\n"); + } + /* load globalVars profile */ + if (globalVars.empty()) { + return; + } + std::unordered_set hotVars; + std::ifstream inFile; + if (!CGOptions::IsGlobalVarProFileEmpty()) { + inFile.open(CGOptions::GetGlobalVarProFile()); + if (inFile.fail()) { + maple::LogInfo::MapleLogger(kLlErr) + << "Cannot open globalVar profile file " << CGOptions::GetGlobalVarProFile() << "\n"; + } + } + if (CGOptions::IsGlobalVarProFileEmpty() || inFile.fail()) { + for (const auto &globalVarPair : globalVars) { + EmitGlobalVar(*(globalVarPair.first)); + } + return; + } + std::string globalVarName; + while (inFile >> globalVarName) { + (void)hotVars.insert(globalVarName); + } + inFile.close(); + bool hotBeginSet = false; + bool coldBeginSet = false; + for (auto &globalVarPair : globalVars) { + if (hotVars.find(globalVarPair.first->GetName()) != hotVars.end()) { + if (!hotBeginSet) { + /* emit hot globalvar start symbol */ + EmitBlockMarker("__MBlock_globalVars_hot_begin", "", true, globalVarPair.first->GetName()); + 
hotBeginSet = true;
+            }
+            EmitGlobalVar(*(globalVarPair.first));
+            globalVarPair.second = true;
+        }
+    }
+    for (const auto &globalVarPair : globalVars) {
+        if (!globalVarPair.second) { /* not emitted yet */
+            if (!coldBeginSet) {
+                /* emit cold globalvar start symbol */
+                EmitBlockMarker("__MBlock_globalVars_cold_begin", "", true, globalVarPair.first->GetName());
+                coldBeginSet = true;
+            }
+            EmitGlobalVar(*(globalVarPair.first));
+        }
+    }
+    MIRSymbol *endSym = globalVars.back().first;
+    ASSERT_NOT_NULL(endSym);
+    MIRType *mirType = endSym->GetType();
+    ASSERT_NOT_NULL(mirType);
+    const std::string kStaticVarEndAdd =
+        std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())) + "+" +
+        endSym->GetName();
+    EmitBlockMarker("__MBlock_globalVars_cold_end", "", true, kStaticVarEndAdd);
+}
+
+void Emitter::EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string &sectionName)
+{
+    EmitAsmLabel(symbol, kAsmType);
+    Emit(asmInfo->GetSection());
+    auto sectionConstrains = symbol.IsThreadLocal() ? ",\"awT\"," : ",\"aw\",";
+    (void)Emit(sectionName).Emit(sectionConstrains);
+    if (sectionName == ".bss" || StringUtils::StartsWith(sectionName, ".bss.") || sectionName == ".tbss" ||
+        StringUtils::StartsWith(sectionName, ".tbss.")) {
+        Emit("%nobits\n");
+    } else {
+        Emit("%progbits\n");
+    }
+    if (symbol.GetAttr(ATTR_weak)) {
+        EmitAsmLabel(symbol, kAsmWeak);
+    } else if (symbol.GetStorageClass() == kScGlobal) {
+        EmitAsmLabel(symbol, kAsmGlbl);
+    }
+    EmitAsmLabel(symbol, kAsmAlign);
+    EmitAsmLabel(symbol, kAsmSyname);
+    EmitAsmLabel(symbol, kAsmZero);
+    EmitAsmLabel(symbol, kAsmSize);
+}
+
+void Emitter::EmitGlobalVariable()
+{
+    std::vector<MIRSymbol *> typeStVec;
+    std::vector<MIRSymbol *> typeNameStVec;
+    std::map<GStrIdx, MIRType *> strIdx2Type;
+
+    /* Create name2type map which will be used by reflection.
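+     * A hypothetical illustration (the class name below is an example, not
+     * taken from this module): for a class type named "LFoo_3B" whose name
+     * string index is GStrIdx(42), the loop records strIdx2Type[GStrIdx(42)] =
+     * that class's MIRType, so metadata emission can resolve a type by its
+     * name index.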
*/ + for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr || (type->GetKind() != kTypeClass && type->GetKind() != kTypeInterface)) { + continue; + } + GStrIdx strIdx = type->GetNameStrIdx(); + strIdx2Type[strIdx] = type; + } + + /* sort symbols; classinfo-->field-->method */ + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + std::vector classInfoVec; + std::vector vtabVec; + std::vector staticFieldsVec; + std::vector> globalVarVec; + std::vector itabVec; + std::vector itabConflictVec; + std::vector vtabOffsetVec; + std::vector fieldOffsetVec; + std::vector valueOffsetVec; + std::vector localClassInfoVec; + std::vector constStrVec; + std::vector> literalVec; + std::vector muidVec = {nullptr}; + std::vector fieldOffsetDatas; + std::vector methodAddrDatas; + std::vector methodSignatureDatas; + std::vector staticDecoupleKeyVec; + std::vector staticDecoupleValueVec; + std::vector superClassStVec; + std::vector arrayClassCacheVec; + std::vector arrayClassCacheNameVec; + + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) { + continue; + } + if (mirSymbol->GetSKind() == kStFunc) { + EmitAliasAndRef(*mirSymbol); + } + + if (mirSymbol->GetName().find(VTAB_PREFIX_STR) == 0) { + vtabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_PREFIX_STR) == 0) { + itabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_CONFLICT_PREFIX_STR) == 0) { + itabConflictVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kVtabOffsetTabStr) == 0) { + vtabOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFieldOffsetTabStr) == 0) { + fieldOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kOffsetTabStr) == 0) { + valueOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCache()) { + arrayClassCacheVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCacheName()) { + arrayClassCacheNameVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kLocalClassInfoStr) == 0) { + localClassInfoVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticKeyStr)) { + staticDecoupleKeyVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticValueStr)) { + staticDecoupleValueVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsLiteral()) { + literalVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } else if (mirSymbol->IsConstString() || mirSymbol->IsLiteralPtr()) { + MIRConst *mirConst = mirSymbol->GetKonst(); + if (mirConst != nullptr && mirConst->GetKind() == kConstAddrof) { + constStrVec.emplace_back(mirSymbol); + continue; + } + } else if (mirSymbol->IsReflectionClassInfoPtr()) { + /* _PTR__cinf is emitted in dataDefTab and dataUndefTab */ + continue; + } else if (mirSymbol->IsMuidTab()) { + if (!GetCG()->GetMIRModule()->IsCModule()) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, mirSymbol->GetMuidTabName()); + } + continue; + } else if (mirSymbol->IsCodeLayoutInfo()) { + if (!GetCG()->GetMIRModule()->IsCModule()) { + EmitFuncLayoutInfo(*mirSymbol); + } + continue; + } else if 
(mirSymbol->GetName().find(kStaticFieldNamePrefixStr) == 0) { + staticFieldsVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kGcRootList) == 0) { + EmitGlobalRootList(*mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFunctionProfileTabPrefixStr) == 0) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, kFunctionProfileTabPrefixStr); + continue; + } else if (mirSymbol->IsReflectionFieldOffsetData()) { + fieldOffsetDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodAddrData()) { + methodAddrDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionSuperclassInfo()) { + superClassStVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodSignature()) { + methodSignatureDatas.push_back(mirSymbol); + continue; + } + + if (mirSymbol->IsReflectionInfo()) { + if (mirSymbol->IsReflectionClassInfo()) { + classInfoVec.emplace_back(mirSymbol); + } + continue; + } + /* symbols we do not emit here. */ + if (mirSymbol->GetSKind() == kStFunc || mirSymbol->GetSKind() == kStJavaClass || + mirSymbol->GetSKind() == kStJavaInterface) { + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfo) { + typeStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfoName) { + typeNameStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeCxxAbi) { + continue; + } + + MIRType *mirType = mirSymbol->GetType(); + if (mirType == nullptr) { + continue; + } + if (GetCG()->GetMIRModule()->IsCModule() && mirSymbol->GetStorageClass() == kScExtern) { + /* only emit weak & initialized extern at present */ + if (mirSymbol->IsWeak() || mirSymbol->IsConst()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + continue; + } + } + /* + * emit uninitialized global/static variables. + * these variables store in .comm section. + */ + if ((mirSymbol->GetStorageClass() == kScGlobal || mirSymbol->GetStorageClass() == kScFstatic) && + !mirSymbol->IsConst()) { + if (mirSymbol->IsGctibSym()) { + /* GCTIB symbols are generated in GenerateObjectMaps */ + continue; + } + if (mirSymbol->GetStorageClass() != kScGlobal) { + globalVarVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } + if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, sectionName); + continue; + } else if (mirSymbol->IsThreadLocal()) { + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, ".tbss"); + continue; + } else if (CGOptions::IsNoCommon() || + (!CGOptions::IsNoCommon() && mirSymbol->GetAttr(ATTR_static_init_zero))) { + EmitUninitializedSymbolsWithPrefixSection(*mirSymbol, ".bss"); + continue; + } + EmitAsmLabel(*mirSymbol, kAsmType); + EmitAsmLabel(*mirSymbol, kAsmComm); + continue; + } + + /* emit initialized global/static variables. 
*/ + if (mirSymbol->GetStorageClass() == kScGlobal || + (mirSymbol->GetStorageClass() == kScExtern && GetCG()->GetMIRModule()->IsCModule()) || + (mirSymbol->GetStorageClass() == kScFstatic && !mirSymbol->IsReadOnly())) { + /* Emit section */ + EmitAsmLabel(*mirSymbol, kAsmType); + if (mirSymbol->IsReflectionStrTab()) { + std::string sectionName = ".reflection_strtab"; + if (mirSymbol->GetName().find(kReflectionStartHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_start_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionBothHotStrTabPrefixStr) == 0) { + sectionName = ".reflection_both_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionRunHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_run_hot_strtab"; + } + Emit("\t.section\t" + sectionName + ",\"a\",%progbits\n"); + } else if (mirSymbol->GetName().find(kDecoupleOption) == 0) { + Emit("\t.section\t." + std::string(kDecoupleStr) + ",\"a\",%progbits\n"); + } else if (mirSymbol->IsRegJNITab()) { + Emit("\t.section\t.reg_jni_tab,\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerVersionNum) == 0) { + Emit("\t.section\t." + std::string(kCompilerVersionNumStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kSourceMuid) == 0) { + Emit("\t.section\t." + std::string(kSourceMuidSectionStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerMfileStatus) == 0) { + Emit("\t.section\t." + std::string(kCompilerMfileStatus) + ",\"a\", %progbits\n"); + } else if (mirSymbol->IsRegJNIFuncTab()) { + Emit("\t.section\t.reg_jni_func_tab,\"aw\", %progbits\n"); + } else if (mirSymbol->IsReflectionPrimitiveClassInfo()) { + Emit("\t.section\t.primitive_classinfo,\"awG\", %progbits,__primitive_classinfo__,comdat\n"); + } else if (mirSymbol->IsReflectionHashTabBucket()) { + std::string stName = mirSymbol->GetName(); + const std::string delimiter = "$$"; + if (stName.find(delimiter) == std::string::npos) { + FATAL(kLncFatal, "Can not find delimiter in target "); + } + std::string secName = stName.substr(0, stName.find(delimiter)); + /* remove leading "__" in sec name. */ + secName.erase(0, 2); + Emit("\t.section\t." + secName + ",\"a\",%progbits\n"); + } else { + bool isThreadLocal = mirSymbol->IsThreadLocal(); + if (cg->GetMIRModule()->IsJavaModule()) { + (void)Emit("\t.section\t." + std::string(kMapleGlobalVariable) + ",\"aw\", @progbits\n"); + } else if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + auto sectionConstrains = isThreadLocal ? ",\"awT\"," : ",\"aw\","; + (void)Emit("\t.section\t" + sectionName + sectionConstrains + "@progbits\n"); + } else if (isThreadLocal) { + (void)Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + } else { + (void)Emit("\t.data\n"); + } + } + /* Emit size and align by type */ + if (mirSymbol->GetStorageClass() == kScGlobal) { + if (mirSymbol->GetAttr(ATTR_weak) || mirSymbol->IsReflectionPrimitiveClassInfo()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + EmitAsmLabel(*mirSymbol, kAsmGlbl); + } + if (theMIRModule->IsJavaModule()) { + EmitAsmLabel(*mirSymbol, kAsmHidden); + } + } else if (mirSymbol->GetStorageClass() == kScFstatic) { + if (mirSymbol->sectionAttr == UStrIdx(0)) { + EmitAsmLabel(*mirSymbol, kAsmLocal); + } + } + if (mirSymbol->IsReflectionStrTab()) { /* reflection-string-tab also aligned to 8B boundaries. 
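+         * A note on the alignment directives emitted below: GNU as treats
+         * .align as a byte count on x86 targets but as a power of two on
+         * AArch64, so "8" and "3" (2^3 = 8) request the same 8-byte alignment.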
*/
+            Emit(asmInfo->GetAlign());
+#if TARGX86 || TARGX86_64
+            Emit("8\n");
+#else
+            Emit("3\n");
+#endif
+        } else {
+            EmitAsmLabel(*mirSymbol, kAsmAlign);
+        }
+        EmitAsmLabel(*mirSymbol, kAsmSyname);
+        MIRConst *mirConst = mirSymbol->GetKonst();
+        if (IsPrimitiveVector(mirType->GetPrimType())) {
+            EmitVectorConstant(*mirConst);
+        } else if (IsPrimitiveScalar(mirType->GetPrimType())) {
+            if (!CGOptions::IsArm64ilp32()) {
+                if (IsAddress(mirType->GetPrimType())) {
+                    uint32 sizeinbits = GetPrimTypeBitSize(mirConst->GetType().GetPrimType());
+                    CHECK_FATAL(sizeinbits == k64BitSize, "EmitGlobalVariable: pointer must be of size 8");
+                }
+            }
+            if (cg->GetMIRModule()->IsCModule()) {
+                EmitScalarConstant(*mirConst, true, false, true);
+            } else {
+                EmitScalarConstant(*mirConst);
+            }
+        } else if (mirType->GetKind() == kTypeArray) {
+            if (mirSymbol->HasAddrOfValues()) {
+                EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type);
+            } else {
+                EmitArrayConstant(*mirConst);
+            }
+        } else if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass ||
+                   mirType->GetKind() == kTypeUnion) {
+            if (mirSymbol->HasAddrOfValues()) {
+                EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type);
+            } else {
+                EmitStructConstant(*mirConst);
+            }
+        } else {
+            DEBUG_ASSERT(false, "NYI");
+        }
+        EmitAsmLabel(*mirSymbol, kAsmSize);
+        /* emit constant float/double */
+    } else if (mirSymbol->IsReadOnly()) {
+        MIRConst *mirConst = mirSymbol->GetKonst();
+        if (mirConst->GetKind() == maple::kConstStrConst) {
+            auto strCt = static_cast<MIRStrConst *>(mirConst);
+            localStrPtr.push_back(strCt->GetValue());
+        } else {
+            EmitAsmLabel(*mirSymbol, kAsmType);
+            (void)Emit(asmInfo->GetSection()).Emit(asmInfo->GetRodata()).Emit("\n");
+            if (!CGOptions::OptimizeForSize()) {
+                EmitAsmLabel(*mirSymbol, kAsmAlign);
+            }
+            EmitAsmLabel(*mirSymbol, kAsmSyname);
+            EmitScalarConstant(*mirConst);
+        }
+    } else if (mirSymbol->GetStorageClass() == kScPstatic) {
+        EmitAsmLabel(*mirSymbol, kAsmType);
+        Emit(asmInfo->GetSection());
+        Emit(asmInfo->GetData());
+        Emit("\n");
+        EmitAsmLabel(*mirSymbol, kAsmAlign);
+        EmitAsmLabel(*mirSymbol, kAsmLocal);
+        MIRConst *ct = mirSymbol->GetKonst();
+        if (ct == nullptr) {
+            EmitAsmLabel(*mirSymbol, kAsmComm);
+        } else if (IsPrimitiveScalar(mirType->GetPrimType())) {
+            EmitAsmLabel(*mirSymbol, kAsmSyname);
+            EmitScalarConstant(*ct, true, false, true);
+        } else if (kTypeArray == mirType->GetKind()) {
+            EmitAsmLabel(*mirSymbol, kAsmSyname);
+            EmitArrayConstant(*ct);
+        } else if (kTypeStruct == mirType->GetKind() || kTypeClass == mirType->GetKind() ||
+                   kTypeUnion == mirType->GetKind()) {
+            EmitAsmLabel(*mirSymbol, kAsmSyname);
+            EmitStructConstant(*ct);
+        } else {
+            CHECK_FATAL(0, "Unknown type in Global pstatic");
+        }
+    }
+    } /* end of processing all mirSymbols. */
+    EmitStringPointers();
+    /* emit global vars */
+    EmitGlobalVars(globalVarVec);
+    /* emit literal strings */
+    EmitLiterals(literalVec, strIdx2Type);
+    /* emit static field strings */
+    EmitStaticFields(staticFieldsVec);
+
+    if (GetCG()->GetMIRModule()->IsCModule()) {
+        return;
+    }
+
+    EmitMuidTable(constStrVec, strIdx2Type, kMuidConststrPrefixStr);
+
+    /* emit classinfo, field, method */
+    std::vector<MIRSymbol *> fieldInfoStVec;
+    std::vector<MIRSymbol *> fieldInfoStCompactVec;
+    std::vector<MIRSymbol *> methodInfoStVec;
+    std::vector<MIRSymbol *> methodInfoStCompactVec;
+
+    std::string sectionName = kMuidClassMetadataPrefixStr;
+    Emit("\t.section ."
+ sectionName + ",\"aw\",%progbits\n"); + Emit(sectionName + "_begin:\n"); + + for (size_t i = 0; i < classInfoVec.size(); ++i) { + MIRSymbol *mirSymbol = classInfoVec[i]; + if (mirSymbol != nullptr && mirSymbol->GetKonst() != nullptr && mirSymbol->IsReflectionClassInfo()) { + /* Emit classinfo */ + EmitClassInfoSequential(*mirSymbol, strIdx2Type, sectionName); + std::string stName = mirSymbol->GetName(); + std::string className = stName.substr(strlen(CLASSINFO_PREFIX_STR)); + /* Get classinfo ro symbol */ + MIRSymbol *classInfoROSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(CLASSINFO_RO_PREFIX_STR + className)); + EmitClassInfoSequential(*classInfoROSt, strIdx2Type, sectionName); + /* Get fields */ + MIRSymbol *fieldSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoPrefixStr + className)); + MIRSymbol *fieldStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoCompactPrefixStr + className)); + /* Get methods */ + MIRSymbol *methodSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoPrefixStr + className)); + MIRSymbol *methodStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoCompactPrefixStr + className)); + + if (fieldSt != nullptr) { + fieldInfoStVec.emplace_back(fieldSt); + } + if (fieldStCompact != nullptr) { + fieldInfoStCompactVec.emplace_back(fieldStCompact); + } + if (methodSt != nullptr) { + methodInfoStVec.emplace_back(methodSt); + } + if (methodStCompact != nullptr) { + methodInfoStCompactVec.emplace_back(methodStCompact); + } + } + } + Emit(sectionName + "_end:\n"); + + std::vector hotVtabStVec; + std::vector coldVtabStVec; + std::vector hotItabStVec; + std::vector coldItabStVec; + std::vector hotItabCStVec; + std::vector coldItabCStVec; + std::vector hotMethodsInfoCStVec; + std::vector coldMethodsInfoCStVec; + std::vector hotFieldsInfoCStVec; + std::vector coldFieldsInfoCStVec; + GetHotAndColdMetaSymbolInfo(vtabVec, hotVtabStVec, coldVtabStVec, VTAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabVec, hotItabStVec, coldItabStVec, ITAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabConflictVec, hotItabCStVec, coldItabCStVec, ITAB_CONFLICT_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(fieldInfoStVec, hotFieldsInfoCStVec, coldFieldsInfoCStVec, kFieldsInfoPrefixStr); + GetHotAndColdMetaSymbolInfo(methodInfoStVec, hotMethodsInfoCStVec, coldMethodsInfoCStVec, kMethodsInfoPrefixStr); + + std::string sectionNameIsEmpty; + std::string fieldSectionName("rometadata.field"); + std::string methodSectionName("rometadata.method"); + + /* fieldinfo */ + EmitMetaDataSymbolWithMarkFlag(hotFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, true); + EmitMetaDataSymbolWithMarkFlag(coldFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, false); + EmitMetaDataSymbolWithMarkFlag(fieldInfoStCompactVec, strIdx2Type, kFieldsInfoCompactPrefixStr, fieldSectionName, + false); + /* methodinfo */ + EmitMetaDataSymbolWithMarkFlag(hotMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, true); + 
EmitMetaDataSymbolWithMarkFlag(coldMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, false);
+    EmitMetaDataSymbolWithMarkFlag(methodInfoStCompactVec, strIdx2Type, kMethodsInfoCompactPrefixStr, methodSectionName,
+                                   false);
+
+    /* itabConflict */
+    MarkVtabOrItabEndFlag(coldItabCStVec);
+    EmitMuidTable(hotItabCStVec, strIdx2Type, kMuidItabConflictPrefixStr);
+    EmitMetaDataSymbolWithMarkFlag(coldItabCStVec, strIdx2Type, ITAB_CONFLICT_PREFIX_STR,
+                                   kMuidColdItabConflictPrefixStr, false);
+
+    /*
+     * vtab
+     * and append the itab symbols to the vtab section
+     */
+    for (auto sym : hotItabStVec) {
+        hotVtabStVec.emplace_back(sym);
+    }
+    for (auto sym : coldItabStVec) {
+        coldVtabStVec.emplace_back(sym);
+    }
+    MarkVtabOrItabEndFlag(coldVtabStVec);
+    EmitMuidTable(hotVtabStVec, strIdx2Type, kMuidVtabAndItabPrefixStr);
+    EmitMetaDataSymbolWithMarkFlag(coldVtabStVec, strIdx2Type, VTAB_AND_ITAB_PREFIX_STR, kMuidColdVtabAndItabPrefixStr,
+                                   false);
+
+    /* vtab_offset */
+    EmitMuidTable(vtabOffsetVec, strIdx2Type, kMuidVtabOffsetPrefixStr);
+    /* field_offset */
+    EmitMuidTable(fieldOffsetVec, strIdx2Type, kMuidFieldOffsetPrefixStr);
+    /* value_offset */
+    EmitMuidTable(valueOffsetVec, strIdx2Type, kMuidValueOffsetPrefixStr);
+    /* local classinfo */
+    EmitMuidTable(localClassInfoVec, strIdx2Type, kMuidLocalClassInfoStr);
+    /* Emit decouple static */
+    EmitMuidTable(staticDecoupleKeyVec, strIdx2Type, kDecoupleStaticKeyStr);
+    EmitMuidTable(staticDecoupleValueVec, strIdx2Type, kDecoupleStaticValueStr);
+
+    /* super class */
+    EmitMuidTable(superClassStVec, strIdx2Type, kMuidSuperclassPrefixStr);
+
+    /* field offset rw */
+    EmitMetaDataSymbolWithMarkFlag(fieldOffsetDatas, strIdx2Type, kFieldOffsetDataPrefixStr, sectionNameIsEmpty, false);
+    /* method address rw */
+    EmitMetaDataSymbolWithMarkFlag(methodAddrDatas, strIdx2Type, kMethodAddrDataPrefixStr, sectionNameIsEmpty, false);
+    /* method address ro */
+    std::string methodSignatureSectionName("romethodsignature");
+    EmitMetaDataSymbolWithMarkFlag(methodSignatureDatas, strIdx2Type, kMethodSignaturePrefixStr,
+                                   methodSignatureSectionName, false);
+
+    /* array class cache table */
+    EmitMuidTable(arrayClassCacheVec, strIdx2Type, kArrayClassCacheTable);
+    /* array class cache name table */
+    EmitMuidTable(arrayClassCacheNameVec, strIdx2Type, kArrayClassCacheNameTable);
+
+#if !defined(TARGARM32)
+    /* finally emit __gxx_personality_v0 DW.ref */
+    if (!cg->GetMIRModule()->IsCModule()) {
+        EmitDWRef("__mpl_personality_v0");
+    }
+#endif
+}
+void Emitter::EmitAddressString(const std::string &address)
+{
+#if TARGAARCH64 || TARGRISCV64 || TARGX86_64
+    EmitAsmLabel(kAsmQuad);
+    Emit(address);
+#else
+    Emit("\t.word\t" + address);
+#endif
+}
+void Emitter::EmitGlobalRootList(const MIRSymbol &mirSymbol)
+{
+    Emit("\t.section .maple.gcrootsmap").Emit(",\"aw\",%progbits\n");
+    std::vector<std::string> nameVec;
+    std::string name = mirSymbol.GetName();
+    nameVec.emplace_back(name);
+    nameVec.emplace_back(name + "Size");
+    bool gcrootsFlag = true;
+    uint64 vecSize = 0;
+    for (const auto &gcrootsName : nameVec) {
+#if TARGAARCH64 || TARGRISCV64 || TARGX86_64
+        Emit("\t.type\t" + gcrootsName + ", @object\n" + "\t.p2align 3\n");
+#else
+        Emit("\t.type\t" + gcrootsName + ", %object\n" + "\t.p2align 3\n");
+#endif
+        Emit("\t.global\t" + gcrootsName + "\n");
+        if (gcrootsFlag) {
+            Emit(kMuidGlobalRootlistPrefixStr).Emit("_begin:\n");
+        }
+        Emit(gcrootsName + ":\n");
+        if (gcrootsFlag) {
+            MIRAggConst *aggConst = safe_cast<MIRAggConst>(mirSymbol.GetKonst());
+            if (aggConst == nullptr) {
+                continue;
+            }
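+            /*
+             * Layout sketch, as implied by the emission code here: the first
+             * pass (gcrootsFlag) emits each gcroot address under <name>:, the
+             * second pass emits the element count under <name>Size:, and the
+             * whole map is bracketed by the _begin/_end marker labels.
+             */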
size_t i = 0; + while (i < aggConst->GetConstVec().size()) { + MIRConst *elemConst = aggConst->GetConstVecItem(i); + if (elemConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(elemConst); + CHECK_FATAL(symAddr != nullptr, "nullptr of symAddr"); + MIRSymbol *symAddrSym = + GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + EmitAddressString(symAddrName + "\n"); + } else { + EmitScalarConstant(*elemConst); + } + i++; + } + vecSize = i; + } else { + EmitAddressString(std::to_string(vecSize) + "\n"); + } + Emit("\t.size\t" + gcrootsName + ",.-").Emit(gcrootsName + "\n"); + if (gcrootsFlag) { + Emit(kMuidGlobalRootlistPrefixStr).Emit("_end:\n"); + } + gcrootsFlag = false; + } +} + +void Emitter::EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, + const std::string §ionName) +{ + MIRSymbol *st = nullptr; + if (!vec.empty()) { + st = vec[0]; + } + if (st != nullptr && st->IsMuidRoTab()) { + Emit("\t.section ." + sectionName + ",\"a\",%progbits\n"); + } else { + Emit("\t.section ." + sectionName + ",\"aw\",%progbits\n"); + } + Emit(sectionName + "_begin:\n"); + bool isConstString = sectionName == kMuidConststrPrefixStr; + for (size_t i = 0; i < vec.size(); i++) { + MIRSymbol *st1 = vec[i]; + DEBUG_ASSERT(st1 != nullptr, "null ptr check"); + if (st1->GetStorageClass() == kScUnused) { + continue; + } + EmitAsmLabel(*st1, kAsmType); + if (st1->GetStorageClass() == kScFstatic) { + EmitAsmLabel(*st1, kAsmLocal); + } else { + EmitAsmLabel(*st1, kAsmGlbl); + EmitAsmLabel(*st1, kAsmHidden); + } + EmitAsmLabel(*st1, kAsmAlign); + EmitAsmLabel(*st1, kAsmSyname); + MIRConst *mirConst = st1->GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitMuidTable"); + if (mirConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(mirConst); + CHECK_FATAL(symAddr != nullptr, "call static_cast failed in EmitMuidTable"); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + if (isConstString) { + EmitAddressString(symAddrSym->GetName() + " - . + "); + Emit(kDataRefIsOffset); + Emit("\n"); + } else { + EmitAddressString(symAddrSym->GetName() + "\n"); + } + } else if (mirConst->GetKind() == kConstInt) { + EmitScalarConstant(*mirConst, true); + } else { + EmitConstantTable(*st1, *mirConst, strIdx2Type); + } + EmitAsmLabel(*st1, kAsmSize); + } + Emit(sectionName + "_end:\n"); +} + +void Emitter::EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName) +{ + EmitAsmLabel(mirSymbol, kAsmType); + if (!sectionName.empty()) { + Emit("\t.section ." 
+ sectionName);
+        if (StringUtils::StartsWith(sectionName, "ro")) {
+            Emit(",\"a\",%progbits\n");
+        } else {
+            Emit(",\"aw\",%progbits\n");
+        }
+    } else {
+        EmitAsmLabel(kAsmData);
+    }
+    EmitAsmLabel(mirSymbol, kAsmGlbl);
+    EmitAsmLabel(mirSymbol, kAsmHidden);
+    EmitAsmLabel(mirSymbol, kAsmAlign);
+    EmitAsmLabel(mirSymbol, kAsmSyname);
+    MIRConst *mirConst = mirSymbol.GetKonst();
+    CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitClassInfoSequential");
+    EmitConstantTable(mirSymbol, *mirConst, strIdx2Type);
+    EmitAsmLabel(mirSymbol, kAsmSize);
+}
+
+void Emitter::EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string &sectionName)
+{
+    std::string symName = mirSymbol.GetName();
+    std::string emitSyName = symName + "_DeclaringClass";
+    std::string declaringClassName = symName.substr(strlen(kFieldsInfoCompactPrefixStr) + 1);
+    Emit(asmInfo->GetType());
+    Emit(emitSyName + ", %object\n");
+    if (!sectionName.empty()) {
+        Emit("\t.section ." + sectionName + "\n");
+    } else {
+        EmitAsmLabel(kAsmData);
+    }
+    Emit(asmInfo->GetLocal());
+    Emit(emitSyName + "\n");
+    Emit(asmInfo->GetAlign());
+    Emit(" 2\n" + emitSyName + ":\n");
+    Emit("\t.long\t");
+    Emit(CLASSINFO_PREFIX_STR + declaringClassName + " - .\n");
+    Emit(asmInfo->GetSize());
+    Emit(emitSyName + ", 4\n");
+}
+
+void Emitter::EmitMethodFieldSequential(const MIRSymbol &mirSymbol, const std::map<GStrIdx, MIRType *> &strIdx2Type,
+                                        const std::string &sectionName)
+{
+    std::string symName = mirSymbol.GetName();
+    if (symName.find(kMethodsInfoCompactPrefixStr) != std::string::npos) {
+        EmitMethodDeclaringClass(mirSymbol, sectionName);
+    }
+    EmitAsmLabel(mirSymbol, kAsmType);
+    if (!sectionName.empty()) {
+        Emit("\t.section ." + sectionName + "\n");
+    } else {
+        EmitAsmLabel(kAsmData);
+    }
+    EmitAsmLabel(mirSymbol, kAsmLocal);
+
+    /* Emit(2) requests 2^2 = 4-byte alignment */
+    Emit(asmInfo->GetAlign()).Emit(2).Emit("\n");
+    EmitAsmLabel(mirSymbol, kAsmSyname);
+    MIRConst *ct = mirSymbol.GetKonst();
+    EmitConstantTable(mirSymbol, *ct, strIdx2Type);
+    std::string symbolName = mirSymbol.GetName();
+    Emit("\t.size\t" + symbolName + ", .-");
+    Emit(symbolName + "\n");
+}
+
+void Emitter::EmitDWRef(const std::string &name)
+{
+    /*
+     * .hidden DW.ref._ZTI3xxx
+     * .weak DW.ref._ZTI3xxx
+     * .section .data.DW.ref._ZTI3xxx,"awG",@progbits,DW.ref._ZTI3xxx,comdat
+     * .align 3
+     * .type DW.ref._ZTI3xxx, %object
+     * .size DW.ref._ZTI3xxx, 8
+     * DW.ref._ZTI3xxx:
+     * .xword _ZTI3xxx
+     */
+    Emit("\t.hidden DW.ref." + name + "\n");
+    Emit("\t.weak DW.ref." + name + "\n");
+    Emit("\t.section .data.DW.ref." + name + ",\"awG\",%progbits,DW.ref.");
+    Emit(name + ",comdat\n");
+    Emit(asmInfo->GetAlign());
+#if TARGX86 || TARGX86_64
+    Emit("8\n");
+#else
+    Emit("3\n");
+#endif
+    Emit("\t.type DW.ref." + name + ", %object\n");
+    Emit("\t.size DW.ref." + name + ",8\n");
+    Emit("DW.ref."
+ name + ":\n"); +#if TARGAARCH64 || TARGRISCV64 + Emit("\t.xword " + name + "\n"); +#else + Emit("\t.word " + name + "\n"); +#endif +} + +void Emitter::EmitDecSigned(int64 num) +{ + std::ios::fmtflags flag(fileStream.flags()); + fileStream << std::dec << num; + fileStream.flags(flag); +} + +void Emitter::EmitDecUnsigned(uint64 num) +{ + std::ios::fmtflags flag(fileStream.flags()); + fileStream << std::dec << num; + fileStream.flags(flag); +} + +void Emitter::EmitHexUnsigned(uint64 num) +{ + std::ios::fmtflags flag(fileStream.flags()); + fileStream << "0x" << std::hex << num; + fileStream.flags(flag); +} + +#define XSTR(s) str(s) +#define str(s) #s + +void Emitter::EmitDIHeader() +{ + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_BEGIN) ":\n"); +} + +void Emitter::EmitDIFooter() +{ + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_END) ":\n"); +} + +void Emitter::EmitDIHeaderFileInfo() +{ + Emit("// dummy header file 1\n"); + Emit("// dummy header file 2\n"); + Emit("// dummy header file 3\n"); +} + +void Emitter::AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx) +{ + InsertLabdie2labidxTable(lblDie, lblIdx); +} + +LabelIdx Emitter::GetLabelIdxForLabelDie(DBGDie *lblDie) +{ + auto it = labdie2labidxTable.find(lblDie); + CHECK_FATAL(it != labdie2labidxTable.end(), ""); + return it->second; +} + +void Emitter::ApplyInPrefixOrder(DBGDie *die, const std::function &func) +{ + func(die); + DEBUG_ASSERT(die, ""); + if (die->GetSubDieVec().size() > 0) { + for (auto c : die->GetSubDieVec()) { + ApplyInPrefixOrder(c, func); + } + /* mark the end of the sibling list */ + func(nullptr); + } +} + +void Emitter::EmitDIFormSpecification(unsigned int dwform) +{ + switch (dwform) { + case DW_FORM_string: + Emit(".string "); + break; + case DW_FORM_strp: + case DW_FORM_data4: + case DW_FORM_ref4: + Emit(".4byte "); + break; + case DW_FORM_data1: + Emit(".byte "); + break; + case DW_FORM_data2: + Emit(".2byte "); + break; + case DW_FORM_data8: + Emit(".8byte "); + break; + case DW_FORM_sec_offset: + /* if DWARF64, should be .8byte? */ + Emit(".4byte "); + break; + case DW_FORM_addr: /* Should we use DWARF64? 
for now, we generate .8byte as gcc does for DW_FORM_addr */ + Emit(".8byte "); + break; + case DW_FORM_exprloc: + Emit(".uleb128 "); + break; + default: + CHECK_FATAL(maple::GetDwFormName(dwform) != nullptr, + "GetDwFormName() return null in Emitter::EmitDIFormSpecification"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(dwform) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di) +{ + MapleVector &attrvec = die->GetAttrVec(); + + switch (attr->GetDwForm()) { + case DW_FORM_string: { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()); + Emit("\"").Emit(name).Emit("\""); + Emit(CMNT "len = "); + EmitDecUnsigned(name.length() + 1); + break; + } + case DW_FORM_strp: + Emit(".L" XSTR(DEBUG_STR_LABEL)); + fileStream << attr->GetId(); + break; + case DW_FORM_data1: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint8_t(attr->GetI())); + break; + case DW_FORM_data2: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint16_t(attr->GetI())); + break; + case DW_FORM_data4: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint32_t(attr->GetI())); + break; + case DW_FORM_data8: + if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap>::iterator it = + CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.second); /* end label */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + Emit("-"); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* start label */ + } else { + DBGDieAttr *lowpc = LFindAttribute(attrvec, DW_AT_low_pc); + CHECK_FATAL(lowpc != nullptr, "lowpc is null in Emitter::EmitDIAttrValue"); + EmitLabelRef(lowpc->GetId()); /* maybe deadbeef */ + } + } + } else { + EmitHexUnsigned(static_cast(static_cast(attr->GetI()))); + } + break; + case DW_FORM_sec_offset: + if (attrName == DW_AT_stmt_list) { + Emit(".L"); + Emit(XSTR(DEBUG_LINE_0)); + } + break; + case DW_FORM_addr: + if (attrName == DW_AT_low_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + /* if decl, name should be found; if def, we try DW_AT_specification */ + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); 
+ name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap>::iterator it = + CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* it is a */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + } else if (tagName == DW_TAG_label) { + LabelIdx labelIdx = GetLabelIdxForLabelDie(die); + DBGDie *subpgm = die->GetParent(); + DEBUG_ASSERT(subpgm->GetTag() == DW_TAG_subprogram, + "Label DIE should be a child of a Subprogram DIE"); + DBGDieAttr *fnameAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_name); + if (!fnameAttr) { + DBGDieAttr *specAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_specification); + CHECK_FATAL(specAttr, "pointer is null"); + DBGDie *twin = di->GetDie(static_cast(specAttr->GetU())); + fnameAttr = LFindAttribute(twin->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(fnameAttr, ""); + const std::string &fnameStr = GlobalTables::GetStrTable().GetStringFromStrIdx(fnameAttr->GetId()); + auto *res = memPool->New(fnameStr.c_str(), labelIdx, *memPool); + cfi::CFIOpndEmitVisitor cfiVisitor(*this); + res->Accept(cfiVisitor); + } + } else if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } + } else { + Emit("XXX--ADDR--XXX"); + } + break; + case DW_FORM_ref4: + if (attrName == DW_AT_type) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + if (die0->GetOffset()) { + EmitHexUnsigned(die0->GetOffset()); + } else { + /* unknown type, missing mplt */ + EmitHexUnsigned(di->GetDummyTypeDie()->GetOffset()); + Emit(CMNT "Warning: dummy type used"); + } + } else if (attrName == DW_AT_specification || attrName == DW_AT_sibling) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + DEBUG_ASSERT(die0->GetOffset(), ""); + EmitHexUnsigned(die0->GetOffset()); + } else if (attrName == DW_AT_object_pointer) { + GStrIdx thisIdx = GlobalTables::GetStrTable().GetStrIdxFromName(kDebugMapleThis); + DBGDie *that = LFindChildDieWithName(die, DW_TAG_formal_parameter, thisIdx); + /* need to find the this or self based on the source language + what is the name for 'this' used in mapleir? 
+ this has to be with respect to a function */ + if (that) { + EmitHexUnsigned(that->GetOffset()); + } else { + EmitHexUnsigned(attr->GetU()); + } + } else { + Emit(" OFFSET "); + EmitHexUnsigned(attr->GetU()); + } + break; + case DW_FORM_exprloc: { + DBGExprLoc *elp = attr->GetPtr(); + switch (elp->GetOp()) { + case DW_OP_call_frame_cfa: + EmitHexUnsigned(1); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + break; + case DW_OP_addr: + EmitHexUnsigned(k9ByteSize); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit("\n\t.8byte "); + (void)Emit(GlobalTables::GetStrTable() + .GetStringFromStrIdx(static_cast(elp->GetGvarStridx())) + .c_str()); + break; + case DW_OP_fbreg: + EmitHexUnsigned(1 + namemangler::GetSleb128Size(elp->GetFboffset())); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit("\n\t.sleb128 "); + EmitDecSigned(elp->GetFboffset()); + break; + default: + EmitHexUnsigned(uintptr_t(elp)); + break; + } + break; + } + default: + CHECK_FATAL(maple::GetDwFormName(attr->GetDwForm()) != nullptr, + "GetDwFormName return null in Emitter::EmitDIAttrValue"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(attr->GetDwForm()) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIDebugInfoSection(DebugInfo *mirdi) +{ + /* From DWARF Standard Specification V4. 7.5.1 + collect section size */ + Emit("\t.section\t.debug_info,\"\",@progbits\n"); + /* label to mark start of the .debug_info section */ + Emit(".L" XSTR(DEBUG_INFO_0) ":\n"); + /* $ 7.5.1.1 */ + Emit("\t.4byte\t"); + EmitHexUnsigned(mirdi->GetDebugInfoLength()); + Emit(CMNT "section length\n"); + /* DWARF version. uhalf. */ + Emit("\t.2byte\t"); + /* 4 for version 4. */ + EmitHexUnsigned(kDwarfVersion); + Emit("\n"); + /* debug_abbrev_offset. 4byte for 32-bit, 8byte for 64-bit */ + Emit("\t.4byte\t.L" XSTR(DEBUG_ABBREV_0) "\n"); + /* address size. ubyte */ + Emit("\t.byte\t"); + EmitHexUnsigned(kSizeOfPTR); + Emit("\n"); + /* + * 7.5.1.2 type unit header + * currently empty... 
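+     * (no type units are emitted here; the DIE tree of the single compile
+     * unit follows directly)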
+ * + * 7.5.2 Debugging Information Entry (DIE) + */ + Emitter *emitter = this; + MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter, &mirdi](DBGDie *die) { + if (!die) { + /* emit the null entry and return */ + emitter->Emit("\t.byte 0x0\n"); + return; + } + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + if (verbose) { + emitter->Emit("\n"); + } + emitter->Emit("\t.uleb128 "); + emitter->EmitHexUnsigned(die->GetAbbrevId()); + if (verbose) { + emitter->Emit(CMNT); + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "GetDwTagName(die->GetTag()) return null in Emitter::EmitDIDebugInfoSection"); + emitter->Emit(maple::GetDwTagName(die->GetTag())); + emitter->Emit(" Offset= "); + emitter->EmitHexUnsigned(die->GetOffset()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetOffset()); + emitter->Emit(" ), Size= "); + emitter->EmitHexUnsigned(die->GetSize()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetSize()); + emitter->Emit(" )\n"); + } else { + emitter->Emit("\n"); + } + DBGAbbrevEntry *diae = LFindAbbrevEntry(abbrevVec, die->GetAbbrevId()); + CHECK_FATAL(diae != nullptr, "diae is null in Emitter::EmitDIDebugInfoSection"); + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + + std::string sfile, spath; + if (diae->GetTag() == DW_TAG_compile_unit && sfile.empty()) { + /* get full source path from fileMap[2] */ + if (emitter->GetFileMap().size() > k2ByteSize) { /* have src file map */ + std::string srcPath = emitter->GetFileMap()[k2ByteSize]; + size_t t = srcPath.rfind("/"); + DEBUG_ASSERT(t != std::string::npos, ""); + sfile = srcPath.substr(t + 1); + spath = srcPath.substr(0, t); + } + } + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + DBGDieAttr *attr = LFindAttribute(die->GetAttrVec(), DwAt(apl[i])); + if (!LShouldEmit(unsigned(apl[i + 1]))) { + continue; + } + /* update DW_AT_name and DW_AT_comp_dir attrs under DW_TAG_compile_unit + to be C/C++ */ + if (!sfile.empty()) { + if (attr->GetDwAt() == DW_AT_name) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(sfile).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } else if (attr->GetDwAt() == DW_AT_comp_dir) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(spath).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } + } + emitter->Emit("\t"); + emitter->EmitDIFormSpecification(unsigned(apl[i + 1])); + emitter->EmitDIAttrValue(die, attr, unsigned(apl[i]), diae->GetTag(), mirdi); + if (verbose) { + emitter->Emit(CMNT); + emitter->Emit(maple::GetDwAtName(unsigned(apl[i]))); + emitter->Emit(" : "); + emitter->Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + if (apl[i + 1] == DW_FORM_strp || apl[i + 1] == DW_FORM_string) { + emitter->Emit(" : "); + emitter->Emit(GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()).c_str()); + } else if (apl[i] == DW_AT_data_member_location) { + emitter->Emit(" : "); + emitter->Emit(apl[i + 1]).Emit(" attr= "); + emitter->EmitHexUnsigned(uintptr_t(attr)); + } + } + emitter->Emit("\n"); + } + }); +} + +void Emitter::EmitDIDebugAbbrevSection(DebugInfo *mirdi) +{ + Emit("\t.section\t.debug_abbrev,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_ABBREV_0) ":\n"); + + /* construct a list of DI abbrev entries + 1. DW_TAG_compile_unit 0x11 + 2. 
DW_TAG_subprogram 0x2e */ + bool verbose = GetCG()->GenerateVerboseAsm(); + for (DBGAbbrevEntry *diae : mirdi->GetAbbrevVec()) { + if (!diae) { + continue; + } + /* ID */ + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetAbbrevId()); + if (verbose) { + Emit(CMNT "Abbrev Entry ID"); + } + Emit("\n"); + /* TAG */ + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetTag()); + CHECK_FATAL(maple::GetDwTagName(diae->GetTag()) != nullptr, + "GetDwTagName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwTagName(diae->GetTag())); + } + Emit("\n"); + + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + /* children? */ + Emit("\t.byte "); + EmitHexUnsigned(diae->GetWithChildren()); + if (verbose) { + Emit(diae->GetWithChildren() ? CMNT "DW_CHILDREN_yes" : CMNT "DW_CHILDREN_no"); + } + Emit("\n"); + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i]); + CHECK_FATAL(maple::GetDwAtName(unsigned(apl[i])) != nullptr, + "GetDwAtName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwAtName(unsigned(apl[i]))); + } + Emit("\n"); + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i + 1]); + CHECK_FATAL(maple::GetDwFormName(unsigned(apl[i + 1])) != nullptr, + "GetDwFormName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + } + Emit("\n"); + } + /* end of an abbreviation record */ + Emit("\t.byte 0x0\n"); + Emit("\t.byte 0x0\n"); + } + Emit("\t.byte 0x0\n"); +} + +void Emitter::EmitDIDebugARangesSection() +{ + Emit("\t.section\t.debug_aranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugRangesSection() +{ + Emit("\t.section\t.debug_ranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugLineSection() +{ + Emit("\t.section\t.debug_line,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_LINE_0) ":\n"); +} + +void Emitter::EmitDIDebugStrSection() +{ + Emit("\t.section\t.debug_str,\"MS\",@progbits,1\n"); + for (auto it : GetCG()->GetMIRModule()->GetDbgInfo()->GetStrps()) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + fileStream << it; + Emit(":\n"); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it); + Emit("\t.string \"").Emit(name).Emit("\"\n"); + } +} + +void Emitter::FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr) +{ + DEBUG_ASSERT(byteSizeAttr->GetDwForm() == DW_FORM_data1 || byteSizeAttr->GetDwForm() == DW_FORM_data2 || + byteSizeAttr->GetDwForm() == DW_FORM_data4 || byteSizeAttr->GetDwForm() == DW_FORM_data8, + "Unknown FORM value for DW_AT_byte_size"); + if (static_cast(byteSizeAttr->GetI()) == kDbgDefaultVal) { + /* get class size */ + DBGDieAttr *nameAttr = LFindDieAttr(die, DW_AT_name); + CHECK_FATAL(nameAttr != nullptr, "name_attr is nullptr in Emitter::FillInClassByteSize"); + /* hope this is a global string index as it is a type name */ + TyIdx tyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(GStrIdx(nameAttr->GetId())); + CHECK_FATAL(tyIdx.GetIdx() < Globals::GetInstance()->GetBECommon()->GetSizeOfTypeSizeTable(), + "index out of range in Emitter::FillInClassByteSize"); + int64_t byteSize = static_cast(Globals::GetInstance()->GetBECommon()->GetTypeSize(tyIdx.GetIdx())); + LUpdateAttrValue(byteSizeAttr, byteSize); + } +} + +void Emitter::SetupDBGInfo(DebugInfo *mirdi) +{ + Emitter *emitter = this; + 
MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter](DBGDie *die) { + if (!die) { + return; + } + + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "maple::GetDwTagName(die->GetTag()) is nullptr in Emitter::SetupDBGInfo"); + if (die->GetAbbrevId() == 0) { + LogInfo::MapleLogger() << maple::GetDwTagName(die->GetTag()) << std::endl; + } + CHECK_FATAL(die->GetAbbrevId() < abbrevVec.size(), "index out of range in Emitter::SetupDBGInfo"); + DEBUG_ASSERT(abbrevVec[die->GetAbbrevId()]->GetAbbrevId() == die->GetAbbrevId(), ""); + DBGAbbrevEntry *diae = abbrevVec[die->GetAbbrevId()]; + switch (diae->GetTag()) { + case DW_TAG_subprogram: { + DBGExprLoc *exprloc = emitter->memPool->New(emitter->GetCG()->GetMIRModule()); + exprloc->GetSimpLoc()->SetDwOp(DW_OP_call_frame_cfa); + die->SetAttr(DW_AT_frame_base, exprloc); + break; + } + case DW_TAG_structure_type: + case DW_TAG_union_type: + case DW_TAG_class_type: + case DW_TAG_interface_type: { + DBGDieAttr *byteSizeAttr = LFindDieAttr(die, DW_AT_byte_size); + if (byteSizeAttr) { + emitter->FillInClassByteSize(die, byteSizeAttr); + } + /* get the name */ + DBGDieAttr *atName = LFindDieAttr(die, DW_AT_name); + CHECK_FATAL(atName != nullptr, "at_name is null in Emitter::SetupDBGInfo"); + /* get the type from string name */ + TyIdx ctyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(GStrIdx(atName->GetId())); + MIRType *mty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ctyIdx); + MIRStructType *sty = static_cast(mty); + CHECK_FATAL(sty != nullptr, "pointer cast failed"); + CHECK_FATAL(sty->GetTypeIndex().GetIdx() < + Globals::GetInstance()->GetBECommon()->GetSizeOfStructFieldCountTable(), + ""); + uint32 embeddedIDs = 0; + MIRStructType *prevSubstruct = nullptr; + for (size_t i = 0; i < sty->GetFields().size(); i++) { + TyIdx fieldtyidx = sty->GetFieldsElemt(i).second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + if (prevSubstruct) { + embeddedIDs += static_cast(Globals::GetInstance()->GetBECommon()->GetStructFieldCount( + static_cast(prevSubstruct->GetTypeIndex().GetIdx()))); + } + prevSubstruct = fieldty->EmbeddedStructType(); + FieldID fieldID = static_cast(i + embeddedIDs) + 1; + int offset = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*sty, fieldID).first; + GStrIdx fldName = sty->GetFieldsElemt(i).first; + DBGDie *cdie = LFindChildDieWithName(die, DW_TAG_member, fldName); + CHECK_FATAL(cdie != nullptr, "cdie is null in Emitter::SetupDBGInfo"); + DBGDieAttr *mloc = LFindDieAttr(cdie, DW_AT_data_member_location); + CHECK_FATAL(mloc != nullptr, "mloc is null in Emitter::SetupDBGInfo"); + DBGAbbrevEntry *childDiae = abbrevVec[cdie->GetAbbrevId()]; + CHECK_FATAL(childDiae != nullptr, "child_diae is null in Emitter::SetupDBGInfo"); + LUpdateAttrValue(mloc, offset); + } + break; + } + default: + break; + } + }); + + /* compute DIE sizes and offsets */ + mirdi->ComputeSizeAndOffsets(); +} + +void Emitter::EmitAliasAndRef(const MIRSymbol &sym) +{ + MIRFunction *mFunc = sym.GetFunction(); + if (mFunc == nullptr || !mFunc->GetAttr(FUNCATTR_alias)) { + return; + } + if (mFunc->GetAttr(FUNCATTR_extern)) { + Emit(asmInfo->GetGlobal()).Emit(mFunc->GetName()).Emit("\n"); + } + auto &aliasPrefix = mFunc->GetAttr(FUNCATTR_weakref) ? 
asmInfo->GetWeakref() : asmInfo->GetSet();
+    Emit(aliasPrefix);
+    Emit(sym.GetName()).Emit(",").Emit(mFunc->GetAttrs().GetAliasFuncName()).Emit("\n");
+}
+
+void Emitter::EmitHugeSoRoutines(bool lastRoutine)
+{
+    if (!lastRoutine &&
+        (javaInsnCount < (static_cast<uint64>(hugeSoSeqence) * static_cast<uint64>(kHugeSoInsnCountThreshold)))) {
+        return;
+    }
+    for (auto &target : hugeSoTargets) {
+        (void)Emit("\t.section\t." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n");
+#if TARGX86 || TARGX86_64
+        Emit("\t.align\t8\n");
+#else
+        Emit("\t.align 3\n");
+#endif
+        std::string routineName = target + HugeSoPostFix();
+        Emit("\t.type\t" + routineName + ", %function\n");
+        Emit(routineName + ":\n");
+        Emit("\tadrp\tx17, :got:" + target + "\n");
+        Emit("\tldr\tx17, [x17, :got_lo12:" + target + "]\n");
+        Emit("\tbr\tx17\n");
+        javaInsnCount += kSizeOfHugesoRoutine;
+    }
+    hugeSoTargets.clear();
+    ++hugeSoSeqence;
+}
+
+void ImmOperand::Dump() const
+{
+    LogInfo::MapleLogger() << "imm:" << value;
+}
+
+void LabelOperand::Dump() const
+{
+    LogInfo::MapleLogger() << "label:" << labelIndex;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/global.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/global.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6f43efe3236b9eb73b2ad9faa2a7d0e18f716432
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/global.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if TARGAARCH64
+#include "aarch64_global.h"
+#elif TARGRISCV64
+#include "riscv64_global.h"
+#endif
+#if TARGARM32
+#include "arm32_global.h"
+#endif
+#include "reaching.h"
+#include "cgfunc.h"
+#include "live.h"
+/*
+ * This phase does some optimizations using the use-def and def-use chains.
+ * Each function in Run() is one optimization. It mainly includes 2 parts:
+ * 1. find the number of valid bits for a register by finding its definition insn,
+ *    and then use the valid bits to delete redundant insns.
+ * 2. copy propagation:
+ *    a. forward copy propagation
+ *       this optimization aims to optimize the following:
+ *         mov x100, x200;
+ *         BBs:
+ *         ...
+ *         mOp ..., x100   // multiple sites that use x100
+ *       =>
+ *         mov x200, x200
+ *         BBs:
+ *         ...
+ *         mOp ..., x200   // multiple sites that used x100
+ *    b. backward copy propagation
+ *       this optimization aims to optimize the following:
+ *         mOp x200, ...   // def insn of x200
+ *         ...
+ *         mOp ..., x200   // use site of x200
+ *         mov x100, x200;
+ *       =>
+ *         mOp x100, ...   // def insn of x200
+ *         ...
+ *         mOp ..., x100   // use site of x200
+ *         mov x100, x100;
+ *
+ * NOTE: after an insn is modified, the UD-chain and DU-chain must be maintained by the
+ *       pass itself. Several common interfaces have been implemented in RD, but they must
+ *       be used carefully. Specific instructions for use can be found at the beginning of
+ *       the corresponding function.
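+ *
+ * A hypothetical sketch of part 1 (valid-bit analysis); the instructions are
+ * illustrative only, not taken from this pass:
+ *     uxtb w1, w2          // def of w1: only the low 8 bits are valid
+ *     and  w3, w1, #0xff   // redundant mask: w1 already fits in 8 bits
+ * =>
+ *     uxtb w1, w2
+ *     mov  w3, w1          // the mask can be replaced by a plain copy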
+ */
+namespace maplebe {
+using namespace maple;
+
+bool CgGlobalOpt::PhaseRun(maplebe::CGFunc &f)
+{
+    ReachingDefinition *reachingDef = nullptr;
+    LiveAnalysis *live = nullptr;
+    if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+        reachingDef = GET_ANALYSIS(CgReachingDefinition, f);
+        live = GET_ANALYSIS(CgLiveAnalysis, f);
+    }
+    if (reachingDef == nullptr || !f.GetRDStatus()) {
+        GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id);
+        return false;
+    }
+    reachingDef->SetAnalysisMode(kRDAllAnalysis);
+    GlobalOpt *globalOpt = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    globalOpt = GetPhaseAllocator()->New<AArch64GlobalOpt>(f);
+#endif
+#if TARGARM32
+    globalOpt = GetPhaseAllocator()->New<Arm32GlobalOpt>(f);
+#endif
+    globalOpt->Run();
+    if (live != nullptr) {
+        live->ClearInOutDataInfo();
+    }
+    return true;
+}
+
+void CgGlobalOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgReachingDefinition>();
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgGlobalOpt, globalopt)
+
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/ico.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ico.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e677e7e7fe91f38ed012635f5f6f3420a69b24df
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ico.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "ico.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_ico.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#elif TARGRISCV64 +#include "riscv64_ico.h" +#include "riscv64_isa.h" +#include "riscv64_insn.h" +#elif TARGARM32 +#include "arm32_ico.h" +#include "arm32_isa.h" +#include "arm32_insn.h" +#endif +#include "cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +#define ICO_DUMP_NEWPM CG_DEBUG_FUNC(f) +namespace maplebe { +Insn *ICOPattern::FindLastCmpInsn(BB &bb) const +{ + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (cgFunc->GetTheCFG()->GetInsnModifier()->IsCompareInsn(*insn)) { + return insn; + } + } + return nullptr; +} + +std::vector ICOPattern::GetLabelOpnds(Insn &insn) const +{ + std::vector labelOpnds; + for (uint32 i = 0; i < insn.GetOperandSize(); i++) { + if (insn.GetOperand(i).IsLabelOpnd()) { + labelOpnds.emplace_back(static_cast(&insn.GetOperand(i))); + } + } + return labelOpnds; +} + +bool CgIco::PhaseRun(maplebe::CGFunc &f) +{ + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-before", f, f.GetMirModule()); + } + MemPool *memPool = GetPhaseMemPool(); + IfConversionOptimizer *ico = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ico = memPool->New(f, *memPool); +#endif +#if TARGARM32 + ico = memPool->New(f, *memPool); +#endif + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + std::string name = funcClass + funcName; + ico->Run(name); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-after", f, f.GetMirModule()); + } + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return false; +} +void CgIco::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgIco, ico) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/ifile.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ifile.cpp new file mode 100644 index 0000000000000000000000000000000000000000..79095494c6d656e7dd2609955540717b04b57e50 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ifile.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_SECTION_H +#define MAPLEBE_INCLUDE_CG_SECTION_H + +#include "ifile.h" +#include "obj_emit.h" + +namespace maplebe { +Section::Section(std::string secName, Word type, Word flags, uint32 align, ObjEmitter &objEmitter, MemPool &memPool) + : emitter(objEmitter), name(secName, &memPool) +{ + secHeader.sh_type = type; + secHeader.sh_flags = flags; + secHeader.sh_addralign = align; + emitter.RegisterSection(this); +} + +void Section::Layout() +{ + emitter.UpdateSectionOffsetAddr(this); + GenerateData(); + HandleLocalFixup(); + emitter.UpdateGlobalOffsetAddr(this); +} + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_SECTION_H */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/insn.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10b9f46ee2524d5012a27951715895cb0977e594 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/insn.cpp @@ -0,0 +1,441 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "insn.h" +#include "isa.h" +#include "cg.h" +namespace maplebe { +bool Insn::IsMachineInstruction() const +{ + return md && md->IsPhysicalInsn() && Globals::GetInstance()->GetTarget()->IsTargetInsn(mOp); +} +/* phi is not physical insn */ +bool Insn::IsPhi() const +{ + return md ? 
md->IsPhi() : false; +} +bool Insn::IsLoad() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoad(); +} +bool Insn::IsStore() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsStore(); +} +bool Insn::IsMove() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMove(); +} +bool Insn::IsBranch() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsBranch(); +} +bool Insn::IsCondBranch() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsCondBranch(); +} +bool Insn::IsUnCondBranch() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsUnCondBranch(); +} +bool Insn::IsBasicOp() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsBasicOp(); +} +bool Insn::IsConversion() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsConversion(); +} +bool Insn::IsUnaryOp() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsUnaryOp(); +} +bool Insn::IsShift() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsShift(); +} +bool Insn::IsCall() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsCall(); +} +bool Insn::IsTailCall() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsTailCall(); +} +bool Insn::IsAsmInsn() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsInlineAsm(); +} +bool Insn::IsDMBInsn() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsDMB(); +} +bool Insn::IsAtomic() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsAtomic(); +} +bool Insn::IsVolatile() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsVolatile(); +} +bool Insn::IsMemAccessBar() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMemAccessBar(); +} +bool Insn::IsMemAccess() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsMemAccess(); +} +bool Insn::CanThrow() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->CanThrow(); +} +bool Insn::IsVectorOp() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsVectorOp(); +} +bool Insn::HasLoop() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->HasLoop(); +} +uint32 Insn::GetLatencyType() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetLatencyType(); +} +uint32 Insn::GetAtomicNum() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetAtomicNum(); +} +bool Insn::IsSpecialIntrinsic() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsSpecialIntrinsic(); +} +bool Insn::IsLoadPair() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoadPair(); +} +bool Insn::IsStorePair() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsStorePair(); +} +bool Insn::IsLoadStorePair() const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->IsLoadStorePair(); +} +bool Insn::IsLoadLabel() const +{ + return md->IsLoad() && GetOperand(kInsnSecondOpnd).GetKind() == Operand::kOpdBBAddress; +} +bool Insn::OpndIsDef(uint32 id) const +{ + DEBUG_ASSERT(md, " set insnDescription for insn "); + return md->GetOpndDes(id)->IsDef(); +} +bool Insn::OpndIsUse(uint32 id) const +{ + DEBUG_ASSERT(md, " set 
insnDescription for insn "); + return md->GetOpndDes(id)->IsUse(); +} +bool Insn::IsClinit() const +{ + return Globals::GetInstance()->GetTarget()->IsClinitInsn(mOp); +} +bool Insn::IsComment() const +{ + return mOp == abstract::MOP_comment && !md->IsPhysicalInsn(); +} + +bool Insn::IsImmaterialInsn() const +{ + return IsComment(); +} + +bool Insn::IsPseudo() const +{ + return md && md->IsPhysicalInsn() && Globals::GetInstance()->GetTarget()->IsPseudoInsn(mOp); +} + +Operand *Insn::GetMemOpnd() const +{ + for (uint32 i = 0; i < opnds.size(); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + return &opnd; + } + } + return nullptr; +} +void Insn::SetMemOpnd(MemOperand *memOpnd) +{ + for (uint32 i = 0; i < static_cast<uint32>(opnds.size()); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + SetOperand(i, *memOpnd); + return; + } + } +} + +bool Insn::IsRegDefined(regno_t regNO) const +{ + return GetDefRegs().count(regNO); +} + +std::set<regno_t> Insn::GetDefRegs() const +{ + std::set<regno_t> defRegNOs; + size_t opndNum = opnds.size(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsDef(); + if (!isDef && !opnd.IsMemoryAccessOperand()) { + continue; + } + if (opnd.IsList()) { + for (auto *op : static_cast<ListOperand &>(opnd).GetOperands()) { + DEBUG_ASSERT(op != nullptr, "invalid operand in list operand"); + defRegNOs.emplace(op->GetRegisterNumber()); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast<MemOperand &>(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + DEBUG_ASSERT(!defRegNOs.count(base->GetRegisterNumber()), "duplicate def in one insn"); + defRegNOs.emplace(base->GetRegisterNumber()); + } + } + } else if (opnd.IsConditionCode() || opnd.IsRegister()) { + defRegNOs.emplace(static_cast<RegOperand &>(opnd).GetRegisterNumber()); + } + } + return defRegNOs; +} + +#if DEBUG +void Insn::Check() const +{ + if (!md) { + CHECK_FATAL(false, " need machine description for target insn "); + } + /* check if the number of operand(s) matches */ + uint32 insnOperandSize = GetOperandSize(); + if (insnOperandSize != md->GetOpndMDLength()) { + CHECK_FATAL(false, " the number of operands in instruction does not match machine description "); + } + /* check if the type of each operand matches */ + for (uint32 i = 0; i < insnOperandSize; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != md->GetOpndDes(i)->GetOperandType()) { + CHECK_FATAL(false, " operand type does not match machine description "); + } + } +} +#endif + +Insn *Insn::Clone(MemPool &memPool) const +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *Insn::GetCallTargetOperand() const +{ + DEBUG_ASSERT(IsCall() || IsTailCall(), "should be call"); + return &GetOperand(kInsnFirstOpnd); +} + +ListOperand *Insn::GetCallArgumentOperand() +{ + DEBUG_ASSERT(IsCall(), "should be call"); + DEBUG_ASSERT(GetOperand(1).IsList(), "should be list"); + return &static_cast<ListOperand &>(GetOperand(kInsnSecondOpnd)); +} + +void Insn::CommuteOperands(uint32 dIndex, uint32 sIndex) +{ + Operand *tempCopy = opnds[sIndex]; + opnds[sIndex] = opnds[dIndex]; + opnds[dIndex] = tempCopy; +} + +uint32 Insn::GetBothDefUseOpnd() const +{ + size_t opndNum = opnds.size(); + uint32 opndIdx = kInsnMaxOpnd; + if (md->GetAtomicNum() > 1) { + return opndIdx; + } + for (uint32 i = 0; i < opndNum; ++i) { + auto *opndProp =
md->GetOpndDes(i); + if (opndProp->IsRegUse() && opndProp->IsDef()) { + DEBUG_ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + if (opnds[i]->IsMemoryAccessOperand()) { + auto *MemOpnd = static_cast<MemOperand *>(opnds[i]); + if (!MemOpnd->IsIntactIndexed()) { + DEBUG_ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + } + } + return opndIdx; +} + +uint32 Insn::GetMemoryByteSize() const +{ + DEBUG_ASSERT(IsMemAccess(), "must be memory access insn"); + uint32 res = 0; + for (size_t i = 0; i < opnds.size(); ++i) { + if (md->GetOpndDes(i)->GetOperandType() == Operand::kOpdMem) { + res = md->GetOpndDes(i)->GetSize(); + } + } + DEBUG_ASSERT(res, "cannot access empty memory"); + if (IsLoadStorePair()) { + res = res << 1; + } + res = res >> 3; // bits to bytes + return res; +} + +bool Insn::ScanReg(regno_t regNO) const +{ + uint32 opndNum = GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast<ListOperand &>(opnd); + for (auto listElem : listOpnd.GetOperands()) { + auto *regOpnd = static_cast<RegOperand *>(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast<MemOperand &>(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsRegister()) { + if (static_cast<RegOperand &>(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool Insn::MayThrow() const +{ + if (md->IsMemAccess() && !IsLoadLabel()) { + auto *memOpnd = static_cast<MemOperand *>(GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "CG invalid memory operand."); + if (memOpnd->IsStackMem()) { + return false; + } + } + return md->CanThrow(); +} + +void Insn::SetMOP(const InsnDesc &idesc) +{ + mOp = idesc.GetOpc(); + md = &idesc; +} + +void Insn::Dump() const +{ + DEBUG_ASSERT(md != nullptr, "md should not be nullptr"); + LogInfo::MapleLogger() << "< " << GetId() << " > "; + LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; + + for (uint32 i = 0; i < GetOperandSize(); ++i) { + Operand &opnd = GetOperand(i); + LogInfo::MapleLogger() << " (opnd" << i << ": "; + Globals::GetInstance()->GetTarget()->DumpTargetOperand(opnd, *md->GetOpndDes(i)); + LogInfo::MapleLogger() << ")"; + } + + if (IsVectorOp()) { + auto *vInsn = static_cast<const VectorInsn *>(this); + if (vInsn->GetNumOfRegSpec() != 0) { + LogInfo::MapleLogger() << " (vecSpec: " << vInsn->GetNumOfRegSpec() << ")"; + } + } + if (stackMap != nullptr) { + const auto &deoptVreg2Opnd = stackMap->GetDeoptInfo().GetDeoptBundleInfo(); + if (!deoptVreg2Opnd.empty()) { + LogInfo::MapleLogger() << " (deopt: "; + bool isFirstElem = true; + for (const auto &elem : deoptVreg2Opnd) { + if (!isFirstElem) { + LogInfo::MapleLogger() << ", "; + } else { + isFirstElem = false; + } + LogInfo::MapleLogger() << elem.first << ":"; + elem.second->Dump(); + } + LogInfo::MapleLogger() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +VectorRegSpec *VectorInsn::GetAndRemoveRegSpecFromList() +{ + if (regSpecList.size() == 0) { + VectorRegSpec *vecSpec = CG::GetCurCGFuncNoConst()->GetMemoryPool()->New<VectorRegSpec>(); + return vecSpec; + } + VectorRegSpec *ret = regSpecList.back(); + regSpecList.pop_back(); + return ret; +} +} // namespace maplebe diff --git
a/ecmascript/compiler/codegen/maple/maple_be/src/cg/instruction_selection.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/instruction_selection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c68033ab710a50e1efb6ec88ee0941ad7087b464 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/instruction_selection.cpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "instruction_selection.h" +namespace maplebe { +bool CgIsel::PhaseRun(maplebe::CGFunc &f) { return false; } +} /* namespace maplebe */ \ No newline at end of file diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/isa.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..75e4d4d6666c57deeca327844b933fa9d40f64d0 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/isa.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "isa.h" +namespace maplebe { +#define DEFINE_MOP(op, ...) const OpndDesc OpndDesc::op = __VA_ARGS__; +#include "operand.def" +#undef DEFINE_MOP +#define DEFINE_MOP(op, ...) {abstract::op, __VA_ARGS__}, +const InsnDesc InsnDesc::abstractId[abstract::kMopLast] = { +#include "abstract_mmir.def" +}; +#undef DEFINE_MOP + +bool InsnDesc::IsSame(const InsnDesc &left, std::function<bool(const InsnDesc &left, const InsnDesc &right)> cmp) const +{ + return cmp == nullptr ? false : cmp(left, *this); +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/isel.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/isel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..00e3f7e20f8f42f6770e2376c5257ca10e2320e2 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/isel.cpp @@ -0,0 +1,1696 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "isel.h" +#include "factory.h" +#include "cg.h" +#include "standardize.h" +#include +#include + +namespace maplebe { +/* register, imm , memory, cond */ +#define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ + MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ + {abstract::MOP_copy_rr_##SIZE, abstract::MOP_copy_ri_##SIZE, abstract::MOP_load_##SIZE, abstract::MOP_undef}, \ + {abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + {abstract::MOP_str_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + {abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + }; +#define DEF_FAST_ISEL_MAPPING_FLOAT(SIZE) \ + MOperator fastIselMapF##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ + {abstract::MOP_copy_ff_##SIZE, abstract::MOP_copy_fi_##SIZE, abstract::MOP_load_f_##SIZE, \ + abstract::MOP_undef}, \ + {abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + {abstract::MOP_str_f_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + {abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ + }; + +DEF_FAST_ISEL_MAPPING_INT(8) +DEF_FAST_ISEL_MAPPING_INT(16) +DEF_FAST_ISEL_MAPPING_INT(32) +DEF_FAST_ISEL_MAPPING_INT(64) +DEF_FAST_ISEL_MAPPING_FLOAT(8) +DEF_FAST_ISEL_MAPPING_FLOAT(16) +DEF_FAST_ISEL_MAPPING_FLOAT(32) +DEF_FAST_ISEL_MAPPING_FLOAT(64) + +#define DEF_SEL_MAPPING_TBL(SIZE) \ + MOperator SelMapping##SIZE(bool isInt, uint32 x, uint32 y) \ + { \ + return isInt ? fastIselMapI##SIZE[x][y] : fastIselMapF##SIZE[x][y]; \ + } +#define USE_SELMAPPING_TBL(SIZE) \ + { \ + SIZE, SelMapping##SIZE \ + } + +DEF_SEL_MAPPING_TBL(8); +DEF_SEL_MAPPING_TBL(16); +DEF_SEL_MAPPING_TBL(32); +DEF_SEL_MAPPING_TBL(64); + +std::map> fastIselMappingTable = { + USE_SELMAPPING_TBL(8), USE_SELMAPPING_TBL(16), USE_SELMAPPING_TBL(32), USE_SELMAPPING_TBL(64)}; + +MOperator GetFastIselMop(Operand::OperandType dTy, Operand::OperandType sTy, PrimType type) +{ + uint32 bitSize = GetPrimTypeBitSize(type); + bool isInteger = IsPrimitiveInteger(type); + auto tableDriven = fastIselMappingTable.find(bitSize); + if (tableDriven != fastIselMappingTable.end()) { + auto funcIt = tableDriven->second; + return funcIt(isInteger, dTy, sTy); + } else { + CHECK_FATAL(false, "unsupport type"); + } + return abstract::MOP_undef; +} + +#define DEF_EXTEND_MAPPING_TBL(TYPE) \ + [](bool isSigned) -> MOperator { return isSigned ? 
abstract::MOP_sext_rr_##TYPE : abstract::MOP_zext_rr_##TYPE; } +using fromToTy = std::pair<uint32, uint32>; /* std::pair<fromSize, toSize> */ +#define DEF_USE_EXTEND_MAPPING_TBL(FROMSIZE, TOSIZE) \ + { \ + {k##FROMSIZE##BitSize, k##TOSIZE##BitSize}, DEF_EXTEND_MAPPING_TBL(TOSIZE##_##FROMSIZE) \ + } + +std::map<fromToTy, std::function<MOperator(bool)>> fastCvtMappingTableI = { + DEF_USE_EXTEND_MAPPING_TBL(8, 16), /* Extend Mapping */ + DEF_USE_EXTEND_MAPPING_TBL(8, 32), DEF_USE_EXTEND_MAPPING_TBL(8, 64), DEF_USE_EXTEND_MAPPING_TBL(16, 32), + DEF_USE_EXTEND_MAPPING_TBL(16, 64), DEF_USE_EXTEND_MAPPING_TBL(32, 64), +}; +#undef DEF_USE_EXTEND_MAPPING_TBL +#undef DEF_EXTEND_MAPPING_TBL + +static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) +{ + if (toSize < k8BitSize || toSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + if (fromSize < k8BitSize || fromSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + /* Extend: fromSize < toSize */ + auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); + if (tableDriven == fastCvtMappingTableI.end()) { + CHECK_FATAL(false, "unsupport cvt"); + } + MOperator mOp = tableDriven->second(isSigned); + if (mOp == abstract::MOP_undef) { + CHECK_FATAL(false, "unsupport cvt"); + } + return mOp; +} + +/* + * fast get MOperator + * such as : and, or, shl ... + */ +#define DEF_MOPERATOR_MAPPING_FUNC(TYPE) \ + [](uint32 bitSize) -> MOperator { \ + /* 8-bits, 16-bits, 32-bits, 64-bits */ \ + constexpr static std::array<MOperator, kBitIndexEnd> fastMapping_##TYPE = { \ + abstract::MOP_##TYPE##_8, abstract::MOP_##TYPE##_16, abstract::MOP_##TYPE##_32, \ + abstract::MOP_##TYPE##_64}; \ + return fastMapping_##TYPE[GetBitIndex(bitSize)]; \ + } + +#define DEF_FLOAT_MOPERATOR_MAPPING_FUNC(TYPE) [](uint32 bitSize)->MOperator { \ + /* 8-bits, 16-bits, 32-bits, 64-bits */ \ + constexpr static std::array<MOperator, kBitIndexEnd> fastMapping_f_##TYPE = \ + {abstract::MOP_##TYPE##_f_8, abstract::MOP_##TYPE##_f_16, \ + abstract::MOP_##TYPE##_f_32, abstract::MOP_##TYPE##_f_64}; \ + return fastMapping_f_##TYPE[GetBitIndex(bitSize)]; \ +} + +void HandleDassign(StmtNode &stmt, MPISel &iSel) +{ + DEBUG_ASSERT(stmt.GetOpCode() == OP_dassign, "expect dassign"); + auto &dassignNode = static_cast<DassignNode &>(stmt); + BaseNode *rhs = dassignNode.GetRHS(); + DEBUG_ASSERT(rhs != nullptr, "get rhs of dassignNode failed"); + Operand *opndRhs = iSel.HandleExpr(dassignNode, *rhs); + if (opndRhs == nullptr) { + return; + } + iSel.SelectDassign(dassignNode, *opndRhs); +} + +void HandleDassignoff(StmtNode &stmt, MPISel &iSel) +{ + auto &dassignoffNode = static_cast<DassignoffNode &>(stmt); + BaseNode *rhs = dassignoffNode.GetRHS(); + CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval"); + Operand *opnd0 = iSel.HandleExpr(dassignoffNode, *rhs); + iSel.SelectDassignoff(dassignoffNode, *opnd0); +} + +void HandleIassign(StmtNode &stmt, MPISel &iSel) +{ + DEBUG_ASSERT(stmt.GetOpCode() == OP_iassign, "expect iassign"); + auto &iassignNode = static_cast<IassignNode &>(stmt); + BaseNode *rhs = iassignNode.GetRHS(); + DEBUG_ASSERT(rhs != nullptr, "null ptr check"); + Operand *opndRhs = iSel.HandleExpr(stmt, *rhs); + BaseNode *addr = iassignNode.Opnd(0); + DEBUG_ASSERT(addr != nullptr, "null ptr check"); + Operand *opndAddr = iSel.HandleExpr(stmt, *addr); + if (opndRhs == nullptr || opndAddr == nullptr) { + return; + } + if (rhs->GetPrimType() != PTY_agg) { + iSel.SelectIassign(iassignNode, *opndAddr, *opndRhs); + } else { + iSel.SelectAggIassign(iassignNode, *opndAddr, *opndRhs); + } +} + +void HandleRegassign(StmtNode &stmt, MPISel &iSel) +{ + DEBUG_ASSERT(stmt.GetOpCode() ==
OP_regassign, "expect regAssign"); + auto &regAssignNode = static_cast<RegassignNode &>(stmt); + BaseNode *operand = regAssignNode.Opnd(0); + DEBUG_ASSERT(operand != nullptr, "get operand of regassignNode failed"); + Operand *opnd0 = iSel.HandleExpr(regAssignNode, *operand); + iSel.SelectRegassign(regAssignNode, *opnd0); +} + +void HandleIassignoff(StmtNode &stmt, MPISel &iSel) +{ + auto &iassignoffNode = static_cast<IassignoffNode &>(stmt); + iSel.SelectIassignoff(iassignoffNode); +} + +void HandleLabel(StmtNode &stmt, const MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + DEBUG_ASSERT(stmt.GetOpCode() == OP_label, "error"); + auto &label = static_cast<LabelNode &>(stmt); + BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->AddLabel(label.GetLabelIdx()); + cgFunc->SetLab2BBMap(static_cast<int32>(newBB->GetLabIdx()), *newBB); + cgFunc->SetCurBB(*newBB); +} + +void HandleGoto(StmtNode &stmt, MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + cgFunc->UpdateFrequency(stmt); + auto &gotoNode = static_cast<GotoNode &>(stmt); + DEBUG_ASSERT(gotoNode.GetOpCode() == OP_goto, "expect goto"); + cgFunc->SetCurBBKind(BB::kBBGoto); + iSel.SelectGoto(gotoNode); + cgFunc->SetCurBB(*cgFunc->StartNewBB(gotoNode)); + DEBUG_ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNode"); + if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) { + DEBUG_ASSERT(cgFunc->GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt"); + } +} + +void HandleIntrinCall(StmtNode &stmt, MPISel &iSel) +{ + auto &call = static_cast<IntrinsiccallNode &>(stmt); + iSel.SelectIntrinCall(call); +} + +void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &rangeGotoNode = static_cast<RangeGotoNode &>(stmt); + DEBUG_ASSERT(rangeGotoNode.GetOpCode() == OP_rangegoto, "expect rangegoto"); + BaseNode *srcNode = rangeGotoNode.Opnd(0); + Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); + cgFunc->SetCurBBKind(BB::kBBRangeGoto); + iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); +} + +void HandleIgoto(StmtNode &stmt, MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &igotoNode = static_cast<UnaryStmtNode &>(stmt); + BaseNode *targetNode = igotoNode.Opnd(0); + Operand *targetOpnd = iSel.HandleExpr(igotoNode, *targetNode); + iSel.SelectIgoto(*targetOpnd); + cgFunc->SetCurBBKind(BB::kBBIgoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(igotoNode)); +} + +void HandleReturn(StmtNode &stmt, MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &retNode = static_cast<NaryStmtNode &>(stmt); + DEBUG_ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + if (retNode.NumOpnds() != 0) { + Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); + iSel.SelectReturn(retNode, *opnd); + } + iSel.SelectReturn(); + /* return stmt will jump to the ret BB, so curBB is gotoBB */ + cgFunc->SetCurBBKind(BB::kBBGoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); +} + +void HandleComment(StmtNode &stmt, MPISel &iSel) +{ + return; +} + +void HandleIcall(StmtNode &stmt, MPISel &iSel) +{ + DEBUG_ASSERT(stmt.GetOpCode() == OP_icall || stmt.GetOpCode() == OP_icallproto, "error"); + auto &iCallNode = static_cast<IcallNode &>(stmt); + Operand *opnd0 = iSel.HandleExpr(iCallNode, *iCallNode.Opnd(0)); + iSel.SelectIcall(iCallNode, *opnd0); + iSel.SelectCallCommon(stmt, iSel); +} + +void HandleCall(StmtNode &stmt, MPISel &iSel) +{ + DEBUG_ASSERT(stmt.GetOpCode() == OP_call, "error"); + auto &callNode = static_cast<CallNode &>(stmt); + iSel.SelectCall(callNode); + iSel.SelectCallCommon(stmt, iSel); +} + +void HandleCondbr(StmtNode &stmt, MPISel &iSel) +{ + CGFunc *cgFunc =
iSel.GetCurFunc(); + auto &condGotoNode = static_cast<CondGotoNode &>(stmt); + BaseNode *condNode = condGotoNode.Opnd(0); + DEBUG_ASSERT(condNode != nullptr, "expect first operand of cond br"); + /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used + * in most backend architectures */ + Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); + iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &addrofNode = static_cast<AddrofNode &>(expr); + return iSel.SelectAddrof(addrofNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &addrofNode = static_cast<AddroffuncNode &>(expr); + return iSel.SelectAddrofFunc(addrofNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &addrofLabelNode = static_cast<AddroflabelNode &>(expr); + return iSel.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectShift(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectCvt(parent, static_cast<TypeCvtNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectExtractbits(parent, static_cast<ExtractbitsNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &dreadNode = static_cast<AddrofNode &>(expr); + return iSel.SelectDread(parent, dreadNode); +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectAdd(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectBior(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectBxor(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectSub(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectNeg(static_cast<UnaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectDiv(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectRem(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectBand(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return
iSel.SelectMpy(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleConstStr(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &constStrNode = static_cast<ConststrNode &>(expr); + return iSel.SelectStrLiteral(constStrNode); +} + +Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectCvt(parent, static_cast<TypeCvtNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &constValNode = static_cast<ConstvalNode &>(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + DEBUG_ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast<MIRIntConst>(mirConst); + return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); + } else if (mirConst->GetKind() == kConstDoubleConst) { + auto *mirDoubleConst = safe_cast<MIRDoubleConst>(mirConst); + return iSel.SelectFloatingConst(*mirDoubleConst, constValNode.GetPrimType()); + } else if (mirConst->GetKind() == kConstFloatConst) { + auto *mirFloatConst = safe_cast<MIRFloatConst>(mirConst); + return iSel.SelectFloatingConst(*mirFloatConst, constValNode.GetPrimType()); + } else { + CHECK_FATAL(false, "NIY"); + } + return nullptr; +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + (void)parent; + auto &regReadNode = static_cast<RegreadNode &>(expr); + /* handle return Val */ + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &iSel.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return iSel.SelectRegread(regReadNode); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &ireadNode = static_cast<IreadNode &>(expr); + return iSel.SelectIread(parent, ireadNode); +} +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &ireadNode = static_cast<IreadoffNode &>(expr); + return iSel.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectBnot(static_cast<UnaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleLnot(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectLnot(static_cast<UnaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +void HandleEval(const StmtNode &stmt, MPISel &iSel) +{ + (void)iSel.HandleExpr(stmt, *static_cast<const UnaryStmtNode &>(stmt).Opnd(0)); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectDepositBits(static_cast<DepositbitsNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + // fix opnd type before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast<const CompareNode &>(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast<const TypeCvtNode &>(parent).FromType(); + } + if ((IsPrimitiveInteger(targetPtyp) || IsPrimitiveFloat(targetPtyp)) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return iSel.SelectCmpOp(static_cast<CompareNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectAbs(static_cast<UnaryNode &>(expr), *iSel.HandleExpr(expr,
*expr.Opnd(0))); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectAlloca(static_cast<UnaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectCGArrayElemAdd(static_cast<BinaryNode &>(expr), parent); +} + +void HandleAsm(StmtNode &stmt, MPISel &iSel) +{ + iSel.SelectAsm(static_cast<AsmNode &>(stmt)); +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + Operand &trueOpnd = *iSel.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *iSel.HandleExpr(expr, *expr.Opnd(2)); + Operand &condOpnd = *iSel.HandleExpr(expr, *expr.Opnd(0)); + if (condOpnd.IsImmediate()) { + return (static_cast<ImmOperand &>(condOpnd).GetValue() == 0) ? &falseOpnd : &trueOpnd; + } + return iSel.SelectSelect(static_cast<TernaryNode &>(expr), condOpnd, trueOpnd, falseOpnd, parent); +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectMin(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectMax(static_cast<BinaryNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + return iSel.SelectRetype(static_cast<TypeCvtNode &>(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) +{ + auto &intrinsicopNode = static_cast<IntrinsicopNode &>(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return iSel.SelectBswap(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_clz32: + case INTRN_C_clz64: + return iSel.SelectCclz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_ctz32: + case INTRN_C_ctz64: + return iSel.SelectCctz(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_exp: + return iSel.SelectCexp(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + default: + DEBUG_ASSERT(false, "NIY, unsupported intrinsicop."); + return nullptr; + } +} + +using HandleStmtFactory = FunctionFactory<Opcode, void, StmtNode &, MPISel &>; +using HandleExprFactory = FunctionFactory<Opcode, maplebe::Operand *, const BaseNode &, BaseNode &, MPISel &>; +namespace isel { +void InitHandleStmtFactory() +{ + RegisterFactoryFunction<HandleStmtFactory>(OP_label, HandleLabel); + RegisterFactoryFunction<HandleStmtFactory>(OP_dassign, HandleDassign); + RegisterFactoryFunction<HandleStmtFactory>(OP_dassignoff, HandleDassignoff); + RegisterFactoryFunction<HandleStmtFactory>(OP_iassign, HandleIassign); + RegisterFactoryFunction<HandleStmtFactory>(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction<HandleStmtFactory>(OP_regassign, HandleRegassign); + RegisterFactoryFunction<HandleStmtFactory>(OP_return, HandleReturn); + RegisterFactoryFunction<HandleStmtFactory>(OP_comment, HandleComment); + RegisterFactoryFunction<HandleStmtFactory>(OP_call, HandleCall); + RegisterFactoryFunction<HandleStmtFactory>(OP_icall, HandleIcall); + RegisterFactoryFunction<HandleStmtFactory>(OP_icallproto, HandleIcall); + RegisterFactoryFunction<HandleStmtFactory>(OP_goto, HandleGoto); + RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccall, HandleIntrinCall); + RegisterFactoryFunction<HandleStmtFactory>(OP_intrinsiccallassigned, HandleIntrinCall); + RegisterFactoryFunction<HandleStmtFactory>(OP_rangegoto, HandleRangeGoto); + RegisterFactoryFunction<HandleStmtFactory>(OP_igoto, HandleIgoto); + RegisterFactoryFunction<HandleStmtFactory>(OP_brfalse, HandleCondbr); + RegisterFactoryFunction<HandleStmtFactory>(OP_brtrue, HandleCondbr); + RegisterFactoryFunction<HandleStmtFactory>(OP_eval,
HandleEval); + RegisterFactoryFunction<HandleStmtFactory>(OP_asm, HandleAsm); +} +void InitHandleExprFactory() +{ + RegisterFactoryFunction<HandleExprFactory>(OP_dread, HandleDread); + RegisterFactoryFunction<HandleExprFactory>(OP_add, HandleAdd); + RegisterFactoryFunction<HandleExprFactory>(OP_sub, HandleSub); + RegisterFactoryFunction<HandleExprFactory>(OP_neg, HandleNeg); + RegisterFactoryFunction<HandleExprFactory>(OP_mul, HandleMpy); + RegisterFactoryFunction<HandleExprFactory>(OP_constval, HandleConstVal); + RegisterFactoryFunction<HandleExprFactory>(OP_regread, HandleRegread); + RegisterFactoryFunction<HandleExprFactory>(OP_addrof, HandleAddrof); + RegisterFactoryFunction<HandleExprFactory>(OP_addroffunc, HandleAddroffunc); + RegisterFactoryFunction<HandleExprFactory>(OP_addroflabel, HandleAddrofLabel); + RegisterFactoryFunction<HandleExprFactory>(OP_shl, HandleShift); + RegisterFactoryFunction<HandleExprFactory>(OP_lshr, HandleShift); + RegisterFactoryFunction<HandleExprFactory>(OP_ashr, HandleShift); + RegisterFactoryFunction<HandleExprFactory>(OP_cvt, HandleCvt); + RegisterFactoryFunction<HandleExprFactory>(OP_zext, HandleExtractBits); + RegisterFactoryFunction<HandleExprFactory>(OP_sext, HandleExtractBits); + RegisterFactoryFunction<HandleExprFactory>(OP_extractbits, HandleExtractBits); + RegisterFactoryFunction<HandleExprFactory>(OP_depositbits, HandleDepositBits); + RegisterFactoryFunction<HandleExprFactory>(OP_band, HandleBand); + RegisterFactoryFunction<HandleExprFactory>(OP_bior, HandleBior); + RegisterFactoryFunction<HandleExprFactory>(OP_bxor, HandleBxor); + RegisterFactoryFunction<HandleExprFactory>(OP_iread, HandleIread); + RegisterFactoryFunction<HandleExprFactory>(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction<HandleExprFactory>(OP_bnot, HandleBnot); + RegisterFactoryFunction<HandleExprFactory>(OP_lnot, HandleLnot); + RegisterFactoryFunction<HandleExprFactory>(OP_div, HandleDiv); + RegisterFactoryFunction<HandleExprFactory>(OP_rem, HandleRem); + RegisterFactoryFunction<HandleExprFactory>(OP_conststr, HandleConstStr); + RegisterFactoryFunction<HandleExprFactory>(OP_le, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_ge, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_gt, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_lt, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_ne, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_eq, HandleCmp); + RegisterFactoryFunction<HandleExprFactory>(OP_abs, HandleAbs); + RegisterFactoryFunction<HandleExprFactory>(OP_alloca, HandleAlloca); + RegisterFactoryFunction<HandleExprFactory>(OP_CG_array_elem_add, HandleCGArrayElemAdd); + RegisterFactoryFunction<HandleExprFactory>(OP_select, HandleSelect); + RegisterFactoryFunction<HandleExprFactory>(OP_min, HandleMin); + RegisterFactoryFunction<HandleExprFactory>(OP_max, HandleMax); + RegisterFactoryFunction<HandleExprFactory>(OP_retype, HandleRetype); + RegisterFactoryFunction<HandleExprFactory>(OP_trunc, HandleTrunc); + RegisterFactoryFunction<HandleExprFactory>(OP_intrinsicop, HandleIntrinOp); +} +} // namespace isel + +Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) +{ + auto function = CreateProductFunction<HandleExprFactory>(expr.GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()"); + Operand *opnd = function(parent, expr, *this); + return opnd; +} + +void MPISel::doMPIS() +{ + isel::InitHandleStmtFactory(); + isel::InitHandleExprFactory(); + StmtNode *secondStmt = HandleFuncEntry(); + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + auto function = CreateProductFunction<HandleStmtFactory>(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + } + HandleFuncExit(); +} + +PrimType MPISel::GetIntegerPrimTypeFromSize(bool isSigned, uint32 bitSize) +{ + static constexpr std::array<PrimType, kBitIndexEnd> signedPrimType = {PTY_i8, PTY_i16, PTY_i32, PTY_i64}; + static constexpr std::array<PrimType, kBitIndexEnd> unsignedPrimType = {PTY_u8, PTY_u16, PTY_u32, PTY_u64}; + BitIndex index = GetBitIndex(bitSize); + return isSigned ?
signedPrimType[index] : unsignedPrimType[index]; +} + +void MPISel::SelectCallCommon(StmtNode &stmt, const MPISel &iSel) +{ + CGFunc *cgFunc = iSel.GetCurFunc(); + if (cgFunc->GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc->SetCurBB(*cgFunc->StartNewBB(stmt)); + } + StmtNode *prevStmt = stmt.GetPrev(); + if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) { + return; + } + if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) { + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(true, stmt)); + } +} + +void MPISel::SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType) +{ + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, primType); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, primType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +std::pair<FieldID, MIRType *> MPISel::GetFieldIdAndMirTypeFromMirNode(const BaseNode &node) +{ + FieldID fieldId = 0; + MIRType *mirType = nullptr; + if (node.GetOpCode() == maple::OP_iread) { + /* mirType stored in an addr. */ + auto &iread = static_cast<const IreadNode &>(node); + fieldId = iread.GetFieldID(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx()); + MIRPtrType *pointerType = static_cast<MIRPtrType *>(type); + DEBUG_ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + if (mirType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast<MIRArrayType *>(mirType); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + } + } else if (node.GetOpCode() == maple::OP_dassign) { + /* mirSymbol */ + auto &dassign = static_cast<const DassignNode &>(node); + fieldId = dassign.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dassign.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_dread) { + /* mirSymbol */ + auto &dread = static_cast<const AddrofNode &>(node); + fieldId = dread.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_iassign) { + auto &iassign = static_cast<const IassignNode &>(node); + fieldId = iassign.GetFieldID(); + AddrofNode &addrofNode = static_cast<AddrofNode &>(iassign.GetAddrExprBase()); + MIRType *iassignMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx()); + MIRPtrType *pointerType = nullptr; + if (iassignMirType->GetPrimType() == PTY_agg) { + MIRSymbol *addrSym = cgFunc->GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrofNode.GetStIdx()); + MIRType *addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx()); + addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrMirType->GetTypeIndex()); + DEBUG_ASSERT(addrMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast<MIRPtrType *>(addrMirType); + } else { + DEBUG_ASSERT(iassignMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast<MIRPtrType *>(iassignMirType); + } + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + } else { + CHECK_FATAL(false, "unsupported OpCode"); + } + return {fieldId, mirType}; +} + +MirTypeInfo MPISel::GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType) +{ + MirTypeInfo mirTypeInfo; + /* fixup primType and offset */ + if (fieldId != 0) {
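+ // A non-zero fieldId selects a struct/union member: switch mirType to the member's type and + // record the member's byte offset inside the aggregate so the later address computation can use it.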
DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast<MIRStructType *>(mirType); + mirType = structType->GetFieldType(fieldId); + mirTypeInfo.offset = static_cast<int32>(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + mirTypeInfo.primType = mirType->GetPrimType(); + // aggSize for AggType + if (mirTypeInfo.primType == maple::PTY_agg) { + mirTypeInfo.size = cgFunc->GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + } + return mirTypeInfo; +} + +MirTypeInfo MPISel::GetMirTypeInfoFromMirNode(const BaseNode &node) +{ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(node); + return GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); +} + +void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) +{ + /* mirSymbol info */ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID()); + /* rhs mirType info */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* Generate Insn */ + if (rhsType == PTY_agg) { + /* Agg Type */ + SelectAggDassign(symbolInfo, symbolMem, opndRhs); + return; + } + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + SelectCopy(symbolMem, opndRhs, memType, rhsType); + if (rhsType == PTY_ref) { + cgFunc->AddReferenceStackSlot(symbolMem.GetOffsetImmediate()->GetOffsetValue()); + } + + return; +} + +void MPISel::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) +{ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + MemOperand &memOpnd = GetOrCreateMemOpndFromSymbol(*symbol, bitSize, stmt.offset); + + SelectCopy(memOpnd, opnd0, primType); +} + +void MPISel::SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs) +{ + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + /* handle Lhs, generate (%Rxx) via Rxx */ + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + RegOperand &lhsBaseOpnd = SelectCopy2Reg(opndAddr, stmt.Opnd(0)->GetPrimType()); + MemOperand &lhsMemOpnd = + cgFunc->GetOpndBuilder()->CreateMem(lhsBaseOpnd, symbolInfo.offset, GetPrimTypeBitSize(memType)); + /* handle Rhs, get R## from Rhs */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* mov %R##, (%Rxx) */ + SelectCopy(lhsMemOpnd, opndRhs, memType, rhsType); +} + +void MPISel::SelectIassignoff(const IassignoffNode &stmt) +{ + Operand *addr = HandleExpr(stmt, *stmt.Opnd(0)); + DEBUG_ASSERT(addr != nullptr, "null ptr check"); + Operand *rhs = HandleExpr(stmt, *stmt.Opnd(1)); + DEBUG_ASSERT(rhs != nullptr, "null ptr check"); + + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &addrReg = SelectCopy2Reg(*addr, PTY_a64); + RegOperand &rhsReg = SelectCopy2Reg(*rhs, primType); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrReg, offset, bitSize); + SelectCopy(memOpnd, rhsReg, primType); +} + +ImmOperand *MPISel::SelectIntConst(const MIRIntConst &intConst, PrimType primType) +{ + return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), intConst.GetExtValue()); +} + +Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0,
Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + Opcode opcode = node.GetOpCode(); + + if (IsPrimitiveInteger(primType)) { + resOpnd = + &(cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType))); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + SelectShift(*resOpnd, regOpnd0, opnd1, opcode, primType, node.Opnd(1)->GetPrimType()); + } else { + CHECK_FATAL(false, "NIY vector cvt"); + } + return resOpnd; +} + +void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, PrimType opnd0Type, + PrimType opnd1Type) +{ + if (opnd1.IsIntImmediate() && static_cast<ImmOperand &>(opnd1).GetValue() == 0) { + SelectCopy(resOpnd, opnd0, opnd0Type); + return; + } + + uint32 dsize = GetPrimTypeBitSize(opnd0Type); + MOperator mOp = abstract::MOP_undef; + if (shiftDirect == OP_shl) { + const static auto fastShlMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(shl); + mOp = fastShlMappingFunc(dsize); + } else if (shiftDirect == OP_ashr) { + const static auto fastAshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(ashr); + mOp = fastAshrMappingFunc(dsize); + } else if (shiftDirect == OP_lshr) { + const static auto fastLshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(lshr); + mOp = fastLshrMappingFunc(dsize); + } else { + CHECK_FATAL(false, "NIY, Not support shiftdirect case"); + } + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, opnd0Type); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, opnd1Type); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) +{ + PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PregIdx pregIdx = stmt.GetRegIdx(); + PrimType regType = stmt.GetPrimType(); + RegOperand &regOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); + SelectCopy(regOpnd, opnd0, regType, rhsType); + if (stmt.GetPrimType() == PTY_ref) { + regOpnd.SetIsReference(true); + cgFunc->AddReferenceReg(regOpnd.GetRegisterNumber()); + } + if (pregIdx > 0) { + // special MIRPreg is not supported + cgFunc->SetPregIdx2Opnd(pregIdx, regOpnd); + } + const auto &derived2BaseRef = cgFunc->GetFunction().GetDerived2BaseRef(); + auto itr = derived2BaseRef.find(pregIdx); + if (itr != derived2BaseRef.end()) { + auto *opnd = cgFunc->GetOpndFromPregIdx(itr->first); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + auto &derivedRegOpnd = static_cast<RegOperand &>(*opnd); + opnd = cgFunc->GetOpndFromPregIdx(itr->second); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + auto &baseRegOpnd = static_cast<RegOperand &>(*opnd); + derivedRegOpnd.SetBaseRefOpnd(baseRegOpnd); + } + if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast<int64>(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *dest = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(*dest, regOpnd,
preg->GetPrimType(), regType); + } +} + +RegOperand *MPISel::SelectRegread(RegreadNode &expr) +{ + PregIdx pregIdx = expr.GetRegIdx(); + PrimType rhsType = expr.GetPrimType(); + if (pregIdx < 0) { + return &SelectSpecialRegread(pregIdx, rhsType); + } + + RegOperand &reg = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeSize(rhsType) * kBitsPerByte, + cgFunc->GetRegTyFromPrimTy(rhsType)); + if (cgFunc->GetOpndFromPregIdx(pregIdx) == nullptr) { + cgFunc->SetPregIdx2Opnd(pregIdx, reg); + } + if (expr.GetPrimType() == maple::PTY_ref) { + reg.SetIsReference(true); + cgFunc->AddReferenceReg(reg.GetRegisterNumber()); + } + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast<int64>(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *src = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(reg, *src, rhsType, preg->GetPrimType()); + } + return &reg; +} + +Operand *MPISel::SelectDread(const BaseNode &parent, const AddrofNode &expr) +{ + /* get mirSymbol info */ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(expr); + PrimType symbolType = symbolInfo.primType; + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + PrimType primType = expr.GetPrimType(); + if (primType == PTY_ref) { + cgFunc->AddReferenceStackSlot(symbolMem.GetOffsetImmediate()->GetOffsetValue()); + } + + /* for AggType, return its location in stack. */ + if (symbolType == maple::PTY_agg) { + CHECK_FATAL(primType == maple::PTY_agg, "NIY"); + return &symbolMem; + } + /* for BasicType, load symbolVal to register.
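+ * (e.g. an i32 local is copied from its stack slot into the freshly created virtual register + * by the SelectCopy below)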
*/ + RegOperand &regOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + if (primType == PTY_ref) { + regOpnd.SetIsReference(true); + cgFunc->AddReferenceReg(regOpnd.GetRegisterNumber()); + } + /* Generate Insn */ + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return &regOpnd; +} + +Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand &resReg = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectAdd(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectBand(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand &resReg = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBand(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectSub(resOpnd, regOpnd0, regOpnd1, primType); + return &resOpnd; +} + +void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, + PrimType primType) +{ + uint32 primBitSize = GetPrimTypeBitSize(primType); + bool isSigned = IsSignedInteger(primType); + if (bitOffset == 0 && !isSigned) { + /* + * resOpnd = opnd0 & ((1 << bitSize) - 1) + */ + ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, (static_cast<uint64>(1) << bitSize) - 1); + SelectBand(resOpnd, opnd0, imm, primType); + } else { + /* + * tmpOpnd = opnd0 << (primBitSize - bitSize - bitOffset) + * resOpnd = tmpOpnd >> (primBitSize - bitSize) + * if signed : use sar; else use shr + */ + RegOperand &tmpOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, primBitSize - bitSize - bitOffset); + SelectShift(tmpOpnd, opnd0, imm1Opnd, OP_shl, primType, primType); + Opcode opcode = isSigned ?
OP_ashr : OP_lshr; + ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, primBitSize - bitSize); + SelectShift(resOpnd, tmpOpnd, imm2Opnd, opcode, primType, primType); + } +} + +Operand *MPISel::SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0) +{ + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + uint8 bitSize = node.GetBitsSize(); + RegOperand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); + if (IsPrimitiveInteger(toType)) { + // OP_extractbits or bitSize < 8-bit or bitSize is not pow of 2 + if (node.GetOpCode() == OP_extractbits || bitSize < k8BitSize || (bitSize & (bitSize - 1)) != 0) { + SelectCopy(resOpnd, opnd0, toType, fromType); + SelectExtractbits(resOpnd, resOpnd, node.GetBitsOffset(), bitSize, toType); + } else { + PrimType opndType = GetIntegerPrimTypeFromSize(node.GetOpCode() == OP_sext, bitSize); + RegOperand &tmpRegOpnd = SelectCopy2Reg(opnd0, opndType, fromType); + SelectIntCvt(resOpnd, tmpRegOpnd, toType, opndType); + } + } else { + CHECK_FATAL(false, "NIY vector cvt"); + } + return &resOpnd; +} + +Operand *MPISel::SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0) +{ + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + if (fromType == toType) { + return &opnd0; + } + RegOperand *resOpnd = + &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); + if (IsPrimitiveInteger(toType) && IsPrimitiveInteger(fromType)) { + SelectIntCvt(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(toType) && IsPrimitiveFloat(fromType)) { + SelectFloatCvt(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveInteger(toType) && IsPrimitiveFloat(fromType)) { + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + } else { + CHECK_FATAL(false, "NIY cvt"); + } + return resOpnd; +} + +void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) +{ + uint32 toSize = GetPrimTypeBitSize(toType); + bool isSigned = !IsPrimitiveUnsigned(toType); + PrimType newToType = toType; + // cvt f64/32 -> u16 / u8 -> cvt f u32 + cvt u32 -> u8 + if (toSize < k32BitSize) { + newToType = isSigned ? PTY_i32 : PTY_u32; + } + uint32 newToSize = GetPrimTypeBitSize(newToType); + RegOperand &tmpFloatOpnd = cgFunc->GetOpndBuilder()->CreateVReg(newToSize, kRegTyFloat); + SelectFloatCvt(tmpFloatOpnd, opnd0, newToType, fromType); + MOperator mOp = abstract::MOP_undef; + if (newToSize == k32BitSize) { + mOp = isSigned ? abstract::MOP_cvt_rf_i32 : abstract::MOP_cvt_rf_u32; + } else if (newToSize == k64BitSize) { + mOp = isSigned ? 
abstract::MOP_cvt_rf_i64 : abstract::MOP_cvt_rf_u64; + } else { + CHECK_FATAL(false, "niy"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + if (toSize == newToSize) { + (void)insn.AddOpndChain(resOpnd).AddOpndChain(tmpFloatOpnd); + } else if (toSize < newToSize) { + RegOperand &tmpIntOpnd = cgFunc->GetOpndBuilder()->CreateVReg(newToSize, kRegTyFloat); + (void)insn.AddOpndChain(tmpIntOpnd).AddOpndChain(tmpFloatOpnd); + SelectIntCvt(resOpnd, tmpIntOpnd, toType, newToType); + } + cgFunc->GetCurBB()->AppendInsn(insn); +} + +void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) +{ + uint32 fromSize = GetPrimTypeBitSize(fromType); + bool isSigned = !IsPrimitiveUnsigned(fromType); + MOperator mOp = abstract::MOP_undef; + PrimType newFromType = PTY_begin; + if (fromSize == k32BitSize) { + mOp = isSigned ? abstract::MOP_cvt_fr_i32 : abstract::MOP_cvt_fr_u32; + newFromType = PTY_f32; + } else if (fromSize == k64BitSize) { + mOp = isSigned ? abstract::MOP_cvt_fr_i64 : abstract::MOP_cvt_fr_u64; + newFromType = PTY_f64; + } else { + CHECK_FATAL(false, "niy"); + } + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType); + RegOperand &tmpFloatOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(newFromType), + cgFunc->GetRegTyFromPrimTy(newFromType)); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(tmpFloatOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + SelectFloatCvt(resOpnd, tmpFloatOpnd, toType, newFromType); +} + +void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) +{ + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + /* + * It is redundant to insert "nop" casts (unsigned 32 -> signed 32) in abstract CG IR; + * the signedness of operands is conveyed by the expression.
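+ * Illustrative example: a widening `cvt u64 u32` keeps the value and widens the vreg (GetFastCvtMopI is
+ * assumed to pick a zero- or sign-extending mop from the source signedness), while the narrowing case
+ * below simply re-types the source vreg without emitting any instruction.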
+ */ + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType); + if (toSize <= fromSize) { + resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(), GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + return; + } + bool isSigned = !IsPrimitiveUnsigned(fromType); + MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + return; +} + +void MPISel::SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) +{ + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType); + if (fromSize == toSize) { + resOpnd = regOpnd0; + return; + } + MOperator mOp = abstract::MOP_undef; + if (fromSize == k32BitSize && toSize == k64BitSize) { + mOp = abstract::MOP_cvt_ff_64_32; + } else if (fromSize == k64BitSize && toSize == k32BitSize) { + mOp = abstract::MOP_cvt_ff_32_64; + } else { + CHECK_FATAL(false, "niy"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + return; +} + +void MPISel::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + MOperator mOp = abstract::MOP_undef; + if (IsPrimitiveInteger(primType)) { + const static auto fastSubMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(sub); + mOp = fastSubMappingFunc(GetPrimTypeBitSize(primType)); + } else { + const static auto fastSubFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(sub); + mOp = fastSubFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +void MPISel::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + MOperator mOp = abstract::MOP_undef; + if (IsPrimitiveInteger(primType)) { + const static auto fastAndMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(and); + mOp = fastAndMappingFunc(GetPrimTypeBitSize(primType)); + } else { + const static auto fastAndFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(and); + mOp = fastAndFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +void MPISel::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + MOperator mOp = abstract::MOP_undef; + if (IsPrimitiveInteger(primType)) { + const static auto fastAddMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(add); + mOp = fastAddMappingFunc(GetPrimTypeBitSize(primType)); + } else { + const static auto fastAddFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(add); + mOp = fastAddFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand *MPISel::SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + SelectNeg(*resOpnd, regOpnd0, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void MPISel::SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType) +{ +
MOperator mOp = abstract::MOP_undef; + if (IsPrimitiveInteger(primType)) { + const static auto fastNegMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(neg); + mOp = fastNegMappingFunc(GetPrimTypeBitSize(primType)); + } else { + const static auto fastNegFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(neg); + mOp = fastNegFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = + &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBior(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + const static auto fastBiorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(or); + MOperator mOp = fastBiorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand *MPISel::SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = + &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand &regOpnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBxor(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + const static auto fastBxorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(xor); + MOperator mOp = fastBxorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +MemOperand *MPISel::GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset) +{ + /* get rhs */ + Operand *addrOpnd = HandleExpr(expr, *expr.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + /* Generate memOpnd */ + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, offset, GetPrimTypeBitSize(primType)); + return &memOpnd; +} + +Operand *MPISel::SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset) +{ + /* get lhs mirType info */ + MirTypeInfo lhsInfo = GetMirTypeInfoFromMirNode(expr); + /* get memOpnd */ + MemOperand &memOpnd = *GetOrCreateMemOpndFromIreadNode(expr, lhsInfo.primType, lhsInfo.offset + extraOffset); + /* for AggType, return the addr itself. */ + if (lhsInfo.primType == PTY_agg) { + return &memOpnd; + } + /* for BasicType, load val in addr to register.
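+ * Illustrative example: for `iread i32 <*i32> (regread a64 %p)`, the copy below becomes a single
+ * 32-bit load from [%p + lhsInfo.offset] into the fresh result vreg.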
*/ + PrimType primType = expr.GetPrimType(); + RegOperand &result = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType, lhsInfo.primType); + return &result; +} + +Operand *MPISel::SelectIreadoff(const BaseNode &parent, const IreadoffNode &ireadoff) +{ + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + + Operand *addrOpnd = HandleExpr(ireadoff, *ireadoff.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, offset, bitSize); + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType); + return &result; +} + +static inline uint64 CreateDepositBitsImm1(uint32 primBitSize, uint8 bitOffset, uint8 bitSize) +{ + /* $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) */ + uint64 val = UINT64_MAX; // 0xFFFFFFFFFFFFFFFF + if (bitSize + bitOffset >= primBitSize) { + val = 0; + } else { + val <<= (bitSize + bitOffset); + } + val |= (static_cast<uint64>(1) << bitOffset) - 1; + return val; +} + +Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + PrimType primType = node.GetPrimType(); + uint32 primBitSize = GetPrimTypeBitSize(primType); + DEBUG_ASSERT((primBitSize == k64BitSize) || (bitOffset < k32BitSize), "wrong bitSize"); + DEBUG_ASSERT(bitSize < k64BitSize, "wrong bitSize"); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, cgFunc->GetRegTyFromPrimTy(primType)); + /* + * resOpnd = (opnd0 and $imm1) or ((opnd1 << bitOffset) and (~$imm1)); + * $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) + */ + uint64 imm1Val = CreateDepositBitsImm1(primBitSize, bitOffset, bitSize); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, static_cast<int64>(imm1Val)); + /* and */ + SelectBand(resOpnd, opnd0, imm1Opnd, primType); + if (opnd1.IsIntImmediate()) { + /* opnd1 is immediate, imm2 = (opnd1.val << bitOffset) & (~$imm1) */ + int64 imm2Val = (static_cast<ImmOperand&>(opnd1).GetValue() << bitOffset) & (~imm1Val); + ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, imm2Val); + /* or */ + SelectBior(resOpnd, resOpnd, imm2Opnd, primType); + } else { + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(tmpOpnd, opnd1, primType, node.Opnd(1)->GetPrimType()); + /* shift -- (opnd1 << bitOffset) */ + ImmOperand &countOpnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, bitOffset); + SelectShift(tmpOpnd, tmpOpnd, countOpnd, OP_shl, primType, primType); + /* and (~$imm1) */ + ImmOperand &nonImm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, (~imm1Val)); + SelectBand(tmpOpnd, tmpOpnd, nonImm1Opnd, primType); + /* or */ + SelectBior(resOpnd, resOpnd, tmpOpnd, primType); + } + return &resOpnd; +} + +Operand *MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) +{ + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + /* + * fabs(x) = x AND 0x7fffffff ffffffff [set sign bit to 0] + */ + const static uint64 kNaN = 0x7fffffffffffffffUL; + const static double kNaNDouble =
*(double*)(&kNaN); + const static uint64 kNaNf = 0x7fffffffUL; + const static double kNaNFloat = *(double*)(&kNaNf); + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "niy"); + + double mask = primType == PTY_f64 ? kNaNDouble : kNaNFloat; + MIRDoubleConst *c = cgFunc->GetMemoryPool()->New<MIRDoubleConst>(mask, + *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); + Operand *opnd1 = SelectFloatingConst(*c, PTY_f64); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBand(resOpnd, opnd0, *opnd1, primType); + return &resOpnd; + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand &regOpndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) +{ + DEBUG_ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); + PrimType srcType = node.Opnd(0)->GetPrimType(); + RegOperand &sizeOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectCopy(sizeOpnd, opnd0, PTY_u64, srcType); + + /* stack byte alignment */ + uint32 stackPtrAlignment = cgFunc->GetMemlayout()->GetStackPtrAlignment(); + RegOperand &aliOp = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectAdd(aliOp, sizeOpnd, cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stackPtrAlignment - 1), PTY_u64); + ImmOperand &shiftOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, __builtin_ctz(stackPtrAlignment)); + SelectShift(aliOp, aliOp, shiftOpnd, OP_lshr, PTY_u64, PTY_u64); + SelectShift(aliOp, aliOp, shiftOpnd, OP_shl, PTY_u64, PTY_u64); + + RegOperand &spOpnd = GetTargetStackPointer(PTY_u64); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + + RegOperand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), cgFunc->GetRegTyFromPrimTy(PTY_u64)); + uint32 argsToStkpassSize = cgFunc->GetMemlayout()->SizeOfArgsToStackPass(); + if (argsToStkpassSize > 0) { + SelectAdd(resOpnd, spOpnd, cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, argsToStkpassSize), PTY_u64); + } else { + SelectCopy(resOpnd, spOpnd, PTY_u64); + } + return &resOpnd; +} + +Operand *MPISel::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) +{ + BaseNode *opnd0 = node.Opnd(0); + BaseNode *opnd1 = node.Opnd(1); + DEBUG_ASSERT(opnd1->GetOpCode() == OP_constval, "NIY, opnd1->op should be OP_constval."); + + switch (opnd0->GetOpCode()) { + case OP_regread: { + return SelectRegread(static_cast<RegreadNode&>(*opnd0)); + } + case OP_addrof: { + Operand *addrOpnd = SelectAddrof(static_cast<AddrofNode&>(*opnd0), node); + + /* OP_constval */ + ConstvalNode *constvalNode = static_cast<ConstvalNode*>(opnd1); +
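+ // Illustrative example: for a[2] with 8-byte elements, opnd1 is constval 16, so the add below computes addrof(a) + 16.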
MIRConst *mirConst = constvalNode->GetConstVal(); + DEBUG_ASSERT(mirConst->GetKind() == kConstInt, "NIY"); + MIRIntConst *mirIntConst = static_cast<MIRIntConst*>(mirConst); + Operand *immOpnd = SelectIntConst(*mirIntConst, constvalNode->GetPrimType()); + + Operand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_a64), cgFunc->GetRegTyFromPrimTy(PTY_a64)); + SelectAdd(resOpnd, *addrOpnd, *immOpnd, node.GetPrimType()); + return &resOpnd; + } + default: + CHECK_FATAL(false, "cannot handle opnd0."); + } +} + +StmtNode *MPISel::HandleFuncEntry() +{ + MIRFunction &mirFunc = cgFunc->GetFunction(); + BlockNode *block = mirFunc.GetBody(); + + DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + + StmtNode *stmt = block->GetFirst(); + if (stmt == nullptr) { + return nullptr; + } + DEBUG_ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label"); + HandleLabel(*stmt, *this); + cgFunc->SetFirstBB(*cgFunc->GetCurBB()); + stmt = stmt->GetNext(); + if (stmt == nullptr) { + return nullptr; + } + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(false, *stmt)); + bool withFreqInfo = mirFunc.HasFreqMap() && !mirFunc.GetLastFreqMap().empty(); + if (withFreqInfo) { + cgFunc->GetCurBB()->SetFrequency(kFreqBase); + } + + return stmt; +} + +/* This function loads src to a register; src can be an imm, mem, or label operand. + * If the source and result (destination) types differ, + * the implicit conversion is performed here. */ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) +{ + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (src.IsRegister() && fromSize == toSize) { + return static_cast<RegOperand&>(src); + } + RegOperand &dest = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); + if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); + } else { + SelectCopy(dest, src, toType); + } + return dest; +} +/* Pretty sure that implicit type conversions will not occur. */ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) +{ + DEBUG_ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); + if (src.IsRegister()) { + return static_cast<RegOperand&>(src); + } + RegOperand &dest = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCopy(dest, src, dtype); + return dest; +} +/* This function copies/loads/stores src to dest; if the src and dest types + * differ, an implicit conversion is performed here. */ +void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) +{ + if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); + RegOperand &dstRegOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType)); + SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); + SelectCopy(dest, dstRegOpnd, toType); + } else { + SelectCopy(dest, src, toType); + } +} + +/* Pretty sure that implicit type conversions will not occur.
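+ * (The assert below enforces this: dest and src must already have identical sizes.)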
*/ +void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) +{ + DEBUG_ASSERT(dest.GetSize() == src.GetSize(), "NIY"); + if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, type); + } else if (dest.GetKind() == Operand::kOpdMem) { + if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), cgFunc->GetRegTyFromPrimTy(type)); + SelectCopyInsn(tempReg, src, type); + SelectCopyInsn(dest, tempReg, type); + } else { + SelectCopyInsn(dest, src, type); + } + } else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); + } + return; +} + +void MPISel::SelectCopyInsn(Operand &dest, Operand &src, PrimType type) +{ + MOperator mop = GetFastIselMop(dest.GetKind(), src.GetKind(), type); + CHECK_FATAL(mop != abstract::MOP_undef, "get mop failed"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mop, InsnDesc::GetAbstractId(mop)); + (void)insn.AddOpndChain(dest).AddOpndChain(src); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + SelectBnot(*resOpnd, regOpnd0, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void MPISel::SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType) +{ + const static auto fastBnotMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(not); + MOperator mOp = fastBnotMappingFunc(GetPrimTypeBitSize(primType)); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + SelectMin(resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void MPISel::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType); +} + +Operand *MPISel::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + SelectMax(resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void MPISel::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType); +} + +Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) +{ + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + DEBUG_ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit width doesn't match"); + if (IsPrimitivePoint(fromType) && IsPrimitivePoint(toType)) { + return &SelectCopy2Reg(opnd0, toType); + } + if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) { + return &SelectCopy2Reg(opnd0, toType); + } + if (IsPrimitiveInteger(fromType) &&
IsPrimitiveInteger(toType)) { + return &SelectCopy2Reg(opnd0, toType, fromType); + } + if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectRetypeFloat(*resOpnd, opnd0, toType, fromType); + return &(*resOpnd); + } + if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectRetypeFloat(*resOpnd, opnd0, toType, fromType); + return &(*resOpnd); + } + CHECK_FATAL(false, "NIY, retype"); + return nullptr; +} + +void MPISel::HandleFuncExit() +{ + BlockNode *block = cgFunc->GetFunction().GetBody(); + DEBUG_ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + cgFunc->SetCleanupBB(*cgFunc->GetCurBB()->GetPrev()); +} + +bool InstructionSelector::PhaseRun(maplebe::CGFunc &f) +{ + MPISel *mpIS = f.GetCG()->CreateMPIsel(*GetPhaseMemPool(), *GetPhaseAllocator(), f); + mpIS->doMPIS(); + Standardize *stdz = f.GetCG()->CreateStandardize(*GetPhaseMemPool(), f); + stdz->DoStandardize(); + return true; +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/label_creation.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/label_creation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..29ccdd55971d18a34f8e52158c362ea0acfea428 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/label_creation.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "label_creation.h" +#include "cgfunc.h" +#include "cg.h" +#include "debug_info.h" + +namespace maplebe { +using namespace maple; + +void LabelCreation::Run() +{ + CreateStartEndLabel(); +} + +void LabelCreation::CreateStartEndLabel() +{ + DEBUG_ASSERT(cgFunc != nullptr, "expect a cgfunc before CreateStartEndLabel"); + LabelIdx startLblIdx = cgFunc->CreateLabel(); + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + DEBUG_ASSERT(mirBuilder != nullptr, "get mirbuilder failed in CreateStartEndLabel"); + LabelNode *startLabel = mirBuilder->CreateStmtLabel(startLblIdx); + cgFunc->SetStartLabel(*startLabel); + cgFunc->GetFunction().GetBody()->InsertFirst(startLabel); + LabelIdx endLblIdx = cgFunc->CreateLabel(); + LabelNode *endLabel = mirBuilder->CreateStmtLabel(endLblIdx); + cgFunc->SetEndLabel(*endLabel); + cgFunc->GetFunction().GetBody()->InsertLast(endLabel); + DEBUG_ASSERT(cgFunc->GetFunction().GetBody()->GetLast() == endLabel, "last stmt must be a endLabel"); + MIRFunction *func = &cgFunc->GetFunction(); + CG *cg = cgFunc->GetCG(); + if (cg->GetCGOptions().WithDwarf()) { + DebugInfo *di = cg->GetMIRModule()->GetDbgInfo(); + DBGDie *fdie = di->GetDie(func); + fdie->SetAttr(DW_AT_low_pc, startLblIdx); + fdie->SetAttr(DW_AT_high_pc, endLblIdx); + } + /* add start/end labels into the static map table in class cg */ + if (!CG::IsInFuncWrapLabels(func)) { + CG::SetFuncWrapLabels(func, std::make_pair(startLblIdx, endLblIdx)); + } +} + +bool CgCreateLabel::PhaseRun(maplebe::CGFunc &f) +{ + MemPool *memPool = GetPhaseMemPool(); + LabelCreation *labelCreate = memPool->New(f); + labelCreate->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/live.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b69397f57c013618935ce053613cad84336253f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/live.cpp @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "live.h" +#include +#include "cg.h" +#include "cg_option.h" +#include "cgfunc.h" + +/* + * This phase build two sets: liveOutRegno and liveInRegno of each BB. + * This algorithm mainly include 3 parts: + * 1. initialize and get def[]/use[] of each BB; + * 2. build live_in and live_out based on this algorithm + * Out[B] = U In[S] //S means B's successor; + * In[B] = use[B] U (Out[B]-def[B]); + * 3. deal with cleanup BB. 
+ */ +namespace maplebe { +#define LIVE_ANALYZE_DUMP_NEWPM CG_DEBUG_FUNC(f) + +void LiveAnalysis::InitAndGetDefUse() +{ + FOR_ALL_BB(bb, cgFunc) { + if (!bb->GetEhPreds().empty()) { + InitEhDefine(*bb); + } + InitBB(*bb); + GetBBDefUse(*bb); + if (bb->GetEhPreds().empty()) { + continue; + } + bb->RemoveInsn(*bb->GetFirstInsn()->GetNext()); + cgFunc->DecTotalNumberOfInstructions(); + bb->RemoveInsn(*bb->GetFirstInsn()); + cgFunc->DecTotalNumberOfInstructions(); + } +} + +/* Out[BB] = Union all of In[Succs(BB)] */ +bool LiveAnalysis::GenerateLiveOut(BB &bb) +{ + const MapleSet<uint32> bbLiveOutBak(bb.GetLiveOut()->GetInfo()); + for (auto succBB : bb.GetSuccs()) { + if (succBB->GetLiveInChange() && !succBB->GetLiveIn()->NoneBit()) { + bb.LiveOutOrBits(*succBB->GetLiveIn()); + } + if (!succBB->GetEhSuccs().empty()) { + for (auto ehSuccBB : succBB->GetEhSuccs()) { + bb.LiveOutOrBits(*ehSuccBB->GetLiveIn()); + } + } + } + for (auto ehSuccBB : bb.GetEhSuccs()) { + if (ehSuccBB->GetLiveInChange() && !ehSuccBB->GetLiveIn()->NoneBit()) { + bb.LiveOutOrBits(*ehSuccBB->GetLiveIn()); + } + } + return !bb.GetLiveOut()->IsEqual(bbLiveOutBak); +} + +/* In[BB] = use[BB] Union (Out[BB]-def[BB]) */ +bool LiveAnalysis::GenerateLiveIn(BB &bb) +{ + LocalMapleAllocator allocator(stackMp); + const MapleSet<uint32> bbLiveInBak(bb.GetLiveIn()->GetInfo()); + if (!bb.GetInsertUse()) { + bb.SetLiveInInfo(*bb.GetUse()); + bb.SetInsertUse(true); + } + SparseDataInfo &bbLiveOut = bb.GetLiveOut()->Clone(allocator); + if (!bbLiveOut.NoneBit()) { + bbLiveOut.Difference(*bb.GetDef()); + bb.LiveInOrBits(bbLiveOut); + } + + if (!bb.GetLiveIn()->IsEqual(bbLiveInBak)) { + return true; + } + return false; +} + +SparseDataInfo *LiveAnalysis::GenerateLiveInByDefUse(SparseDataInfo &liveOut, SparseDataInfo &use, SparseDataInfo &def, + const MapleList<BB*> &ehSuccs) +{ + const uint32 maxRegCount = + cgFunc->GetSSAvRegCount() > cgFunc->GetMaxVReg() ? cgFunc->GetSSAvRegCount() : cgFunc->GetMaxVReg(); + SparseDataInfo *liveIn = memPool->New<SparseDataInfo>(maxRegCount, alloc); + liveIn = &use; + SparseDataInfo *tmpLiveOut = memPool->New<SparseDataInfo>(liveOut, alloc); + if (!liveOut.NoneBit()) { + tmpLiveOut->Difference(def); + liveIn->OrBits(*tmpLiveOut); + } + if (!ehSuccs.empty()) { + /* if bb has eh successors, check if multi-gen exists. */ + SparseDataInfo allInOfEhSuccs(maxRegCount, alloc); + for (auto ehSucc : ehSuccs) { + allInOfEhSuccs.OrBits(*ehSucc->GetLiveIn()); + } + allInOfEhSuccs.AndBits(def); + liveIn->OrBits(allInOfEhSuccs); + } + return liveIn; +} + +void LiveAnalysis::GenerateStackMapLiveIn() +{ + const auto &stackMapInsns = cgFunc->GetStackMapInsns(); + for (auto *insn : stackMapInsns) { + BB *curBB = insn->GetBB(); + SparseDataInfo *liveIn = GenerateLiveInByDefUse(*curBB->GetLiveOut(), *insn->GetStackMapUse(), + *insn->GetStackMapDef(), curBB->GetEhSuccs()); + insn->SetStackMapLiveIn(*liveIn); + } +} +/* building liveIn and liveOut of each BB.
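+ * The fixed-point loop below repeats backward passes over all BBs until no live-in set changes;
+ * e.g. a simple two-BB cycle typically converges after two or three iterations.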
*/ +void LiveAnalysis::BuildInOutforFunc() +{ + iteration = 0; + bool hasChange; + do { + ++iteration; + hasChange = false; + FOR_ALL_BB_REV(bb, cgFunc) { + if (!GenerateLiveOut(*bb) && bb->GetInsertUse()) { + continue; + } + if (GenerateLiveIn(*bb)) { + bb->SetLiveInChange(true); + hasChange = true; + } else { + bb->SetLiveInChange(false); + } + } + } while (hasChange); +} + +/* reset to liveout/in_regno */ +void LiveAnalysis::ResetLiveSet() +{ + FOR_ALL_BB(bb, cgFunc) { + bb->GetLiveIn()->GetBitsOfInfo<MapleSet<regno_t>>(bb->GetLiveInRegNO()); + bb->GetLiveOut()->GetBitsOfInfo<MapleSet<regno_t>>(bb->GetLiveOutRegNO()); + } +} + +/* entry function for LiveAnalysis */ +void LiveAnalysis::AnalysisLive() +{ + InitAndGetDefUse(); + BuildInOutforFunc(); + InsertInOutOfCleanupBB(); + GenerateStackMapLiveIn(); +} + +void LiveAnalysis::DealWithInOutOfCleanupBB() +{ + const BB *cleanupBB = cgFunc->GetCleanupEntryBB(); + if (cleanupBB == nullptr) { + return; + } + for (size_t i = 0; i != cleanupBB->GetLiveIn()->Size(); ++i) { + if (!cleanupBB->GetLiveIn()->TestBit(i)) { + continue; + } + if (CleanupBBIgnoreReg(regno_t(i))) { + continue; + } + /* + * a param vreg may be used in the cleanup bb, so such a vreg must stay live across the whole function, + * since an exception may occur anywhere in the function body. + */ + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + /* If bb is not a cleanup bb, then insert reg to both livein and liveout. */ + if ((bb != cgFunc->GetFirstBB()) && !bb->GetDef()->TestBit(i)) { + bb->SetLiveInBit(i); + } + bb->SetLiveOutBit(i); + } + } +} + +void LiveAnalysis::InsertInOutOfCleanupBB() +{ + const BB *cleanupBB = cgFunc->GetCleanupEntryBB(); + if (cleanupBB == nullptr) { + return; + } + if (cleanupBB->GetLiveIn() == nullptr || cleanupBB->GetLiveIn()->NoneBit()) { + return; + } + SparseDataInfo cleanupBBLi = *(cleanupBB->GetLiveIn()); + /* registers need to be ignored: (reg < 8) || (29 <= reg && reg <= 32) */ + for (uint32 i = 1; i < 8; ++i) { + cleanupBBLi.ResetBit(i); + } + for (uint32 j = 29; j <= 32; ++j) { + cleanupBBLi.ResetBit(j); + } + + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (bb != cgFunc->GetFirstBB()) { + cleanupBBLi.Difference(*bb->GetDef()); + bb->LiveInOrBits(cleanupBBLi); + } + bb->LiveOutOrBits(cleanupBBLi); + } +} + +void LiveAnalysis::MarkStackMapInsn(Insn &insn, BB &bb) +{ + insn.SetStackMapDef(*NewDef(*bb.GetDef())); + insn.SetStackMapUse(*NewUse(*bb.GetUse())); +} + +/* + * entry of get def/use of bb. + * getting the def or use info of each regopnd as parameters of CollectLiveInfo().
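+ * Illustrative example: scanning `add R3, R1, R2` in reverse marks R3 as defined (clearing it from use[])
+ * and R1, R2 as used (clearing them from def[]).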
+ */ +void LiveAnalysis::GetBBDefUse(BB &bb) +{ + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + return; + } + + FOR_BB_INSNS_REV(insn, &bb) + { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + MarkStackMapInsn(*insn, bb); + } + + bool isAsm = insn->IsAsmInsn(); + const InsnDesc *md = insn->GetDesc(); + if (insn->IsCall() || insn->IsTailCall()) { + ProcessCallInsnParam(bb, *insn); + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *opndDesc = md->GetOpndDes(i); + DEBUG_ASSERT(opndDesc != nullptr, "null ptr check"); + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd, opndDesc->IsDef()); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else { + bool isDef = opndDesc->IsRegDef(); + bool isUse = opndDesc->IsRegUse(); + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } + } +} + +/* build use and def sets of each BB according to the type of regOpnd. */ +void LiveAnalysis::CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const +{ + if (!opnd.IsRegister()) { + return; + } + const RegOperand &regOpnd = static_cast<const RegOperand&>(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + bb.SetDefBit(regNO); + if (!isUse) { + bb.UseResetBit(regNO); + } + } + if (isUse) { + bb.SetUseBit(regNO); + bb.DefResetBit(regNO); + } +} + +void LiveAnalysis::ProcessAsmListOpnd(BB &bb, Operand &opnd, uint32 idx) const +{ + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } + ListOperand &listOpnd = static_cast<ListOperand&>(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void LiveAnalysis::ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const +{ + ListOperand &listOpnd = static_cast<ListOperand&>(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, !isDef); + } +} + +void LiveAnalysis::ProcessMemOpnd(BB &bb, Operand &opnd) const +{ + auto &memOpnd = static_cast<MemOperand&>(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void LiveAnalysis::ProcessCondOpnd(BB &bb) const +{ + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +/* dump the current info of def/use/livein/liveout */ +void LiveAnalysis::Dump() const +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n--------- liveness for " << funcSt->GetName() << " iteration "; + LogInfo::MapleLogger() << iteration << " ---------\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ") " << std::dec << " <" + << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() <<
"]"; + } + LogInfo::MapleLogger() << "> idx " << bb->GetId() << " ===\n"; + + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " (" << std::hex << succ << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + + const SparseDataInfo *infoDef = nullptr; + LogInfo::MapleLogger() << " DEF: "; + infoDef = bb->GetDef(); + DumpInfo(*infoDef); + + const SparseDataInfo *infoUse = nullptr; + LogInfo::MapleLogger() << "\n USE: "; + infoUse = bb->GetUse(); + DumpInfo(*infoUse); + + const SparseDataInfo *infoLiveIn = nullptr; + LogInfo::MapleLogger() << "\n Live IN: "; + infoLiveIn = bb->GetLiveIn(); + DumpInfo(*infoLiveIn); + + const SparseDataInfo *infoLiveOut = nullptr; + LogInfo::MapleLogger() << "\n Live OUT: "; + infoLiveOut = bb->GetLiveOut(); + DumpInfo(*infoLiveOut); + LogInfo::MapleLogger() << "\n"; + } + LogInfo::MapleLogger() << "---------------------------\n"; +} + +void LiveAnalysis::DumpInfo(const SparseDataInfo &info) const +{ + uint32 count = 1; + for (auto x : info.GetInfo()) { + LogInfo::MapleLogger() << x << " "; + ++count; + /* 20 output one line */ + if ((count % 20) == 0) { + LogInfo::MapleLogger() << "\n"; + } + } + LogInfo::MapleLogger() << '\n'; +} + +/* initialize dependent info and container of BB. */ +void LiveAnalysis::InitBB(BB &bb) +{ + bb.SetLiveInChange(true); + bb.SetInsertUse(false); + bb.ClearLiveInRegNO(); + bb.ClearLiveOutRegNO(); + const uint32 maxRegCount = + cgFunc->GetSSAvRegCount() > cgFunc->GetMaxVReg() ? cgFunc->GetSSAvRegCount() : cgFunc->GetMaxVReg(); + bb.SetLiveIn(*NewLiveIn(maxRegCount)); + bb.SetLiveOut(*NewLiveOut(maxRegCount)); + bb.SetDef(*NewDef(maxRegCount)); + bb.SetUse(*NewUse(maxRegCount)); +} + +void LiveAnalysis::ClearInOutDataInfo() +{ + FOR_ALL_BB(bb, cgFunc) { + bb->SetLiveInChange(false); + bb->DefClearDataInfo(); + bb->UseClearDataInfo(); + bb->LiveInClearDataInfo(); + bb->LiveOutClearDataInfo(); + } +} + +void LiveAnalysis::EnlargeSpaceForLiveAnalysis(BB &currBB) +{ + regno_t currMaxVRegNO = cgFunc->GetMaxVReg(); + if (currMaxVRegNO >= currBB.GetLiveIn()->Size()) { + FOR_ALL_BB(bb, cgFunc) { + bb->LiveInEnlargeCapacity(currMaxVRegNO); + bb->LiveOutEnlargeCapacity(currMaxVRegNO); + } + } +} + +void CgLiveAnalysis::GetAnalysisDependence(AnalysisDep &aDep) const +{ +#if TARGX86_64 + aDep.AddRequired(); +#endif + aDep.SetPreservedAll(); +} + +bool CgLiveAnalysis::PhaseRun(maplebe::CGFunc &f) +{ + MemPool *liveMemPool = GetPhaseMemPool(); + live = f.GetCG()->CreateLiveAnalysis(*liveMemPool, f); + CHECK_FATAL(live != nullptr, "NIY"); + live->AnalysisLive(); + if (LIVE_ANALYZE_DUMP_NEWPM) { + live->Dump(); + } + live->ResetLiveSet(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgLiveAnalysis, liveanalysis) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/local_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/local_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..26e573e6da680b6f87ee402ab6756befab531ae0 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/local_opt.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "local_opt.h" +#include "cg.h" +#include "mpl_logging.h" +#if defined TARGX86_64 +#include "x64_reaching.h" +#endif +/* + * this phase does optimization at the local level (single bb or super bb); + * it requires live analysis + */ +namespace maplebe { +void LocalOpt::DoLocalCopyPropOptmize() +{ + DoLocalCopyProp(); +} + +void LocalPropOptimizePattern::Run() +{ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*bb, *insn); + } + } +} + +bool LocalCopyProp::PhaseRun(maplebe::CGFunc &f) +{ + MemPool *mp = GetPhaseMemPool(); + auto *reachingDef = f.GetCG()->CreateReachingDefinition(*mp, f); + LocalOpt *localOpt = f.GetCG()->CreateLocalOpt(*mp, f, *reachingDef); + localOpt->DoLocalCopyPropOptmize(); + return false; +} + +void LocalCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired<CgLiveAnalysis>(); + aDep.SetPreservedAll(); +} + +bool RedundantDefRemove::CheckCondition(Insn &insn) +{ + uint32 opndNum = insn.GetOperandSize(); + const InsnDesc *md = insn.GetDesc(); + std::vector<Operand*> defOpnds; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + auto *opndDesc = md->opndMD[i]; + if (opndDesc->IsDef() && opndDesc->IsUse()) { + return false; + } + if (opnd.IsList()) { + continue; + } + if (opndDesc->IsDef()) { + defOpnds.emplace_back(&opnd); + } + } + if (defOpnds.size() != 1 || !defOpnds[0]->IsRegister()) { + return false; + } + auto &regDef = static_cast<RegOperand&>(*defOpnds[0]); + auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO(); + if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) { + return false; + } + return true; +} + +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(LocalCopyProp, localcopyprop) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/loop.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/loop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ca8a6511ad634585de7b929a62637d7e76371b4c --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/loop.cpp @@ -0,0 +1,691 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "loop.h" +#include "cg.h" +#include "optimize_common.h" + +namespace maplebe { +#define LOOP_ANALYSIS_DUMP_NEWPM CG_DEBUG_FUNC(f) + +static void PrintLoopInfo(const LoopHierarchy &loop) +{ + LogInfo::MapleLogger() << "header " << loop.GetHeader()->GetId(); + if (loop.otherLoopEntries.size()) { + LogInfo::MapleLogger() << " multi-header "; + for (auto en : loop.otherLoopEntries) { + LogInfo::MapleLogger() << en->GetId() << " "; + } + } + if (loop.GetOuterLoop() != nullptr) { + LogInfo::MapleLogger() << " parent " << loop.GetOuterLoop()->GetHeader()->GetId(); + } + LogInfo::MapleLogger() << " backedge "; + for (auto *bb : loop.GetBackedge()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n members "; + for (auto *bb : loop.GetLoopMembers()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + if (!loop.GetInnerLoops().empty()) { + LogInfo::MapleLogger() << "\n inner_loop_headers "; + for (auto *inner : loop.GetInnerLoops()) { + LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +static void PrintInner(const LoopHierarchy &loop, uint32 level) +{ + for (auto *inner : loop.GetInnerLoops()) { + LogInfo::MapleLogger() << "loop-level-" << level << "\n"; + PrintLoopInfo(*inner); + PrintInner(*inner, level + 1); + } +} + +void LoopHierarchy::PrintLoops(const std::string &name) const +{ + LogInfo::MapleLogger() << name << "\n"; + for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) { + PrintLoopInfo(*loop); + } + for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) { + PrintInner(*loop, 1); + } +} + +void CGFuncLoops::CheckOverlappingInnerLoops(const MapleVector<CGFuncLoops*> &iLoops, + const MapleVector<BB*> &loopMem) const +{ + for (auto iloop : iLoops) { + CHECK_FATAL(iloop->loopMembers.size() > 0, "Empty loop"); + for (auto bb : iloop->loopMembers) { + if (find(loopMem.begin(), loopMem.end(), bb) != loopMem.end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop member"; + CHECK_FATAL(0, "loop member overlap with inner loop"); + } + } + CheckOverlappingInnerLoops(iloop->innerLoops, loopMem); + } +} + +void CGFuncLoops::CheckLoops() const +{ + // Make sure backedge -> header relationship holds + for (auto bEdge : backedge) { + if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), header) == bEdge->GetSuccs().end()) { + bool inOtherEntry = false; + for (auto entry : multiEntries) { + if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), entry) != bEdge->GetSuccs().end()) { + inOtherEntry = true; + break; + } + } + if (inOtherEntry == false) { + if (find(bEdge->GetEhSuccs().begin(), bEdge->GetEhSuccs().end(), header) == bEdge->GetEhSuccs().end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop backedge"; + CHECK_FATAL(0, "loop backedge does not go to loop header"); + } + } + } + if (find(header->GetPreds().begin(), header->GetPreds().end(), bEdge) == header->GetPreds().end()) { + bool inOtherEntry = false; + for (auto entry : multiEntries) { + if (find(entry->GetPreds().begin(), entry->GetPreds().end(), bEdge) != entry->GetPreds().end()) { + inOtherEntry = true; + break; + } + } + if (inOtherEntry == false) { + if (find(header->GetEhPreds().begin(), header->GetEhPreds().end(), bEdge) == + header->GetEhPreds().end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop header"; + CHECK_FATAL(0, "loop header does not have a backedge"); + } + } + } + } + + // Make sure containing loop members do not overlap +
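+ // Illustrative example: if loopMembers is {BB2, BB3} while an inner loop lists {BB3, BB4}, BB3 is an overlap;
+ // inner-loop members must be disjoint from this loop's own member list.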
CheckOverlappingInnerLoops(innerLoops, loopMembers); + + if (innerLoops.empty() == false) { + for (auto lp : innerLoops) { + lp->CheckLoops(); + } + } +} + +void CGFuncLoops::PrintLoops(const CGFuncLoops &funcLoop) const +{ + LogInfo::MapleLogger() << "loop_level(" << funcLoop.loopLevel << ") "; + LogInfo::MapleLogger() << "header " << funcLoop.GetHeader()->GetId() << " "; + if (funcLoop.multiEntries.size()) { + LogInfo::MapleLogger() << "other-header "; + for (auto bb : funcLoop.multiEntries) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + } + if (funcLoop.GetOuterLoop() != nullptr) { + LogInfo::MapleLogger() << "parent " << funcLoop.GetOuterLoop()->GetHeader()->GetId() << " "; + } + LogInfo::MapleLogger() << "backedge "; + for (auto *bb : funcLoop.GetBackedge()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n members "; + for (auto *bb : funcLoop.GetLoopMembers()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n exits "; + for (auto *bb : funcLoop.GetExits()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n"; + if (!funcLoop.GetInnerLoops().empty()) { + LogInfo::MapleLogger() << " inner_loop_headers "; + for (auto *inner : funcLoop.GetInnerLoops()) { + LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " "; + } + LogInfo::MapleLogger() << "\n"; + for (auto *inner : funcLoop.GetInnerLoops()) { + PrintLoops(*inner); + } + } +} + +// The partial loop body found by formLoop is NOT really needed downstream. +// It should be simplified later. +void LoopFinder::formLoop(BB *headBB, BB *backBB) +{ + DEBUG_ASSERT(headBB != nullptr && backBB != nullptr, "headBB or backBB is nullptr"); + LoopHierarchy *simple_loop = memPool->New<LoopHierarchy>(*memPool); + + if (headBB != backBB) { + DEBUG_ASSERT(!dfsBBs.empty(), "dfsBBs is empty"); + DEBUG_ASSERT(onPathBBs[headBB->GetId()], "headBB is not on execution path"); + std::stack<BB*> tempStk; + + tempStk.push(dfsBBs.top()); + dfsBBs.pop(); + + while (tempStk.top() != headBB && !dfsBBs.empty()) { + tempStk.push(dfsBBs.top()); + dfsBBs.pop(); + } + + while (!tempStk.empty()) { + BB *topBB = tempStk.top(); + tempStk.pop(); + + if (onPathBBs[topBB->GetId()]) { + simple_loop->InsertLoopMembers(*topBB); + } + dfsBBs.push(topBB); + } + } + // Note: backBB is NOT on dfsBBs + simple_loop->InsertLoopMembers(*backBB); + simple_loop->SetHeader(*headBB); + simple_loop->InsertBackedge(*backBB); + + if (loops) { + loops->SetPrev(simple_loop); + } + simple_loop->SetNext(loops); + loops = simple_loop; +} + +void LoopFinder::seekBackEdge(BB *bb, MapleList<BB*> succs) +{ + for (const auto succBB : succs) { + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } else { + if (onPathBBs[succBB->GetId()]) { + formLoop(succBB, bb); + bb->PushBackLoopSuccs(*succBB); + succBB->PushBackLoopPreds(*bb); + } + } + } +} + +void LoopFinder::seekCycles() +{ + while (!dfsBBs.empty()) { + BB *bb = dfsBBs.top(); + if (visitedBBs[bb->GetId()]) { + onPathBBs[bb->GetId()] = false; + dfsBBs.pop(); + continue; + } + + visitedBBs[bb->GetId()] = true; + onPathBBs[bb->GetId()] = true; + seekBackEdge(bb, bb->GetSuccs()); + seekBackEdge(bb, bb->GetEhSuccs()); + } +} + +void LoopFinder::markExtraEntryAndEncl() +{ + DEBUG_ASSERT(dfsBBs.empty(), "dfsBBs is NOT empty"); + std::vector<BB*> loopEnclosure; + loopEnclosure.resize(cgFunc->NumBBs()); + std::vector<bool> startProcess; + startProcess.resize(cgFunc->NumBBs()); + std::vector<BB*> origEntries; + origEntries.resize(cgFunc->NumBBs()); + std::vector<BB*>
newEntries; + newEntries.resize(cgFunc->NumBBs()); + + for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) { + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(loopEnclosure.begin(), loopEnclosure.end(), nullptr); + fill(startProcess.begin(), startProcess.end(), false); + fill(origEntries.begin(), origEntries.end(), nullptr); + fill(newEntries.begin(), newEntries.end(), nullptr); + + for (auto *bb : loop->GetLoopMembers()) { + loopEnclosure[bb->GetId()] = bb; + } + origEntries[loop->GetHeader()->GetId()] = loop->GetHeader(); + + // Form loop closure from the primary entry. At end collect all other entries + bool changed = false; + dfsBBs.push(loop->GetHeader()); + while (true) { + while (!dfsBBs.empty()) { + BB *bb = dfsBBs.top(); + visitedBBs[bb->GetId()] = true; + if (startProcess[bb->GetId()]) { + dfsBBs.pop(); + for (const auto succBB : bb->GetSuccs()) { + if (loopEnclosure[bb->GetId()] == nullptr && loopEnclosure[succBB->GetId()] != nullptr && + succBB != loop->GetHeader()) { + changed = true; + loopEnclosure[bb->GetId()] = bb; + break; + } + } + continue; + } else { + startProcess[bb->GetId()] = true; + for (const auto succBB : bb->GetSuccs()) { + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } + } + } + } + + // Repeat till no new item is added in + if (changed) { + dfsBBs.push(loop->GetHeader()); + changed = false; + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(startProcess.begin(), startProcess.end(), false); + continue; + } + + // Collect all entries + bool foundNewEntry = false; + fill(visitedBBs.begin(), visitedBBs.end(), false); + FOR_ALL_BB(bb, cgFunc) { + if (!visitedBBs[bb->GetId()]) { + dfsBBs.push(bb); + visitedBBs[bb->GetId()] = true; + while (!dfsBBs.empty()) { + BB *currBB = dfsBBs.top(); + visitedBBs[currBB->GetId()] = true; + dfsBBs.pop(); + for (const auto succBB : currBB->GetSuccs()) { + // check if entering a loop. 
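+ // (i.e. the edge goes from a BB outside every collected loop body to one inside it; e.g. an edge
+ // BB1 -> BB4 where only BB4 is in loopEnclosure marks BB4 as an extra loop entry)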
+ if ((loopEnclosure[succBB->GetId()] != nullptr) && + (loopEnclosure[currBB->GetId()] == nullptr)) { + newEntries[succBB->GetId()] = succBB; + if (origEntries[succBB->GetId()] == nullptr) { + foundNewEntry = true; + } + } + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } + } + } + } + } + if (foundNewEntry) { + origEntries = newEntries; + for (const auto bb : newEntries) { + if (bb != nullptr) { + dfsBBs.push(bb); + } + } + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(startProcess.begin(), startProcess.end(), false); + fill(newEntries.begin(), newEntries.end(), nullptr); + } else { + break; + } + } + + // Setup loop body + for (size_t id = 0; id < loopEnclosure.size(); id++) { + if (loopEnclosure[id] != nullptr) { + loop->InsertLoopMembers(*loopEnclosure[id]); + } + } + + // Setup head and extra entries + for (const auto bb : newEntries) { + if (bb != nullptr) { + loop->otherLoopEntries.insert(bb); + } + } + loop->otherLoopEntries.erase(loop->GetHeader()); + } +} + +bool LoopFinder::HasSameHeader(const LoopHierarchy *lp1, const LoopHierarchy *lp2) +{ + if (lp1->GetHeader() == lp2->GetHeader()) { + return true; + } + for (auto other1 : lp1->otherLoopEntries) { + if (lp2->GetHeader() == other1) { + return true; + } + for (auto other2 : lp2->otherLoopEntries) { + if (other2 == other1) { + return true; + } + } + } + return false; +} + +void LoopFinder::MergeLoops() +{ + for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr; loopHierarchy1 = loopHierarchy1->GetNext()) { + for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr; + loopHierarchy2 = loopHierarchy2->GetNext()) { + // Different loop bodies imply different loops + bool sameLoop = true; + if (loopHierarchy1->GetLoopMembers().size() == loopHierarchy2->GetLoopMembers().size()) { + for (auto *bb : loopHierarchy2->GetLoopMembers()) { + if (find(loopHierarchy1->GetLoopMembers().begin(), loopHierarchy1->GetLoopMembers().end(), bb) == + loopHierarchy1->GetLoopMembers().end()) { + sameLoop = false; + break; + } + } + if (sameLoop) { + for (auto *bb : loopHierarchy1->GetLoopMembers()) { + if (find(loopHierarchy2->GetLoopMembers().begin(), loopHierarchy2->GetLoopMembers().end(), + bb) == loopHierarchy2->GetLoopMembers().end()) { + sameLoop = false; + break; + } + } + } + if (sameLoop) { + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + continue; + } + } + if (HasSameHeader(loopHierarchy1, loopHierarchy2) == false) { + continue; + } + for (auto *bb : loopHierarchy2->GetLoopMembers()) { + loopHierarchy1->InsertLoopMembers(*bb); + } + if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) { + loopHierarchy1->otherLoopEntries.insert(loopHierarchy2->GetHeader()); + } + for (auto bb : loopHierarchy2->otherLoopEntries) { + if (loopHierarchy1->GetHeader() != bb) { + loopHierarchy1->otherLoopEntries.insert(bb); + } + } + for (auto *bb : loopHierarchy2->GetBackedge()) { + loopHierarchy1->InsertBackedge(*bb); + } + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + } + } +} + +void LoopFinder::SortLoops() +{ + LoopHierarchy *head = nullptr; + LoopHierarchy *next1 = nullptr; + LoopHierarchy *next2 = nullptr; + bool swapped; + do { + swapped = false; + for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != 
nullptr;) {
+            /* remember loopHierarchy1's prev in case loopHierarchy1 is moved */
+            head = loopHierarchy1;
+            next1 = loopHierarchy1->GetNext();
+            for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;) {
+                next2 = loopHierarchy2->GetNext();
+
+                if (loopHierarchy1->GetLoopMembers().size() > loopHierarchy2->GetLoopMembers().size()) {
+                    if (head->GetPrev() == nullptr) {
+                        /* remove loopHierarchy2 from list */
+                        loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+                        if (loopHierarchy2->GetNext() != nullptr) {
+                            loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+                        }
+                        /* link loopHierarchy2 as head */
+                        loops = loopHierarchy2;
+                        loopHierarchy2->SetPrev(nullptr);
+                        loopHierarchy2->SetNext(head);
+                        head->SetPrev(loopHierarchy2);
+                    } else {
+                        loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext());
+                        if (loopHierarchy2->GetNext() != nullptr) {
+                            loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev());
+                        }
+                        head->GetPrev()->SetNext(loopHierarchy2);
+                        loopHierarchy2->SetPrev(head->GetPrev());
+                        loopHierarchy2->SetNext(head);
+                        head->SetPrev(loopHierarchy2);
+                    }
+                    head = loopHierarchy2;
+                    swapped = true;
+                }
+                loopHierarchy2 = next2;
+            }
+            loopHierarchy1 = next1;
+        }
+    } while (swapped);
+}
+
+void LoopFinder::UpdateOuterForInnerLoop(BB *bb, LoopHierarchy *outer)
+{
+    if (outer == nullptr) {
+        return;
+    }
+    for (auto ito = outer->GetLoopMembers().begin(); ito != outer->GetLoopMembers().end();) {
+        if (*ito == bb) {
+            ito = outer->EraseLoopMembers(ito);
+        } else {
+            ++ito;
+        }
+    }
+    if (outer->GetOuterLoop() != nullptr) {
+        UpdateOuterForInnerLoop(bb, const_cast<LoopHierarchy *>(outer->GetOuterLoop()));
+    }
+}
+
+void LoopFinder::UpdateOuterLoop(const LoopHierarchy *loop)
+{
+    for (auto inner : loop->GetInnerLoops()) {
+        UpdateOuterLoop(inner);
+    }
+    for (auto *bb : loop->GetLoopMembers()) {
+        UpdateOuterForInnerLoop(bb, const_cast<LoopHierarchy *>(loop->GetOuterLoop()));
+    }
+}
+
+void LoopFinder::CreateInnerLoop(LoopHierarchy &inner, LoopHierarchy &outer)
+{
+    outer.InsertInnerLoops(inner);
+    inner.SetOuterLoop(outer);
+    if (loops == &inner) {
+        loops = inner.GetNext();
+    } else {
+        LoopHierarchy *prev = loops;
+        for (LoopHierarchy *loopHierarchy1 = loops->GetNext(); loopHierarchy1 != nullptr;
+             loopHierarchy1 = loopHierarchy1->GetNext()) {
+            if (loopHierarchy1 == &inner) {
+                prev->SetNext(prev->GetNext()->GetNext());
+            }
+            prev = loopHierarchy1;
+        }
+    }
+}
+
+static void FindLoopExits(LoopHierarchy *loop)
+{
+    for (auto *bb : loop->GetLoopMembers()) {
+        for (auto succ : bb->GetSuccs()) {
+            if (find(loop->GetLoopMembers().begin(), loop->GetLoopMembers().end(), succ) ==
+                loop->GetLoopMembers().end()) {
+                loop->InsertExit(*bb);
+            }
+        }
+    }
+    for (auto *inner : loop->GetInnerLoops()) {
+        FindLoopExits(inner);
+    }
+}
+
+void LoopFinder::DetectInnerLoop()
+{
+    for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) {
+        FindLoopExits(loop);
+    }
+    bool innerCreated;
+    do {
+        innerCreated = false;
+        for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr;
+             loopHierarchy1 = loopHierarchy1->GetNext()) {
+            for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;
+                 loopHierarchy2 = loopHierarchy2->GetNext()) {
+                if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) {
+                    auto loopHierarchy2Members = loopHierarchy2->GetLoopMembers();
+                    if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), loopHierarchy1->GetHeader()) !=
+                        loopHierarchy2Members.end()) {
+                        bool allin = true;
+                        // Make
sure body is included + for (auto *bb1 : loopHierarchy1->GetLoopMembers()) { + if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), bb1) == + loopHierarchy2Members.end()) { + allin = false; + break; + } + } + if (allin) { + CreateInnerLoop(*loopHierarchy1, *loopHierarchy2); + innerCreated = true; + } + } + if (innerCreated) { + break; + } + } + } + if (innerCreated) { + break; + } + } + } while (innerCreated); + + for (LoopHierarchy *outer = loops; outer != nullptr; outer = outer->GetNext()) { + UpdateOuterLoop(outer); + } +} + +static void CopyLoopInfo(const LoopHierarchy *from, CGFuncLoops *to, CGFuncLoops *parent, MemPool *memPool) +{ + to->SetHeader(*const_cast(from->GetHeader())); + for (auto bb : from->otherLoopEntries) { + to->AddMultiEntries(*bb); + } + for (auto *bb : from->GetLoopMembers()) { + to->AddLoopMembers(*bb); + bb->SetLoop(*to); + } + for (auto *bb : from->GetBackedge()) { + to->AddBackedge(*bb); + } + for (auto *bb : from->GetExits()) { + to->AddExit(*bb); + } + if (!from->GetInnerLoops().empty()) { + for (auto *inner : from->GetInnerLoops()) { + CGFuncLoops *floop = memPool->New(*memPool); + to->AddInnerLoops(*floop); + floop->SetLoopLevel(to->GetLoopLevel() + 1); + CopyLoopInfo(inner, floop, to, memPool); + } + } + if (parent != nullptr) { + to->SetOuterLoop(*parent); + } +} + +void LoopFinder::UpdateCGFunc() +{ + for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) { + CGFuncLoops *floop = cgFunc->GetMemoryPool()->New(*cgFunc->GetMemoryPool()); + cgFunc->PushBackLoops(*floop); + floop->SetLoopLevel(1); /* top level */ + CopyLoopInfo(loop, floop, nullptr, cgFunc->GetMemoryPool()); + } +} + +void LoopFinder::FormLoopHierarchy() +{ + visitedBBs.clear(); + visitedBBs.resize(cgFunc->NumBBs(), false); + sortedBBs.clear(); + sortedBBs.resize(cgFunc->NumBBs(), nullptr); + onPathBBs.clear(); + onPathBBs.resize(cgFunc->NumBBs(), false); + + FOR_ALL_BB(bb, cgFunc) { + bb->SetLevel(0); + } + bool changed; + do { + changed = false; + FOR_ALL_BB(bb, cgFunc) { + if (!visitedBBs[bb->GetId()]) { + dfsBBs.push(bb); + seekCycles(); + changed = true; + } + } + } while (changed); + + markExtraEntryAndEncl(); + /* + * FIX : Should merge the partial loops at the time of initial + * construction. And make the linked list as a sorted set, + * then the merge and sort phases below can go away. 
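+     * (i.e. if loops were kept in a container ordered by member count and
+     * merged by header on insertion, MergeLoops() and SortLoops() below
+     * would be unnecessary.)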
+ *
+ * Start merging the loops with the same header
+ */
+    MergeLoops();
+    /* order loops by increasing number of members */
+    SortLoops();
+    DetectInnerLoop();
+    UpdateCGFunc();
+}
+
+bool CgLoopAnalysis::PhaseRun(maplebe::CGFunc &f)
+{
+    f.ClearLoopInfo();
+    MemPool *loopMemPool = GetPhaseMemPool();
+    LoopFinder *loopFinder = loopMemPool->New<LoopFinder>(f, *loopMemPool);
+    loopFinder->FormLoopHierarchy();
+
+    if (LOOP_ANALYSIS_DUMP_NEWPM) {
+        /* do dot gen after detection so the loop backedge can be properly colored using the loop info */
+        DotGenerator::GenerateDot("buildloop", f, f.GetMirModule(), true, f.GetName());
+    }
+#if DEBUG
+    for (const auto *lp : f.GetLoops()) {
+        lp->CheckLoops();
+    }
+#endif
+    return false;
+}
+MAPLE_ANALYSIS_PHASE_REGISTER(CgLoopAnalysis, loopanalysis)
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/memlayout.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/memlayout.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9f6deec7c6cf1aea22c452aa77691bed6a1737ae
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/memlayout.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "memlayout.h"
+#include "cgfunc.h"
+
+namespace maplebe {
+using namespace maple;
+
+/*
+ * Go over all outgoing calls in the function body and get the maximum space
+ * needed for storing the actuals based on the actual parameters and the ABI.
+ * These are usually those arguments that cannot be passed
+ * through registers because a call passes more than 8 arguments, or
+ * they cannot fit in a pair of registers.
+ *
+ * This assumes that all nesting of statements has been removed,
+ * so that all the statements are at only one block level.
+ */
+uint32 MemLayout::FindLargestActualArea(int32 &aggCopySize)
+{
+    StmtNode *stmt = mirFunction->GetBody()->GetFirst();
+    if (stmt == nullptr) {
+        return 0;
+    }
+    uint32 maxActualSize = 0;
+    uint32 maxParamStackSize = 0;  // Size of parameter stack requirement
+    uint32 maxCopyStackSize = 0;   // Size of aggregate param stack copy requirement
+    for (; stmt != nullptr; stmt = stmt->GetNext()) {
+        Opcode opCode = stmt->GetOpCode();
+        if ((opCode < OP_call || opCode > OP_xintrinsiccallassigned) && opCode != OP_icallproto) {
+            continue;
+        }
+        if (opCode == OP_intrinsiccallwithtypeassigned || opCode == OP_intrinsiccallwithtype ||
+            opCode == OP_intrinsiccallassigned || opCode == OP_intrinsiccall) {
+            /*
+             * Some intrinsics, such as MPL_ATOMIC_EXCHANGE_PTR, are handled by CG,
+             * and map to machine code sequences. We ignore them because they are not
+             * function calls.
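+             * Since they expand to inline instruction sequences rather than real
+             * calls, they contribute nothing to the outgoing-argument area.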
+             */
+            continue;
+        }
+        /*
+         * if the following check fails, the bytecode most likely has invoke-custom etc.,
+         * which is not supported yet
+         */
+        DCHECK((opCode == OP_call || opCode == OP_icall || opCode == OP_icallproto), "Not lowered to call or icall?");
+        int32 copySize;
+        uint32 size =
+            ComputeStackSpaceRequirementForCall(*stmt, copySize, opCode == OP_icall || opCode == OP_icallproto);
+        if (size > maxParamStackSize) {
+            maxParamStackSize = size;
+        }
+        if (static_cast<uint32>(copySize) > maxCopyStackSize) {
+            maxCopyStackSize = static_cast<uint32>(copySize);
+        }
+        if ((maxParamStackSize + maxCopyStackSize) > maxActualSize) {
+            maxActualSize = maxParamStackSize + maxCopyStackSize;
+        }
+    }
+    aggCopySize = static_cast<int32>(maxCopyStackSize);
+    /* GetPointerSize() * 2 is 16 (2 to the power of 4), so the round-up clears the low 4 bits of maxActualSize */
+    if (CGOptions::IsArm64ilp32()) {
+        maxActualSize = RoundUp(maxActualSize, k8ByteSize * 2);
+    } else {
+        maxActualSize = RoundUp(maxActualSize, GetPointerSize() * 2);
+    }
+    return maxActualSize;
+}
+
+bool CgLayoutFrame::PhaseRun(maplebe::CGFunc &f)
+{
+    if (CGOptions::IsPrintFunction()) {
+        LogInfo::MapleLogger() << f.GetName() << "\n";
+    }
+    f.LayoutStackFrame();
+    return false;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/obj_emit.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/obj_emit.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b10740fffa2cd7229cba5693778d9cd92875ea5e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/obj_emit.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "obj_emit.h" +#include "namemangler.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; +using namespace namemangler; + +void ObjEmitter::Run(FuncEmitInfo &funcEmitInfo) +{ + InsertNopInsn(static_cast(funcEmitInfo)); + EmitFuncBinaryCode(static_cast(funcEmitInfo)); +} + +/* traverse insns, get the binary code and saved in buffer */ +void ObjEmitter::EmitFuncBinaryCode(ObjFuncEmitInfo &objFuncEmitInfo) +{ + CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc(); + objFuncEmitInfo.SetFuncName(cgFunc.GetName()); + + int labelSize = cgFunc.GetLab2BBMap().size() + cgFunc.GetLabelAndValueMap().size() + 1; + std::vector label2Offset(labelSize, 0xFFFFFFFFULL); + EmitInstructions(objFuncEmitInfo, label2Offset); + objFuncEmitInfo.UpdateMethodCodeSize(); + + int symbolSize = cgFunc.GetFunction().GetSymTab()->GetTable().size() + 1; + std::vector symbol2Offset(symbolSize, 0xFFFFFFFFULL); + EmitFunctionSymbolTable(objFuncEmitInfo, symbol2Offset); + EmitSwitchTable(objFuncEmitInfo, symbol2Offset); + + /* local float variable */ + for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) { + CHECK_FATAL(mpPair.first <= label2Offset.size(), "label2Offset"); + label2Offset[mpPair.first] = objFuncEmitInfo.GetTextDataSize(); + objFuncEmitInfo.AppendTextData(&(mpPair.second), k8ByteSize); + } + + /* handle branch fixup here */ + objFuncEmitInfo.HandleLocalBranchFixup(label2Offset, symbol2Offset); +} + +void ObjEmitter::EmitInstructions(ObjFuncEmitInfo &objFuncEmitInfo, std::vector &label2Offset) +{ + CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc(); + FOR_ALL_BB(bb, &cgFunc) { + if (bb->GetLabIdx() != 0) { + CHECK_FATAL(bb->GetLabIdx() <= label2Offset.size(), "label2Offset"); + label2Offset[bb->GetLabIdx()] = objFuncEmitInfo.GetTextDataSize(); + objFuncEmitInfo.AppendLabel2Order(bb->GetLabIdx()); + } + + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() || insn->IsAsmInsn() || insn->IsPseudo()) { + continue; + } + + /* get binary code and save in buffer */ + if (insn->GetDesc()->IsIntrinsic()) { + EmitIntrinsicInsn(*insn, objFuncEmitInfo); + } else if (insn->GetDesc()->IsSpecialIntrinsic()) { + EmitSpinIntrinsicInsn(*insn, objFuncEmitInfo); + } else { + EncodeInstruction(*insn, label2Offset, objFuncEmitInfo); + } + } + } +} + +void ObjEmitter::EmitSwitchTable(ObjFuncEmitInfo &objFuncEmitInfo, const std::vector &symbol2Offset) +{ + CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc(); + if (cgFunc.GetEmitStVec().size() == 0) { + return; + } + uint32 tmpOffset = GetBeforeTextDataSize(objFuncEmitInfo); + /* align is 8 push padding to objFuncEmitInfo.data */ + uint32 startOffset = Alignment::Align(tmpOffset, k8ByteSize); + uint32 padding = startOffset - tmpOffset; + objFuncEmitInfo.FillTextDataNop(padding); + + uint32 curOffset = objFuncEmitInfo.GetTextDataSize(); + for (std::pair st : cgFunc.GetEmitStVec()) { + objFuncEmitInfo.SetSwitchTableOffset(st.second->GetName(), curOffset); + MIRAggConst *arrayConst = safe_cast(st.second->GetKonst()); + DEBUG_ASSERT(arrayConst != nullptr, "null ptr check"); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + DEBUG_ASSERT(lblConst != nullptr, "null ptr check"); + CHECK_FATAL(lblConst->GetValue() <= symbol2Offset.size(), "symbol2Offset"); + uint64 offset = static_cast(symbol2Offset[lblConst->GetValue()]) - static_cast(curOffset); + objFuncEmitInfo.AppendTextData(offset, k8ByteSize); + } + + curOffset += arrayConst->GetConstVec().size() * k8ByteSize; + } +} + +void 
ObjEmitter::WriteObjFile()
+{
+    /* write header */
+    Emit(&header, sizeof(header));
+
+    /* write sections */
+    for (auto *section : sections) {
+        if (section->GetType() == SHT_NOBITS) {
+            continue;
+        }
+        if (section == textSection) {
+            const auto &emitMemorymanager = CGOptions::GetInstance().GetEmitMemoryManager();
+            uint8 *memSpace =
+                emitMemorymanager.allocateDataSection(emitMemorymanager.codeSpace, textSection->GetDataSize(),
+                                                      textSection->GetAlign(), textSection->GetName().c_str());
+            memcpy_s(memSpace, textSection->GetDataSize(), textSection->GetData().data(), section->GetDataSize());
+        }
+
+        SetFileOffset(section->GetOffset());
+        section->WriteSection(fileStream);
+    }
+
+    /* write section table */
+    SetFileOffset(header.e_shoff);
+    for (auto section : sections) {
+        Emit(&section->GetSectionHeader(), sizeof(section->GetSectionHeader()));
+    }
+}
+
+void ObjEmitter::AddSymbol(const std::string &name, Word size, const Section &section, Address value)
+{
+    auto nameIndex = strTabSection->AddString(name);
+    symbolTabSection->AppendSymbol({static_cast<Word>(nameIndex),
+                                    static_cast<uint8>((STB_GLOBAL << 4) + (STT_SECTION & 0xf)), 0,
+                                    section.GetIndex(), value, size});
+}
+
+void ObjEmitter::AddFuncSymbol(const MapleString &name, Word size, Address value)
+{
+    auto symbolStrIndex = strTabSection->AddString(name);
+    symbolTabSection->AppendSymbol({static_cast<Word>(symbolStrIndex),
+                                    static_cast<uint8>((STB_GLOBAL << k4BitSize) + (STT_FUNC & 0xf)), 0,
+                                    textSection->GetIndex(), value, size});
+}
+
+void ObjEmitter::ClearData()
+{
+    globalLabel2Offset.clear();
+    for (auto *section : sections) {
+        if (section != nullptr) {
+            section->ClearData();
+        }
+    }
+}
+
+void ObjEmitter::InitELFHeader()
+{
+    header.e_ident[EI_MAG0] = ELFMAG0;
+    header.e_ident[EI_MAG1] = ELFMAG1;
+    header.e_ident[EI_MAG2] = ELFMAG2;
+    header.e_ident[EI_MAG3] = ELFMAG3;
+    header.e_ident[EI_CLASS] = ELFCLASS64;
+    header.e_ident[EI_DATA] = ELFDATA2LSB;
+    header.e_ident[EI_VERSION] = EV_CURRENT;
+    header.e_ident[EI_OSABI] = ELFOSABI_LINUX;
+    header.e_ident[EI_ABIVERSION] = 0;
+    std::fill_n(&header.e_ident[EI_PAD], EI_NIDENT - EI_PAD, 0);
+    header.e_type = ET_REL;
+    header.e_version = 1;
+    UpdateMachineAndFlags(header);
+    header.e_entry = 0;
+    header.e_ehsize = sizeof(FileHeader);
+    header.e_phentsize = sizeof(SegmentHeader);
+    header.e_shentsize = sizeof(SectionHeader);
+    header.e_shstrndx = shStrSection->GetIndex();
+    header.e_shoff = 0;
+    header.e_phoff = 0;
+    header.e_shnum = sections.size();
+    header.e_phnum = 0;
+}
+
+void ObjEmitter::EmitMIRIntConst(EmitInfo &emitInfo)
+{
+    DEBUG_ASSERT(IsPrimitiveScalar(emitInfo.elemConst.GetType().GetPrimType()), "must be primitive type!");
+    MIRIntConst &intConst = static_cast<MIRIntConst &>(emitInfo.elemConst);
+    size_t size = GetPrimTypeSize(emitInfo.elemConst.GetType().GetPrimType());
+    const IntVal &value = intConst.GetValue();
+    int64 val = value.GetExtValue();
+    dataSection->AppendData(&val, size);
+    emitInfo.offset += size;
+#ifdef OBJ_DEBUG
+    LogInfo::MapleLogger() << val << " size: " << size << "\n";
+#endif
+}
+
+void ObjEmitter::EmitMIRAddrofConstCommon(EmitInfo &emitInfo, uint64 specialOffset)
+{
+    MIRAddrofConst &symAddr = static_cast<MIRAddrofConst &>(emitInfo.elemConst);
+    MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr.GetSymbolIndex().Idx());
+    const std::string &symAddrName = symAddrSym->GetName();
+    LabelFixup labelFixup(symAddrName, emitInfo.offset, kLabelFixupDirect64);
+    if (specialOffset != 0) {
+        DataSection::AddLabelFixup(emitInfo.labelFixups, labelFixup);
+    }
+    uint64 value = specialOffset
- emitInfo.offset; + size_t size = GetPrimTypeSize(emitInfo.elemConst.GetType().GetPrimType()); + dataSection->AppendData(&value, size); + emitInfo.offset += size; + +#ifdef OBJ_DEBUG + LogInfo::MapleLogger() << symAddrName << " size: " << size << "\n"; +#endif +} + +void ObjEmitter::EmitMIRAddrofConst(EmitInfo &emitInfo) +{ + EmitMIRAddrofConstCommon(emitInfo, 0); +} + +void ObjEmitter::EmitMIRAddrofConstOffset(EmitInfo &emitInfo) +{ + /* 2 is fixed offset in runtime */ + EmitMIRAddrofConstCommon(emitInfo, 2); +} + +void ObjEmitter::EmitFunctionSymbolTable(ObjFuncEmitInfo &objFuncEmitInfo, std::vector &symbol2Offset) +{ + CGFunc &cgFunc = objFuncEmitInfo.GetCGFunc(); + MIRFunction *func = &cgFunc.GetFunction(); + + size_t size = + (func == nullptr) ? GlobalTables::GetGsymTable().GetTable().size() : func->GetSymTab()->GetTable().size(); + for (size_t i = 0; i < size; ++i) { + const MIRSymbol *st = nullptr; + if (func == nullptr) { + auto &symTab = GlobalTables::GetGsymTable(); + st = symTab.GetSymbol(i); + } else { + auto &symTab = *func->GetSymTab(); + st = symTab.GetSymbolAt(i); + } + if (st == nullptr) { + continue; + } + MIRStorageClass storageClass = st->GetStorageClass(); + MIRSymKind symKind = st->GetSKind(); + if (storageClass == kScPstatic && symKind == kStConst) { + // align + size_t tmpOffset = GetBeforeTextDataSize(objFuncEmitInfo); + uint32 offset = Alignment::Align(tmpOffset, k8ByteSize); + uint32 padding = offset - tmpOffset; + objFuncEmitInfo.FillTextDataNop(padding); + CHECK_FATAL(cgFunc.GetLocalSymLabelIndex(*st) <= symbol2Offset.size(), "symbol2Offset"); + symbol2Offset[cgFunc.GetLocalSymLabelIndex(*st)] = static_cast(objFuncEmitInfo.GetTextDataSize()); + if (st->GetKonst()->GetKind() == kConstStr16Const) { + EmitStr16Const(objFuncEmitInfo, *st); + continue; + } + + if (st->GetKonst()->GetKind() == kConstStrConst) { + EmitStrConst(objFuncEmitInfo, *st); + continue; + } + + switch (st->GetKonst()->GetType().GetPrimType()) { + case PTY_u32: { + MIRIntConst *intConst = safe_cast(st->GetKonst()); + uint32 value = static_cast(intConst->GetValue().GetExtValue()); + objFuncEmitInfo.AppendTextData(&value, sizeof(value)); + break; + } + case PTY_f32: { + MIRFloatConst *floatConst = safe_cast(st->GetKonst()); + uint32 value = static_cast(floatConst->GetIntValue()); + objFuncEmitInfo.AppendTextData(&value, sizeof(value)); + break; + } + case PTY_f64: { + MIRDoubleConst *doubleConst = safe_cast(st->GetKonst()); + uint32 value = doubleConst->GetIntLow32(); + objFuncEmitInfo.AppendTextData(&value, sizeof(value)); + value = doubleConst->GetIntHigh32(); + objFuncEmitInfo.AppendTextData(&value, sizeof(value)); + break; + } + default: + break; + } + } + } +} + +void ObjEmitter::EmitStr16Const(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &str16Symbol) +{ + MIRStr16Const *mirStr16Const = safe_cast(str16Symbol.GetKonst()); + const std::u16string &str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16Const->GetValue()); + + uint32 len = str16.length(); + for (uint32 i = 0; i < len; ++i) { + char16_t c = str16[i]; + objFuncEmitInfo.AppendTextData(&c, sizeof(c)); + } + if ((str16.length() & 0x1) == 1) { + uint16 value = 0; + objFuncEmitInfo.AppendTextData(&value, sizeof(value)); + } +} + +void ObjEmitter::EmitStrConst(ObjFuncEmitInfo &objFuncEmitInfo, const MIRSymbol &strSymbol) +{ + MIRStrConst *mirStrConst = safe_cast(strSymbol.GetKonst()); + + auto str = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirStrConst->GetValue()); + size_t size = str.length(); + /* 1 is tail 0 
of the str string */ + objFuncEmitInfo.AppendTextData(str.c_str(), size + 1); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/offset_adjust.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/offset_adjust.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fc9609108f1dce3cd560825b2a16eb250f972f0a --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/offset_adjust.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "offset_adjust.h" +#if TARGAARCH64 +#include "aarch64_offset_adjust.h" +#elif TARGRISCV64 +#include "riscv64_offset_adjust.h" +#endif +#if TARGARM32 +#include "arm32_offset_adjust.h" +#endif + +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; +bool CgFrameFinalize::PhaseRun(maplebe::CGFunc &f) +{ + FrameFinalize *offsetAdjustment = nullptr; +#if TARGAARCH64 || TARGRISCV64 + offsetAdjustment = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + offsetAdjustment = GetPhaseAllocator()->New(f); +#endif + offsetAdjustment->Run(); + return false; +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/operand.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/operand.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9f6e40ef3adfb0e7e4a4a4a6d51264b18742b7ca --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/operand.cpp @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "operand.h" +#include "common_utils.h" +#include "mpl_logging.h" + +namespace maplebe { +bool IsMoveWidableImmediate(uint64 val, uint32 bitLen) +{ + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + +bool BetterUseMOVZ(uint64 val) +{ + int32 n16zerosChunks = 0; + int32 n16onesChunks = 0; + uint64 sa = 0; + /* a 64 bits number is split 4 chunks, each chunk has 16 bits. 
check whether each chunk is all ones or all zeros */
+    for (uint64 i = 0; i < k4BitSize; ++i, sa += k16BitSize) {
+        uint64 chunkVal = (val >> (static_cast<uint32>(sa))) & 0x0000FFFFUL;
+        if (chunkVal == 0) {
+            ++n16zerosChunks;
+        } else if (chunkVal == 0xFFFFUL) {
+            ++n16onesChunks;
+        }
+    }
+    /*
+     * note that since we have already checked whether the value
+     * can be moved with a single mov instruction,
+     * we should not expect either n_16zeros_chunks >= 3 or n_16ones_chunks >= 3
+     */
+#if DEBUG
+    constexpr uint32 kN16ChunksCheck = 2;
+    DEBUG_ASSERT(n16zerosChunks <= kN16ChunksCheck, "n16zerosChunks ERR");
+    DEBUG_ASSERT(n16onesChunks <= kN16ChunksCheck, "n16onesChunks ERR");
+#endif
+    return (n16zerosChunks >= n16onesChunks);
+}
+
+bool RegOperand::operator==(const RegOperand &o) const
+{
+    regno_t myRn = GetRegisterNumber();
+    uint32 mySz = GetSize();
+    uint32 myFl = regFlag;
+    regno_t otherRn = o.GetRegisterNumber();
+    uint32 otherSz = o.GetSize();
+    uint32 otherFl = o.regFlag;
+
+    if (IsPhysicalRegister()) {
+        return (myRn == otherRn && mySz == otherSz && myFl == otherFl);
+    }
+    return (myRn == otherRn && mySz == otherSz);
+}
+
+bool RegOperand::operator<(const RegOperand &o) const
+{
+    regno_t myRn = GetRegisterNumber();
+    uint32 mySz = GetSize();
+    uint32 myFl = regFlag;
+    regno_t otherRn = o.GetRegisterNumber();
+    uint32 otherSz = o.GetSize();
+    uint32 otherFl = o.regFlag;
+    return myRn < otherRn || (myRn == otherRn && mySz < otherSz) ||
+           (myRn == otherRn && mySz == otherSz && myFl < otherFl);
+}
+
+Operand *MemOperand::GetOffset() const
+{
+    switch (addrMode) {
+        case kAddrModeBOi:
+            return GetOffsetOperand();
+        case kAddrModeBOrX:
+            return GetIndexRegister();
+        case kAddrModeLiteral:
+            break;
+        case kAddrModeLo12Li:
+            break;
+        default:
+            DEBUG_ASSERT(false, "error memoperand dump");
+            break;
+    }
+    return nullptr;
+}
+
+bool MemOperand::Equals(Operand &op) const
+{
+    if (!op.IsMemoryAccessOperand()) {
+        return false;
+    }
+    return Equals(static_cast<MemOperand &>(op));
+}
+
+bool MemOperand::Equals(const MemOperand &op) const
+{
+    if (&op == this) {
+        return true;
+    }
+
+    if (addrMode == op.GetAddrMode()) {
+        switch (addrMode) {
+            case kAddrModeBOi:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) &&
+                        GetOffsetImmediate()->Equals(*op.GetOffsetImmediate()));
+            case kAddrModeBOrX:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) &&
+                        GetIndexRegister()->Equals(*op.GetIndexRegister()) &&
+                        GetExtendAsString() == op.GetExtendAsString() && ShiftAmount() == op.ShiftAmount());
+            case kAddrModeLiteral:
+                return GetSymbolName() == op.GetSymbolName();
+            case kAddrModeLo12Li:
+                return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && GetSymbolName() == op.GetSymbolName() &&
+                        GetOffsetImmediate()->Equals(*op.GetOffsetImmediate()));
+            default:
+                DEBUG_ASSERT(false, "error memoperand");
+                break;
+        }
+    }
+    return false;
+}
+
+bool MemOperand::Less(const Operand &right) const
+{
+    if (&right == this) {
+        return false;
+    }
+
+    /* For different types.
*/ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + const MemOperand *rightOpnd = static_cast(&right); + if (addrMode != rightOpnd->addrMode) { + return addrMode < rightOpnd->addrMode; + } + + switch (addrMode) { + case kAddrModeBOi: { + DEBUG_ASSERT(idxOpt == kIntact, "Should not compare pre/post index addressing."); + + RegOperand *baseReg = GetBaseRegister(); + RegOperand *rbaseReg = rightOpnd->GetBaseRegister(); + int32 nRet = baseReg->RegCompare(*rbaseReg); + if (nRet == 0) { + Operand *ofstOpnd = GetOffsetOperand(); + const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); + return ofstOpnd->Less(*rofstOpnd); + } + return nRet < 0; + } + case kAddrModeBOrX: { + if (noExtend != rightOpnd->noExtend) { + return noExtend; + } + if (!noExtend && extend != rightOpnd->extend) { + return extend < rightOpnd->extend; + } + RegOperand *indexReg = GetIndexRegister(); + const RegOperand *rindexReg = rightOpnd->GetIndexRegister(); + return indexReg->Less(*rindexReg); + } + case kAddrModeLiteral: { + return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); + } + case kAddrModeLo12Li: { + if (GetSymbol() != rightOpnd->GetSymbol()) { + return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); + } + Operand *ofstOpnd = GetOffsetOperand(); + const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); + return ofstOpnd->Less(*rofstOpnd); + } + default: + DEBUG_ASSERT(false, "Internal error."); + return false; + } +} + +const char *CondOperand::ccStrs[kCcLast] = {"EQ", "NE", "CS", "HS", "CC", "LO", "MI", "PL", "VS", + "VC", "HI", "LS", "GE", "LT", "GT", "LE", "AL"}; + +bool CondOperand::Less(const Operand &right) const +{ + if (&right == this) { + return false; + } + + /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + const CondOperand *rightOpnd = static_cast(&right); + + /* The same type. */ + if (cc == CC_AL || rightOpnd->cc == CC_AL) { + return false; + } + return cc < rightOpnd->cc; +} + +uint32 PhiOperand::GetLeastCommonValidBit() const +{ + uint32 leastCommonVb = 0; + for (auto phiOpnd : phiList) { + uint32 curVb = phiOpnd.second->GetValidBitsNum(); + if (curVb > leastCommonVb) { + leastCommonVb = curVb; + } + } + return leastCommonVb; +} +bool PhiOperand::IsRedundancy() const +{ + uint32 srcSsaIdx = 0; + for (auto phiOpnd : phiList) { + if (srcSsaIdx == 0) { + srcSsaIdx = phiOpnd.second->GetRegisterNumber(); + } + if (srcSsaIdx != phiOpnd.second->GetRegisterNumber()) { + return false; + } + } + return true; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/optimize_common.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/optimize_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0e9d7b2972b11c319d1bbf229a35d8d1ded5858 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/optimize_common.cpp @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "optimize_common.h" +#include "cgbb.h" +#include "cg.h" +#include "cg_option.h" +#include "loop.h" +#include "securec.h" + +/* This file provides common class and function for cfgo and ico. */ +namespace maplebe { +void Optimizer::Run(const std::string &funcName, bool checkOnly) +{ + /* Initialize cfg optimization patterns */ + InitOptimizePatterns(); + + /* For each pattern, search cgFunc for optimization */ + for (OptimizationPattern *p : diffPassPatterns) { + p->Search2Op(checkOnly); + } + /* Search the cgFunc for multiple possible optimizations in one pass */ + if (!singlePassPatterns.empty()) { + BB *curBB = cgFunc->GetFirstBB(); + bool flag = false; + while (curBB != nullptr) { + for (OptimizationPattern *p : singlePassPatterns) { + if (p->Optimize(*curBB)) { + flag = p->IsKeepPosition(); + p->SetKeepPosition(false); + break; + } + } + + if (flag) { + flag = false; + } else { + curBB = curBB->GetNext(); + } + } + } + + if (CGOptions::IsDumpOptimizeCommonLog()) { + constexpr int arrSize = 80; + char post[arrSize]; + errno_t cpyRet = strcpy_s(post, arrSize, "post-"); + CHECK_FATAL(cpyRet == EOK, "call strcpy_s failed"); + errno_t catRes = strcat_s(post, arrSize, name); + CHECK_FATAL(catRes == EOK, "call strcat_s failed "); + OptimizeLogger::GetLogger().Print(funcName); + } + OptimizeLogger::GetLogger().ClearLocal(); +} + +void OptimizationPattern::Search2Op(bool noOptimize) +{ + checkOnly = noOptimize; + BB *curBB = cgFunc->GetFirstBB(); + while (curBB != nullptr) { + bool changed = false; + do { + changed = Optimize(*curBB); + } while (changed); + if (keepPosition) { + keepPosition = false; + } else { + curBB = curBB->GetNext(); + } + } +} + +void OptimizationPattern::Log(uint32 bbID) +{ + OptimizeLogger::GetLogger().Log(patternName.c_str()); + DotGenerator::SetColor(bbID, dotColor.c_str()); +} + +std::map DotGenerator::coloringMap; + +void DotGenerator::SetColor(uint32 bbID, const std::string &color) +{ + coloringMap[bbID] = color; +} + +std::string DotGenerator::GetFileName(const MIRModule &mirModule, const std::string &filePreFix) +{ + std::string fileName; + if (!filePreFix.empty()) { + fileName.append(filePreFix); + fileName.append("-"); + } + fileName.append(mirModule.GetFileName()); + for (uint32 i = 0; i < fileName.length(); i++) { + if (fileName[i] == ';' || fileName[i] == '/' || fileName[i] == '|') { + fileName[i] = '_'; + } + } + + fileName.append(".dot"); + return fileName; +} + +static bool IsBackEdgeForLoop(const CGFuncLoops &loop, const BB &from, const BB &to) +{ + const BB *header = loop.GetHeader(); + if (header->GetId() == to.GetId()) { + for (auto *be : loop.GetBackedge()) { + if (be->GetId() == from.GetId()) { + return true; + } + } + } + for (auto *inner : loop.GetInnerLoops()) { + if (IsBackEdgeForLoop(*inner, from, to)) { + return true; + } + } + return false; +} +bool DotGenerator::IsBackEdge(const CGFunc &cgFunction, const BB &from, const BB &to) +{ + for (const auto *loop : cgFunction.GetLoops()) { + if (IsBackEdgeForLoop(*loop, from, to)) { + return true; + } + } + return false; +} + +void DotGenerator::DumpEdge(const CGFunc &cgFunction, std::ofstream &cfgFileOfStream, bool isIncludeEH) +{ + FOR_ALL_BB_CONST(bb, &cgFunction) { + for (auto *succBB : bb->GetSuccs()) { + cfgFileOfStream << "BB" << bb->GetId(); + cfgFileOfStream << " -> " + << "BB" << succBB->GetId(); + if (IsBackEdge(cgFunction, *bb, *succBB)) { + cfgFileOfStream << " 
[color=red]"; + } else { + cfgFileOfStream << " [color=green]"; + } + cfgFileOfStream << ";\n"; + } + if (isIncludeEH) { + for (auto *ehSuccBB : bb->GetEhSuccs()) { + cfgFileOfStream << "BB" << bb->GetId(); + cfgFileOfStream << " -> " + << "BB" << ehSuccBB->GetId(); + cfgFileOfStream << "[color=red]"; + cfgFileOfStream << ";\n"; + } + } + } +} + +bool DotGenerator::FoundListOpndRegNum(ListOperand &listOpnd, const Insn &insnObj, regno_t vReg) +{ + bool exist = false; + for (auto op : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(op); + if (op->IsRegister() && regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + break; + } + } + return exist; +} + +bool DotGenerator::FoundMemAccessOpndRegNum(const MemOperand &memOperand, const Insn &insnObj, regno_t vReg) +{ + Operand *base = memOperand.GetBaseRegister(); + Operand *offset = memOperand.GetIndexRegister(); + bool exist = false; + if (base != nullptr && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + if (regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + } else if (offset != nullptr && offset->IsRegister()) { + RegOperand *regOpnd = static_cast(offset); + if (regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + } + return exist; +} + +bool DotGenerator::FoundNormalOpndRegNum(const RegOperand ®Opnd, const Insn &insnObj, regno_t vReg) +{ + bool exist = false; + if (regOpnd.GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + return exist; +} + +void DotGenerator::DumpBBInstructions(const CGFunc &cgFunction, regno_t vReg, std::ofstream &cfgFile) +{ + FOR_ALL_BB_CONST(bb, &cgFunction) { + if (vReg != 0) { + FOR_BB_INSNS_CONST(insn, bb) { + bool found = false; + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + found = FoundListOpndRegNum(listOpnd, *insn, vReg); + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + found = FoundMemAccessOpndRegNum(memOpnd, *insn, vReg); + } else { + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + found = FoundNormalOpndRegNum(regOpnd, *insn, vReg); + } + } + if (found) { + break; + } + } + if (found) { + break; + } + } + } + cfgFile << "BB" << bb->GetId() << "["; + auto it = coloringMap.find(bb->GetId()); + if (it != coloringMap.end()) { + cfgFile << "style=filled,fillcolor=" << it->second << ","; + } + if (bb->GetKind() == BB::kBBIf) { + cfgFile << "shape=diamond,label= \" BB" << bb->GetId() << ":\n"; + } else { + cfgFile << "shape=box,label= \" BB" << bb->GetId() << ":\n"; + } + cfgFile << "{ "; + cfgFile << bb->GetKindName() << "\n"; + cfgFile << bb->GetFrequency() << "\n"; + if (bb->GetLabIdx() != 0) { + cfgFile << "LabIdx=" << bb->GetLabIdx() << "\n"; + } + cfgFile << "}\"];\n"; + } +} + +/* Generate dot file for cfg */ +void DotGenerator::GenerateDot(const std::string &preFix, const CGFunc &cgFunc, const MIRModule &mod, bool includeEH, + const std::string fname, regno_t vReg) +{ + std::ofstream cfgFile; + std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ 
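+    /* From here until std::cout.rdbuf(coutBuf) at the end of this function, anything
+       written to std::cout is redirected into cfgFile's stream buffer. */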
+ std::streambuf *buf = cfgFile.rdbuf(); + std::cout.rdbuf(buf); + std::string fileName = GetFileName(mod, (preFix + "-" + fname)); + + cfgFile.open(fileName, std::ios::trunc); + CHECK_FATAL(cfgFile.is_open(), "Failed to open output file: %s", fileName.c_str()); + cfgFile << "digraph {\n"; + /* dump edge */ + DumpEdge(cgFunc, cfgFile, includeEH); + + /* dump instruction in each BB */ + DumpBBInstructions(cgFunc, vReg, cfgFile); + + cfgFile << "}\n"; + coloringMap.clear(); + cfgFile.flush(); + cfgFile.close(); + std::cout.rdbuf(coutBuf); +} + +void OptimizeLogger::Print(const std::string &funcName) +{ + if (!localStat.empty()) { + LogInfo::MapleLogger() << funcName << '\n'; + for (const auto &localStatPair : localStat) { + LogInfo::MapleLogger() << "Optimized " << localStatPair.first << ":" << localStatPair.second << "\n"; + } + + ClearLocal(); + LogInfo::MapleLogger() << "Total:" << '\n'; + for (const auto &globalStatPair : globalStat) { + LogInfo::MapleLogger() << "Optimized " << globalStatPair.first << ":" << globalStatPair.second << "\n"; + } + } +} + +void OptimizeLogger::Log(const std::string &patternName) +{ + auto itemInGlobal = globalStat.find(patternName); + if (itemInGlobal != globalStat.end()) { + itemInGlobal->second++; + } else { + (void)globalStat.emplace(std::pair(patternName, 1)); + } + auto itemInLocal = localStat.find(patternName); + if (itemInLocal != localStat.end()) { + itemInLocal->second++; + } else { + (void)localStat.emplace(std::pair(patternName, 1)); + } +} + +void OptimizeLogger::ClearLocal() +{ + localStat.clear(); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/peep.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..59784e05752f82b6d9b929c88391a93c26ec2c43 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/peep.cpp @@ -0,0 +1,768 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#if TARGAARCH64 +#include "aarch64_peep.h" +#elif TARGRISCV64 +#include "riscv64_peep.h" +#elif defined TARGX86_64 +#include "x64_peep.h" +#endif +#if TARGARM32 +#include "arm32_peep.h" +#endif + +namespace maplebe { +#if TARGAARCH64 +bool CGPeepPattern::IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg) +{ + if (startInsn.GetBB() != endInsn.GetBB()) { + return true; + } + CHECK_FATAL(ssaInfo != nullptr, "must have ssaInfo"); + CHECK_FATAL(ccReg.IsSSAForm(), "cc reg must be ssa form"); + for (auto *curInsn = startInsn.GetNext(); curInsn != nullptr && curInsn != &endInsn; curInsn = curInsn->GetNext()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsCall()) { + return true; + } + uint32 opndNum = curInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = curInsn->GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto ®Opnd = static_cast(opnd); + if (!curInsn->IsRegDefined(regOpnd.GetRegisterNumber())) { + continue; + } + if (static_cast(opnd).IsOfCC()) { + VRegVersion *ccVersion = ssaInfo->FindSSAVersion(ccReg.GetRegisterNumber()); + VRegVersion *curCCVersion = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + CHECK_FATAL(ccVersion != nullptr && curCCVersion != nullptr, + "RegVersion must not be null based on ssa"); + CHECK_FATAL(!ccVersion->IsDeleted() && !curCCVersion->IsDeleted(), "deleted version"); + if (ccVersion->GetVersionIdx() != curCCVersion->GetVersionIdx()) { + return true; + } + } + } + } + return false; +} + +int64 CGPeepPattern::GetLogValueAtBase2(int64 val) const +{ + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; +} + +InsnSet CGPeepPattern::GetAllUseInsn(const RegOperand &defReg) +{ + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *secondInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(secondInsn); + } + } + return allUseInsn; +} + +Insn *CGPeepPattern::GetDefInsn(const RegOperand &useReg) +{ + if (!useReg.IsSSAForm()) { + return nullptr; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + DEBUG_ASSERT(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + CHECK_FATAL(!useVersion->IsDeleted(), "deleted version"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + return defInfo == nullptr ? 
nullptr : defInfo->GetInsn(); +} + +void CGPeepPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) +{ + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +/* Check if a regOpnd is live after insn. True if live, otherwise false. */ +bool CGPeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) +{ + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsList()) { + auto &opndList = static_cast(opnd).GetOperands(); + if (find(opndList.begin(), opndList.end(), ®Opnd) != opndList.end()) { + return true; + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool CGPeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) +{ + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
*/ +bool CGPeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const +{ + for (auto succ : bb.GetSuccs()) { + DEBUG_ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + DEBUG_ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool CGPeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const +{ +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
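+ * For example, when the first insn touching the register is "add R1, R0, #1",
+ * a query on R0 finds a use first (kResUseFirst), while a query on R1 finds
+ * its redefinition first (kResDefFirst).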
+ */ +ReturnType CGPeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const +{ + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + DEBUG_ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +int PeepPattern::logValueAtBase2(int64 val) const +{ + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : (-1); +} + +/* Check if a regOpnd is live after insn. True if live, otherwise false. 
*/ +bool PeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) +{ + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsList()) { + auto &opndList = static_cast(opnd).GetOperands(); + if (find(opndList.begin(), opndList.end(), ®Opnd) != opndList.end()) { + return true; + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool PeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) +{ + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, &cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
*/ +bool PeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const +{ + for (auto succ : bb.GetSuccs()) { + DEBUG_ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + DEBUG_ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool PeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const +{ +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc.GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
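+ * (This duplicates CGPeepPattern::IsOpndLiveinBB above for the non-SSA
+ * PeepPattern path; the return contract is identical.)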
+ */ +ReturnType PeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const +{ + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + DEBUG_ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + DEBUG_ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +bool PeepPattern::IsMemOperandOptPattern(const Insn &insn, Insn &nextInsn) +{ + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + auto *memOpnd = static_cast(nextInsn.GetMemOpnd()); + DEBUG_ASSERT(memOpnd != nullptr, "null ptr check"); + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return false; + } + /* Only for immediate is 0. */ + if (memOpnd->GetOffsetImmediate()->GetOffsetValue() != 0) { + return false; + } + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return false; + } + + auto &oldBaseOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* Check if dest operand of insn is idential with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != &oldBaseOpnd) { + return false; + } + +#ifdef USE_32BIT_REF + if (nextInsn.IsAccessRefField() && nextInsn.GetOperand(kInsnFirstOpnd).GetSize() > k32BitSize) { + return false; + } +#endif + /* Check if x0 is used after ldr insn, and if it is in live-out. 
+
+template <typename T>
+void PeepOptimizer::Run()
+{
+    auto *patterMatcher = peepOptMemPool->New<T>(cgFunc, peepOptMemPool);
+    patterMatcher->InitOpts();
+    FOR_ALL_BB(bb, &cgFunc) {
+        FOR_BB_INSNS_SAFE(insn, bb, nextInsn) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+            patterMatcher->Run(*bb, *insn);
+        }
+    }
+}
+
+int32 PeepOptimizer::index = 0;
+
+void PeepHoleOptimizer::Peephole0()
+{
+    auto memPool = std::make_unique<MemPool>(memPoolCtrler, "peepholeOptObj");
+    PeepOptimizer peepOptimizer(*cgFunc, memPool.get());
+#if TARGAARCH64 || TARGRISCV64
+    peepOptimizer.Run<AArch64PeepHole0>();
+#endif
+#if TARGARM32
+    peepOptimizer.Run<Arm32PeepHole0>();
+#endif
+}
+
+void PeepHoleOptimizer::PeepholeOpt()
+{
+    auto memPool = std::make_unique<MemPool>(memPoolCtrler, "peepholeOptObj");
+    PeepOptimizer peepOptimizer(*cgFunc, memPool.get());
+#if TARGAARCH64 || TARGRISCV64
+    peepOptimizer.Run<AArch64PeepHole>();
+#endif
+#if TARGARM32
+    peepOptimizer.Run<Arm32PeepHole>();
+#endif
+}
+
+void PeepHoleOptimizer::PrePeepholeOpt()
+{
+    auto memPool = std::make_unique<MemPool>(memPoolCtrler, "peepholeOptObj");
+    PeepOptimizer peepOptimizer(*cgFunc, memPool.get());
+#if TARGAARCH64 || TARGRISCV64
+    peepOptimizer.Run<AArch64PrePeepHole>();
+#endif
+#if TARGARM32
+    peepOptimizer.Run<Arm32PrePeepHole>();
+#endif
+}
+
+void PeepHoleOptimizer::PrePeepholeOpt1()
+{
+    auto memPool = std::make_unique<MemPool>(memPoolCtrler, "peepholeOptObj");
+    PeepOptimizer peepOptimizer(*cgFunc, memPool.get());
+#if TARGAARCH64 || TARGRISCV64
+    peepOptimizer.Run<AArch64PrePeepHole1>();
+#endif
+#if TARGARM32
+    peepOptimizer.Run<Arm32PrePeepHole1>();
+#endif
+}
+
+/* === SSA form === */
+bool CgPeepHole::PhaseRun(maplebe::CGFunc &f)
+{
+    CGSSAInfo *cgssaInfo = GET_ANALYSIS(CgSSAConstruct, f);
+    CHECK_FATAL((cgssaInfo != nullptr), "Get ssaInfo failed!");
+    MemPool *mp = GetPhaseMemPool();
+    auto *cgpeep = mp->New<AArch64CGPeepHole>(f, mp, cgssaInfo);
+    CHECK_FATAL((cgpeep != nullptr), "Create AArch64CGPeepHole failed!");
+    cgpeep->Run();
+    return false;
+}
+
+void CgPeepHole::GetAnalysisDependence(AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgSSAConstruct>();
+    aDep.AddPreserved<CgSSAConstruct>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole, cgpeephole)
+#endif
+
+/* === Physical Pre Form === */
+bool CgPrePeepHole::PhaseRun(maplebe::CGFunc &f)
+{
+    MemPool *mp = GetPhaseMemPool();
+#if defined TARGAARCH64
+    auto *cgpeep = mp->New<AArch64CGPeepHole>(f, mp);
+#elif defined TARGX86_64
+    auto *cgpeep = mp->New<X64CGPeepHole>(f, mp);
+#endif
+    CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure");
+    cgpeep->Run();
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole, cgprepeephole)
+
+/* === Physical Post Form === */
+bool CgPostPeepHole::PhaseRun(maplebe::CGFunc &f)
+{
+    MemPool *mp = GetPhaseMemPool();
+#if defined TARGAARCH64
+    auto *cgpeep = mp->New<AArch64CGPeepHole>(f, mp);
+#elif defined TARGX86_64
+    auto *cgpeep = mp->New<X64CGPeepHole>(f, mp);
+#endif
+    CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure");
+    cgpeep->Run();
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostPeepHole, cgpostpeephole)
+
+#if TARGAARCH64
+bool CgPrePeepHole0::PhaseRun(maplebe::CGFunc &f)
+{
+    auto *peep = GetPhaseMemPool()->New<PeepHoleOptimizer>(&f);
+    CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure");
+    peep->PrePeepholeOpt();
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole0, prepeephole)
+
+bool CgPrePeepHole1::PhaseRun(maplebe::CGFunc &f)
+{
+    auto *peep = GetPhaseMemPool()->New<PeepHoleOptimizer>(&f);
+    CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure");
+    peep->PrePeepholeOpt1();
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole1, prepeephole1) + +bool CgPeepHole0::PhaseRun(maplebe::CGFunc &f) +{ + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->Peephole0(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole0, peephole0) + +bool CgPeepHole1::PhaseRun(maplebe::CGFunc &f) +{ + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PeepholeOpt(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole1, peephole) +#endif + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/pressure.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/pressure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..94db0144963e230d9b68deafca9db2758f08cbe2 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/pressure.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pressure.h" +#if TARGAARCH64 +#include "aarch64_schedule.h" +#elif TARGRISCV64 +#include "riscv64_schedule.h" +#endif +#include "deps.h" + +namespace maplebe { +/* ------- RegPressure function -------- */ +int32 RegPressure::maxRegClassNum = 0; + +/* print regpressure information */ +void RegPressure::DumpRegPressure() const +{ + PRINT_STR_VAL("Priority: ", priority); + PRINT_STR_VAL("maxDepth: ", maxDepth); + PRINT_STR_VAL("near: ", near); + PRINT_STR_VAL("callNum: ", callNum); + + LogInfo::MapleLogger() << "\n"; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/proepilog.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d146bc3b74f9b3b8bb0f1bdc93f862731bf8b48e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/proepilog.cpp @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "proepilog.h" +#if TARGAARCH64 +#include "aarch64_proepilog.h" +#elif TARGRISCV64 +#include "riscv64_proepilog.h" +#endif +#if TARGARM32 +#include "arm32_proepilog.h" +#endif +#if TARGX86_64 +#include "x64_proepilog.h" +#endif +#include "cgfunc.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; + +Insn *GenProEpilog::InsertCFIDefCfaOffset(int32 &cfiOffset, Insn &insertAfter) +{ + if (!cgFunc.GenCfi()) { + return &insertAfter; + } + cfiOffset = AddtoOffsetFromCFA(cfiOffset); + Insn &cfiInsn = cgFunc.GetInsnBuilder() + ->BuildCfiInsn(cfi::OP_CFI_def_cfa_offset) + .AddOpndChain(cgFunc.CreateCfiImmOperand(cfiOffset, k64BitSize)); + Insn *newIPoint = cgFunc.GetCurBB()->InsertInsnAfter(insertAfter, cfiInsn); + cgFunc.SetDbgCallFrameOffset(cfiOffset); + return newIPoint; +} + +/* there are two stack protector: + * 1. stack protector all: for all function + * 2. stack protector strong: for some functon that + * <1> invoke alloca functon; + * <2> use stack address; + * <3> callee use return stack slot; + * <4> local symbol is vector type; + * */ +void GenProEpilog::NeedStackProtect() +{ + DEBUG_ASSERT(stackProtect == false, "no stack protect default"); + CG *currCG = cgFunc.GetCG(); + if (currCG->IsStackProtectorAll()) { + stackProtect = true; + return; + } + + if (!currCG->IsStackProtectorStrong()) { + return; + } + + if (cgFunc.HasAlloca()) { + stackProtect = true; + return; + } + + /* check if function use stack address or callee function return stack slot */ + auto stackProtectInfo = cgFunc.GetStackProtectInfo(); + if ((stackProtectInfo & kAddrofStack) != 0 || (stackProtectInfo & kRetureStackSlot) != 0) { + stackProtect = true; + return; + } + + /* check if local symbol is vector type */ + auto &mirFunction = cgFunc.GetFunction(); + uint32 symTabSize = static_cast(mirFunction.GetSymTab()->GetSymbolTableSize()); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *symbol = mirFunction.GetSymTab()->GetSymbolFromStIdx(i); + if (symbol == nullptr || symbol->GetStorageClass() != kScAuto || symbol->IsDeleted()) { + continue; + } + TyIdx tyIdx = symbol->GetTyIdx(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (type->GetKind() == kTypeArray) { + stackProtect = true; + return; + } + + if (type->IsStructType() && IncludeArray(*type)) { + stackProtect = true; + return; + } + } +} + +bool GenProEpilog::IncludeArray(const MIRType &type) const +{ + DEBUG_ASSERT(type.IsStructType(), "agg must be one of class/struct/union"); + auto &structType = static_cast(type); + /* all elements of struct. 
 */
+    auto num = static_cast<uint32>(structType.GetFieldsSize());
+    for (uint32 i = 0; i < num; ++i) {
+        MIRType *elemType = structType.GetElemType(i);
+        if (elemType->GetKind() == kTypeArray) {
+            return true;
+        }
+        if (elemType->IsStructType() && IncludeArray(*elemType)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool CgGenProEpiLog::PhaseRun(maplebe::CGFunc &f)
+{
+    GenProEpilog *genPE = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    genPE = GetPhaseAllocator()->New<AArch64GenProEpilog>(f, *ApplyTempMemPool());
+#endif
+#if TARGARM32
+    genPE = GetPhaseAllocator()->New<Arm32GenProEpilog>(f);
+#endif
+#if TARGX86_64
+    genPE = GetPhaseAllocator()->New<X64GenProEpilog>(f);
+#endif
+    genPE->Run();
+    return false;
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/ra_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ra_opt.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1343f63cccfea6ca61e539f53e4d968a75080c2c
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/ra_opt.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "cgfunc.h"
+#if TARGAARCH64
+#include "aarch64_ra_opt.h"
+#elif TARGRISCV64
+#include "riscv64_ra_opt.h"
+#endif
+
+namespace maplebe {
+using namespace maple;
+
+bool CgRaOpt::PhaseRun(maplebe::CGFunc &f)
+{
+    MemPool *memPool = GetPhaseMemPool();
+    RaOpt *raOpt = nullptr;
+#if TARGAARCH64
+    raOpt = memPool->New<AArch64RaOpt>(f, *memPool);
+#elif TARGRISCV64
+    raOpt = memPool->New<Riscv64RaOpt>(f, *memPool);
+#endif
+
+    if (raOpt) {
+        raOpt->Run();
+    }
+    return false;
+}
+MAPLE_TRANSFORM_PHASE_REGISTER(CgRaOpt, raopt)
+} /* namespace maplebe */
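CgRaOpt::PhaseRun above shows the convention these thin phase wrappers all follow: the target decides at preprocessing time which concrete worker (if any) gets instantiated, and a null worker simply turns the phase into a no-op. A minimal standalone sketch of that dispatch; RaOptLike and its subclass are illustrative stand-ins, not the real RaOpt hierarchy:

    #include <memory>

    struct RaOptLike {
        virtual ~RaOptLike() = default;
        virtual void Run() = 0;
    };

    struct AArch64RaOptLike : RaOptLike {
        void Run() override { /* target-specific pre-RA rewriting would go here */ }
    };

    // Exactly one branch survives preprocessing; targets without an
    // implementation leave the pointer null and the phase does nothing.
    std::unique_ptr<RaOptLike> MakeRaOpt()
    {
    #if defined(TARGAARCH64)
        return std::make_unique<AArch64RaOptLike>();
    #else
        return nullptr;
    #endif
    }

    void RunRaOptPhase()
    {
        if (auto opt = MakeRaOpt()) {
            opt->Run();
        }
    }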
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/reaching.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reaching.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0d5a5ecb380ecc13b4b0aaf685e91a4ad222d031
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reaching.cpp
@@ -0,0 +1,1454 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if TARGAARCH64
+#include "aarch64_reaching.h"
+#include "aarch64_isa.h"
+#elif TARGRISCV64
+#include "riscv64_reaching.h"
+#endif
+#if TARGARM32
+#include "arm32_reaching.h"
+#endif
+#include "cg_option.h"
+#include "cgfunc.h"
+#include "cg.h"
+
+/*
+ * This phase builds bb->in and bb->out information for stack MemOperands and RegOperands. Each bit in DataInfo
+ * represents whether the register or memory slot is live or not. To save storage space, the stack offset is
+ * divided by 4, since the offset is a multiple of 4.
+ * The algorithm mainly includes two parts:
+ * 1. initialize each BB
+ *    (1) insert pseudo insns for function parameters, ehBB, and return R0/V0
+ *    (2) init bb->gen, bb->use, bb->out
+ * 2. build in and out
+ *    (1) In[BB] = Union of all Out[Parents(BB)]
+ *    (2) Out[BB] = Gen[BB] union In[BB]
+ * Additionally, this phase provides several common functions about data flow; users can call these functions
+ * conveniently in optimization phases.
+ */
+namespace maplebe {
+ReachingDefinition::ReachingDefinition(CGFunc &func, MemPool &memPool)
+    : AnalysisResult(&memPool),
+      cgFunc(&func),
+      rdAlloc(&memPool),
+      stackMp(func.GetStackMemPool()),
+      pseudoInsns(rdAlloc.Adapter()),
+      kMaxBBNum(cgFunc->NumBBs() + 1),
+      normalBBSet(rdAlloc.Adapter()),
+      cleanUpBBSet(rdAlloc.Adapter())
+{
+}
+
+/* check whether the opnd is the stack register or not */
+bool ReachingDefinition::IsFrameReg(const Operand &opnd) const
+{
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    auto &reg = static_cast<const RegOperand&>(opnd);
+    return cgFunc->IsFrameReg(reg);
+}
+
+/* initialize bb->out; bb->out only includes the generated DataInfo */
+void ReachingDefinition::InitOut(const BB &bb)
+{
+    if (mode & kRDRegAnalysis) {
+        *regOut[bb.GetId()] = *regGen[bb.GetId()];
+    }
+    if (mode & kRDMemAnalysis) {
+        *memOut[bb.GetId()] = *memGen[bb.GetId()];
+    }
+}
+
+/* when the DataInfo will not be used later, it should be cleared. */
+void ReachingDefinition::ClearDefUseInfo()
+{
+    for (auto insn : pseudoInsns) {
+        /* Keep return pseudo insns to extend the return register's liveness to 'ret'.
+         * Backward propagation can move the return register definition far from the return.
+         */
+#ifndef TARGX86_64
+        if (insn->GetMachineOpcode() == MOP_pseudo_ret_int || insn->GetMachineOpcode() == MOP_pseudo_ret_float) {
+            continue;
+        }
+#endif
+        insn->GetBB()->RemoveInsn(*insn);
+    }
+    FOR_ALL_BB(bb, cgFunc) {
+        delete (regGen[bb->GetId()]);
+        regGen[bb->GetId()] = nullptr;
+        delete (regUse[bb->GetId()]);
+        regUse[bb->GetId()] = nullptr;
+        delete (regIn[bb->GetId()]);
+        regIn[bb->GetId()] = nullptr;
+        delete (regOut[bb->GetId()]);
+        regOut[bb->GetId()] = nullptr;
+        delete (memGen[bb->GetId()]);
+        memGen[bb->GetId()] = nullptr;
+        delete (memUse[bb->GetId()]);
+        memUse[bb->GetId()] = nullptr;
+        delete (memIn[bb->GetId()]);
+        memIn[bb->GetId()] = nullptr;
+        delete (memOut[bb->GetId()]);
+        memOut[bb->GetId()] = nullptr;
+    }
+    regGen.clear();
+    regGen.shrink_to_fit();
+    regUse.clear();
+    regUse.shrink_to_fit();
+    regIn.clear();
+    regIn.shrink_to_fit();
+    regOut.clear();
+    regOut.shrink_to_fit();
+    memGen.clear();
+    memGen.shrink_to_fit();
+    memUse.clear();
+    memUse.shrink_to_fit();
+    memIn.clear();
+    memIn.shrink_to_fit();
+    memOut.clear();
+    memOut.shrink_to_fit();
+    cgFunc->SetRD(nullptr);
+}
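The two equations in the file comment drive a classic forward fixed-point: seed every block's out-set with its local gen bits, then propagate until nothing changes. A compact sketch of that loop with plain std::bitset standing in for DataInfo (the block graph and the 128-register bound are illustrative):

    #include <bitset>
    #include <queue>
    #include <vector>

    constexpr size_t kRegs = 128;
    struct Block { std::vector<int> preds, succs; std::bitset<kRegs> gen, in, out; };

    // Iterate In[BB] = union of Out[pred], Out[BB] = In[BB] | Gen[BB]
    // until a fixed point; only changed blocks requeue their successors.
    void Solve(std::vector<Block> &bbs)
    {
        std::queue<int> work;
        for (int i = 0; i < static_cast<int>(bbs.size()); ++i) {
            bbs[i].out = bbs[i].gen;  // InitOut: start from locally generated defs
            work.push(i);
        }
        while (!work.empty()) {
            Block &bb = bbs[static_cast<size_t>(work.front())];
            work.pop();
            std::bitset<kRegs> in;
            for (int p : bb.preds) {
                in |= bbs[static_cast<size_t>(p)].out;  // GenerateIn
            }
            bb.in = in;
            std::bitset<kRegs> out = bb.in | bb.gen;    // GenerateOut
            if (out != bb.out) {
                bb.out = out;
                for (int s : bb.succs) {
                    work.push(s);
                }
            }
        }
    }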
+
+/*
+ * find the use insns for a register.
+ * input:
+ *     insn: the insn in which the register is defined
+ *     regNO: the number of the register
+ *     isRegNO: this argument is used to form function overloading
+ * return:
+ *     the set of use insns for the register
+ */
+InsnSet ReachingDefinition::FindUseForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const
+{
+    InsnSet useInsnSet;
+    uint32 regNO = indexOrRegNO;
+    if (!isRegNO) {
+        Operand &opnd = insn.GetOperand(indexOrRegNO);
+        auto &regOpnd = static_cast<RegOperand&>(opnd);
+        regNO = regOpnd.GetRegisterNumber();
+    }
+    /* the register may be redefined in the current bb */
+    bool findFinish = FindRegUseBetweenInsn(regNO, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet);
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    if (findFinish || !regOut[insn.GetBB()->GetId()]->TestBit(regNO)) {
+        if (!insn.GetBB()->GetEhSuccs().empty()) {
+            DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, true);
+        }
+    } else {
+        DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, false);
+    }
+
+    if (!insn.GetBB()->IsCleanup() && firstCleanUpBB != nullptr) {
+        if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) {
+            findFinish =
+                FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), firstCleanUpBB->GetLastInsn(), useInsnSet);
+            if (findFinish || !regOut[firstCleanUpBB->GetId()]->TestBit(regNO)) {
+                return useInsnSet;
+            }
+        }
+        DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false);
+    }
+
+    return useInsnSet;
+}
+
+/*
+ * find the use insns for a register iteratively.
+ * input:
+ *     startBB: find use insns starting from startBB
+ *     regNO: the number of the register to be found
+ *     visitedBB: records the visited BBs
+ *     useInsnSet: the use insns of the register are saved in this set
+ */
+void ReachingDefinition::DFSFindUseForRegOpnd(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB,
+                                              InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const
+{
+    if (!onlyFindForEhSucc) {
+        for (auto succBB : startBB.GetSuccs()) {
+            if (!regIn[succBB->GetId()]->TestBit(regNO)) {
+                continue;
+            }
+            if (visitedBB[succBB->GetId()]) {
+                continue;
+            }
+            visitedBB[succBB->GetId()] = true;
+            bool findFinish = false;
+            if (regUse[succBB->GetId()]->TestBit(regNO)) {
+                findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet);
+            } else if (regGen[succBB->GetId()]->TestBit(regNO)) {
+                findFinish = true;
+            }
+            if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) {
+                DFSFindUseForRegOpnd(*succBB, regNO, visitedBB, useInsnSet, false);
+            }
+        }
+    }
+
+    for (auto ehSuccBB : startBB.GetEhSuccs()) {
+        if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) {
+            continue;
+        }
+        if (visitedBB[ehSuccBB->GetId()]) {
+            continue;
+        }
+        visitedBB[ehSuccBB->GetId()] = true;
+
+        bool findFinish = false;
+        if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) {
+            findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet);
+        } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) {
+            findFinish = true;
+        }
+        if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) {
+            DFSFindUseForRegOpnd(*ehSuccBB, regNO, visitedBB, useInsnSet, false);
+        }
+    }
+}
+
+/* check whether the register defined in regDefInsn has use insns */
+bool ReachingDefinition::RegHasUsePoint(uint32 regNO, Insn &regDefInsn) const
+{
+    InsnSet useInsnSet;
+    bool findFinish = FindRegUseBetweenInsn(regNO, regDefInsn.GetNext(), regDefInsn.GetBB()->GetLastInsn(), useInsnSet);
+    if (!useInsnSet.empty()) {
+        return true;
+    }
+    if (!findFinish) {
+        std::vector<bool> visitedBB(kMaxBBNum, false);
+        return RegIsUsedInOtherBB(*regDefInsn.GetBB(), regNO, visitedBB);
+    }
+    return false;
+}
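DFSFindUseForRegOpnd prunes its walk with the per-block bit vectors: a successor is entered only when the value reaches it (regIn), scanned only when it is locally read (regUse), and descended past only when the value still flows out unredefined (regOut with no regGen kill). A condensed sketch of that pruning order, with the bitsets reduced to plain flags per block (illustrative types, not the real DataInfo API):

    #include <set>
    #include <vector>

    struct BlockBits { bool in, use, gen, out; std::vector<int> succs; };

    // Collect block ids that may read the value, mirroring the
    // in/use/gen/out checks of DFSFindUseForRegOpnd for one register.
    void DfsUses(int bb, const std::vector<BlockBits> &g,
                 std::vector<bool> &visited, std::set<int> &uses)
    {
        for (int s : g[bb].succs) {
            if (!g[s].in || visited[s]) {
                continue;  // value never reaches s, or s already explored
            }
            visited[s] = true;
            if (g[s].use) {
                uses.insert(s);        // an in-block scan would record the uses
            }
            if (!g[s].gen && g[s].out) {
                DfsUses(s, g, visited, uses);  // not killed here: keep walking
            }
        }
    }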
+
+/* check whether the register is used in BBs other than startBB */
+bool ReachingDefinition::RegIsUsedInOtherBB(const BB &startBB, uint32 regNO, std::vector<bool> &visitedBB) const
+{
+    InsnSet useInsnSet;
+    for (auto succBB : startBB.GetSuccs()) {
+        if (!regIn[succBB->GetId()]->TestBit(regNO)) {
+            continue;
+        }
+        if (visitedBB[succBB->GetId()]) {
+            continue;
+        }
+        visitedBB[succBB->GetId()] = true;
+        bool findFinish = false;
+        if (regUse[succBB->GetId()]->TestBit(regNO)) {
+            if (!regGen[succBB->GetId()]->TestBit(regNO)) {
+                return true;
+            }
+            useInsnSet.clear();
+            findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet);
+            if (!useInsnSet.empty()) {
+                return true;
+            }
+        } else if (regGen[succBB->GetId()]->TestBit(regNO)) {
+            findFinish = true;
+        }
+        if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) {
+            if (RegIsUsedInOtherBB(*succBB, regNO, visitedBB)) {
+                return true;
+            }
+        }
+    }
+
+    for (auto ehSuccBB : startBB.GetEhSuccs()) {
+        if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) {
+            continue;
+        }
+        if (visitedBB[ehSuccBB->GetId()]) {
+            continue;
+        }
+        visitedBB[ehSuccBB->GetId()] = true;
+
+        bool findFinish = false;
+        if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) {
+            if (!regGen[ehSuccBB->GetId()]->TestBit(regNO)) {
+                return true;
+            }
+            useInsnSet.clear();
+            findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet);
+            if (!useInsnSet.empty()) {
+                return true;
+            }
+        } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) {
+            findFinish = true;
+        }
+        if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) {
+            if (RegIsUsedInOtherBB(*ehSuccBB, regNO, visitedBB)) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+bool ReachingDefinition::RegIsUsedInCleanUpBB(uint32 regNO) const
+{
+    if (firstCleanUpBB == nullptr) {
+        return false;
+    }
+    InsnSet useInsnSet;
+    if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) {
+        bool findFinish =
+            FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), firstCleanUpBB->GetLastInsn(), useInsnSet);
+        if (!useInsnSet.empty()) {
+            return true;
+        }
+        if (findFinish) {
+            return false;
+        }
+    }
+
+    std::vector<bool> visitedBB(kMaxBBNum, false);
+    DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false);
+    return !useInsnSet.empty();
+}
+
+/*
+ * find used insns for stack memory operand iteratively.
+ * input: + * startBB: find used insns starting from startBB + * offset: the offset of memory to be find + * visitedBB: record these visited BB + * useInsnSet: used insns of stack memory operand is saved in this set + */ +void ReachingDefinition::DFSFindUseForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const +{ + if (!onlyFindForEhSucc) { + for (auto succBB : startBB.GetSuccs()) { + if (!memIn[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (memUse[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + } else if (memGen[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*succBB, offset, visitedBB, useInsnSet); + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!memIn[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + bool findFinish = false; + if (memUse[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + } else if (memGen[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*ehSuccBB, offset, visitedBB, useInsnSet); + } + } +} + +/* Out[BB] = gen[BB] union in[BB]. if bb->out changed, return true. */ +bool ReachingDefinition::GenerateOut(const BB &bb) +{ + bool outInfoChanged = false; + if (mode & kRDRegAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegOutBak = regOut[bb.GetId()]->Clone(alloc); + *regOut[bb.GetId()] = *(regIn[bb.GetId()]); + regOut[bb.GetId()]->OrBits(*regGen[bb.GetId()]); + if (!regOut[bb.GetId()]->IsEqual(bbRegOutBak)) { + outInfoChanged = true; + } + } + + if (mode & kRDMemAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbMemOutBak = memOut[bb.GetId()]->Clone(alloc); + *memOut[bb.GetId()] = *memIn[bb.GetId()]; + memOut[bb.GetId()]->OrBits(*memGen[bb.GetId()]); + if (!memOut[bb.GetId()]->IsEqual(bbMemOutBak)) { + outInfoChanged = true; + } + } + return outInfoChanged; +} + +bool ReachingDefinition::GenerateOut(const BB &bb, const std::set &infoIndex, const bool isReg) +{ + bool outInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegOutBak = regOut[bb.GetId()]->GetElem(index); + regOut[bb.GetId()]->SetElem(index, regIn[bb.GetId()]->GetElem(index)); + regOut[bb.GetId()]->OrDesignateBits(*regGen[bb.GetId()], index); + if (!outInfoChanged && (bbRegOutBak != regOut[bb.GetId()]->GetElem(index))) { + outInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemOutBak = memOut[bb.GetId()]->GetElem(index); + memOut[bb.GetId()]->SetElem(index, memIn[bb.GetId()]->GetElem(index)); + memOut[bb.GetId()]->OrDesignateBits(*memGen[bb.GetId()], index); + if (bbMemOutBak != memOut[bb.GetId()]->GetElem(index)) { + outInfoChanged = true; + } + } + } + return outInfoChanged; +} + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. 
*/ +bool ReachingDefinition::GenerateIn(const BB &bb) +{ + bool inInfoChanged = false; + if (mode & kRDRegAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegInBak = regIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predEhBB->GetId()]); + } + + if (!regIn[bb.GetId()]->IsEqual(bbRegInBak)) { + inInfoChanged = true; + } + } + if (mode & kRDMemAnalysis) { + LocalMapleAllocator alloc(stackMp); + DataInfo &memInBak = memIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predEhBB->GetId()]); + } + + if (!memIn[bb.GetId()]->IsEqual(memInBak)) { + inInfoChanged = true; + } + } + return inInfoChanged; +} + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. */ +bool ReachingDefinition::GenerateIn(const BB &bb, const std::set &infoIndex, const bool isReg) +{ + bool inInfoChanged = false; + + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegInBak = regIn[bb.GetId()]->GetElem(index); + regIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predEhBB->GetId()], index); + } + + if (bbRegInBak != regIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemInBak = memIn[bb.GetId()]->GetElem(index); + memIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predEhBB->GetId()], index); + } + + if (bbMemInBak != memIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + +/* In[firstCleanUpBB] = Union all of out[bbNormalSet] */ +bool ReachingDefinition::GenerateInForFirstCleanUpBB() +{ + CHECK_NULL_FATAL(firstCleanUpBB); + if (mode & kRDRegAnalysis) { + regIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + if (mode & kRDMemAnalysis) { + memIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + + for (auto normalBB : normalBBSet) { + if (mode & kRDRegAnalysis) { + regIn[firstCleanUpBB->GetId()]->OrBits(*regOut[normalBB->GetId()]); + } + + if (mode & kRDMemAnalysis) { + memIn[firstCleanUpBB->GetId()]->OrBits(*memOut[normalBB->GetId()]); + } + } + + return ((regIn[firstCleanUpBB->GetId()] != nullptr && regIn[firstCleanUpBB->GetId()]->Size() > 0) || + (memIn[firstCleanUpBB->GetId()] != nullptr && memIn[firstCleanUpBB->GetId()]->Size() > 0)); +} + +bool ReachingDefinition::GenerateInForFirstCleanUpBB(bool isReg, const std::set &infoIndex) +{ + CHECK_NULL_FATAL(firstCleanUpBB); + bool inInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 regInElemBak = regIn[firstCleanUpBB->GetId()]->GetElem(index); + regIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + regIn[firstCleanUpBB->GetId()]->OrDesignateBits(*regOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (regIn[firstCleanUpBB->GetId()]->GetElem(index) != regInElemBak)) { + inInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 memInElemBak = 
memIn[firstCleanUpBB->GetId()]->GetElem(index); + memIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + memIn[firstCleanUpBB->GetId()]->OrDesignateBits(*memOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (memIn[firstCleanUpBB->GetId()]->GetElem(index) != memInElemBak)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + +/* allocate memory for DataInfo of bb */ +void ReachingDefinition::InitRegAndMemInfo(const BB &bb) +{ + if (mode & kRDRegAnalysis) { + const uint32 kMaxRegCount = cgFunc->GetMaxVReg(); + regGen[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regUse[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regIn[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regOut[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + } + + if (mode & kRDMemAnalysis) { + const int32 kStackSize = GetStackSize(); + memGen[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memUse[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memIn[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memOut[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + } +} + +/* insert pseudoInsns for function parameters, ehBB, and return R0/V0. init bb->gen, bb->use, bb->out */ +void ReachingDefinition::Initialize() +{ + InitDataSize(); + AddRetPseudoInsns(); + FOR_ALL_BB(bb, cgFunc) { + InitRegAndMemInfo(*bb); + } + FOR_ALL_BB(bb, cgFunc) { + if (bb == cgFunc->GetFirstBB()) { + InitStartGen(); + } + if (!bb->GetEhPreds().empty()) { + InitEhDefine(*bb); + } + InitGenUse(*bb); + InitOut(*bb); + + if (bb->IsCleanup()) { + if (bb->GetFirstStmt() == cgFunc->GetCleanupLabel()) { + firstCleanUpBB = bb; + } + (void)cleanUpBBSet.insert(bb); + } else { + (void)normalBBSet.insert(bb); + } + } + maxInsnNO = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + insn->SetId(maxInsnNO); + maxInsnNO += kInsnNoInterval; + } + } +} + +void ReachingDefinition::InitDataSize() +{ + /* to visit vec[cgFunc->NumBBs()], size should be cgFunc->NumBBs() + 1 */ + const uint32 dataSize = cgFunc->NumBBs() + 1; + regIn.resize(dataSize); + regOut.resize(dataSize); + regGen.resize(dataSize); + regUse.resize(dataSize); + memIn.resize(dataSize); + memOut.resize(dataSize); + memGen.resize(dataSize); + memUse.resize(dataSize); +} + +/* compute bb->in, bb->out for each BB execpt cleanup BB */ +void ReachingDefinition::BuildInOutForFuncBody() +{ + std::unordered_set normalBBSetBak(normalBBSet.begin(), normalBBSet.end()); + std::unordered_set::iterator setItr; + while (!normalBBSetBak.empty()) { + setItr = normalBBSetBak.begin(); + BB *bb = *setItr; + DEBUG_ASSERT(bb != nullptr, "null ptr check"); + (void)normalBBSetBak.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)normalBBSetBak.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)normalBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(normalBBSetBak.empty(), "CG internal error."); +} + +/* if bb->out changed, update in and out */ +void ReachingDefinition::UpdateInOut(BB &changedBB) +{ + InitGenUse(changedBB, false); + if (!GenerateOut(changedBB)) { + return; + } + + std::unordered_set bbSet; + std::unordered_set::iterator setItr; + + for (auto succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + 
DEBUG_ASSERT(bb != nullptr, "null ptr check"); + bbSet.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(); + } +} + +void ReachingDefinition::UpdateInOut(BB &changedBB, bool isReg) +{ + std::set changedInfoIndex; + if (isReg) { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = regGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*regGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } else { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = memGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*memGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } + if (changedInfoIndex.empty()) { + return; + } + if (!GenerateOut(changedBB, changedInfoIndex, isReg)) { + return; + } + std::set bbSet; + std::set::iterator setItr; + for (auto &succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto &ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + bbSet.erase(setItr); + if (GenerateIn(*bb, changedInfoIndex, isReg)) { + if (GenerateOut(*bb, changedInfoIndex, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(isReg, changedInfoIndex); + } +} + +/* compute bb->in, bb->out for cleanup BBs */ +void ReachingDefinition::BuildInOutForCleanUpBB() +{ + DEBUG_ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be nullptr"); + if (GenerateInForFirstCleanUpBB()) { + GenerateOut(*firstCleanUpBB); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +void ReachingDefinition::BuildInOutForCleanUpBB(bool isReg, const std::set &index) +{ + DEBUG_ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be nullptr"); + if (GenerateInForFirstCleanUpBB(isReg, index)) { + GenerateOut(*firstCleanUpBB, index, isReg); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb, index, isReg)) { + if (GenerateOut(*bb, index, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + DEBUG_ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +/* entry for ReachingDefinition Analysis, mode represent to analyze RegOperand, MemOperand or both of them */ +void 
ReachingDefinition::AnalysisStart()
+{
+    if (!cgFunc->GetFirstBB()) {
+        return;
+    }
+    Initialize();
+    /* Build in/out for the function body first (except cleanup bbs). */
+    BuildInOutForFuncBody();
+    /* If cleanup bbs exist, build in/out for them. firstCleanUpBB->in = union of all non-cleanup bbs' out. */
+    if (firstCleanUpBB != nullptr) {
+        BuildInOutForCleanUpBB();
+    }
+    cgFunc->SetRD(this);
+}
+
+/* check whether currentBB can reach endBB according to control flow */
+bool ReachingDefinition::CanReachEndBBFromCurrentBB(const BB &currentBB, const BB &endBB,
+                                                    std::vector<bool> &traversedBBSet) const
+{
+    if (&currentBB == &endBB) {
+        return true;
+    }
+    for (auto predBB : endBB.GetPreds()) {
+        if (traversedBBSet[predBB->GetId()]) {
+            continue;
+        }
+        traversedBBSet[predBB->GetId()] = true;
+        if (predBB == &currentBB) {
+            return true;
+        }
+        if (CanReachEndBBFromCurrentBB(currentBB, *predBB, traversedBBSet)) {
+            return true;
+        }
+    }
+    for (auto ehPredBB : endBB.GetEhPreds()) {
+        if (traversedBBSet[ehPredBB->GetId()]) {
+            continue;
+        }
+        traversedBBSet[ehPredBB->GetId()] = true;
+        if (ehPredBB == &currentBB) {
+            return true;
+        }
+        if (CanReachEndBBFromCurrentBB(currentBB, *ehPredBB, traversedBBSet)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/* check whether the register may be redefined from startBB to endBB */
+bool ReachingDefinition::IsLiveInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB,
+                                           std::vector<bool> &visitedBB, bool isFirstNo) const
+{
+    for (auto succ : startBB.GetSuccs()) {
+        if (visitedBB[succ->GetId()]) {
+            continue;
+        }
+        visitedBB[succ->GetId()] = true;
+        if (isFirstNo && CheckRegLiveinReturnBB(regNO, *succ)) {
+            return false;
+        }
+        std::vector<bool> traversedPathSet(kMaxBBNum, false);
+        bool canReachEndBB = true;
+        if (regGen[succ->GetId()]->TestBit(regNO)) {
+            canReachEndBB = CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet);
+            if (canReachEndBB) {
+                return false;
+            }
+        }
+        if (!canReachEndBB) {
+            continue;
+        }
+        bool isLive = IsLiveInAllPathBB(regNO, *succ, endBB, visitedBB, isFirstNo);
+        if (!isLive) {
+            return false;
+        }
+    }
+
+    for (auto ehSucc : startBB.GetEhSuccs()) {
+        if (visitedBB[ehSucc->GetId()]) {
+            continue;
+        }
+        visitedBB[ehSucc->GetId()] = true;
+        if (isFirstNo && CheckRegLiveinReturnBB(regNO, *ehSucc)) {
+            return false;
+        }
+        std::vector<bool> traversedPathSet(kMaxBBNum, false);
+        bool canReachEndBB = true;
+        if (regGen[ehSucc->GetId()]->TestBit(regNO)) {
+            canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet);
+            if (canReachEndBB) {
+                return false;
+            }
+        }
+        if (!canReachEndBB) {
+            continue;
+        }
+        bool isLive = IsLiveInAllPathBB(regNO, *ehSucc, endBB, visitedBB, isFirstNo);
+        if (!isLive) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/* Check if the reg is used in return BB */
+bool ReachingDefinition::CheckRegLiveinReturnBB(uint32 regNO, const BB &bb) const
+{
+#if TARGAARCH64 || TARGRISCV64
+    if (bb.GetKind() == BB::kBBReturn) {
+        PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType();
+        regno_t returnReg = R0;
+        if (IsPrimitiveFloat(returnType)) {
+            returnReg = V0;
+        } else if (IsPrimitiveInteger(returnType)) {
+            returnReg = R0;
+        }
+        if (regNO == returnReg) {
+            return true;
+        }
+    }
+#endif
+    return false;
+}
+
+bool ReachingDefinition::RegIsUsedIncaller(uint32 regNO, Insn &startInsn, Insn &endInsn) const
+{
+    if (startInsn.GetBB() != endInsn.GetBB()) {
+        return false;
+    }
+    if (startInsn.GetNext() == &endInsn || &startInsn == &endInsn) {
+        return false;
+    }
+    auto RegDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(),
endInsn.GetPrev()); + if (!RegDefVec.empty()) { + return false; + } + if (IsCallerSavedReg(regNO) && startInsn.GetNext() != nullptr && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *(startInsn.GetBB()->GetLastInsn()), regNO)) { + return true; + } + if (CheckRegLiveinReturnBB(regNO, *startInsn.GetBB())) { + return true; + } + return false; +} + +/* check whether control flow can reach endInsn from startInsn */ +bool ReachingDefinition::RegIsLiveBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn, bool isBack, + bool isFirstNo) const +{ + DEBUG_ASSERT(&startInsn != &endInsn, "startInsn is not equal to endInsn"); + if (startInsn.GetBB() == endInsn.GetBB()) { + /* register is difined more than once */ + if (startInsn.GetId() > endInsn.GetId()) { + if (!isBack) { + return false; + } else { + return true; + } + } + if (startInsn.GetNext() == &endInsn) { + return true; + } + if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + std::vector RegDefVec; + if (isBack) { + RegDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()); + } else { + RegDefVec = FindRegDefBetweenInsn(regNO, &startInsn, endInsn.GetPrev()); + } + if (!RegDefVec.empty()) { + return false; + } + } + if (IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) { + return false; + } + return true; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) { + return false; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) { + return false; + } + + std::vector visitedBB(kMaxBBNum, false); + return IsLiveInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB, isFirstNo); +} + +static bool SetDefInsnVecForAsm(Insn *insn, uint32 index, uint32 regNO, std::vector &defInsnVec) +{ + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + if (static_cast(reg)->GetRegisterNumber() == regNO) { + defInsnVec.emplace_back(insn); + return true; + } + } + return false; +} + +std::vector ReachingDefinition::FindRegDefBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, + bool findAll, bool analysisDone) const +{ + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + if (analysisDone && !regGen[startInsn->GetBB()->GetId()]->TestBit(regNO)) { + return defInsnVec; + } + + for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsAsmInsn()) { + if (SetDefInsnVecForAsm(insn, kAsmOutputListOpnd, regNO, defInsnVec) || + SetDefInsnVecForAsm(insn, kAsmClobberListOpnd, regNO, defInsnVec)) { + if (findAll) { + defInsnVec.emplace_back(insn); + } else { + return defInsnVec; + } + 
} + } + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + defInsnVec.emplace_back(insn); + if (!findAll) { + return defInsnVec; + } + } + if (insn->IsRegDefined(regNO)) { + defInsnVec.emplace_back(insn); + if (!findAll) { + return defInsnVec; + } + } + } + return defInsnVec; +} + +bool ReachingDefinition::RegIsUsedOrDefBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn) const +{ + DEBUG_ASSERT(&startInsn != &endInsn, "startInsn is not equal to endInsn"); + if (startInsn.GetBB() == endInsn.GetBB()) { + /* register is difined more than once */ + if (startInsn.GetId() > endInsn.GetId()) { + return false; + } + if (startInsn.GetNext() == &endInsn) { + return true; + } + if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) { + return false; + } + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + if (IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) { + return false; + } + return true; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) { + return false; + } + + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) { + return false; + } + + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) { + return false; + } + + std::vector visitedBB(kMaxBBNum, false); + return IsUseOrDefInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB); +} + +/* check whether register may be redefined form in the same BB */ +bool ReachingDefinition::IsUseOrDefBetweenInsn(uint32 regNO, const BB &curBB, const Insn &startInsn, + Insn &endInsn) const +{ + if (regGen[curBB.GetId()]->TestBit(regNO)) { + if (!FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) { + return false; + } + } + if (regUse[curBB.GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + return true; +} + +/* check whether register may be redefined form startBB to endBB */ +bool ReachingDefinition::IsUseOrDefInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, + std::vector &visitedBB) const +{ + for (auto succ : startBB.GetSuccs()) { + if (visitedBB[succ->GetId()] || 
succ == &endBB) { + continue; + } + visitedBB[succ->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[succ->GetId()]->TestBit(regNO) || regUse[succ->GetId()]->TestBit(regNO) || + (succ->HasCall() && IsCallerSavedReg(regNO))) { + canReachEndBB = CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *succ, endBB, visitedBB); + if (!isLive) { + return false; + } + } + + for (auto ehSucc : startBB.GetEhSuccs()) { + if (visitedBB[ehSucc->GetId()]) { + continue; + } + visitedBB[ehSucc->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[ehSucc->GetId()]->TestBit(regNO) || regUse[ehSucc->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *ehSucc, endBB, visitedBB); + if (!isLive) { + return false; + } + } + return true; +} + +bool ReachingDefinition::HasCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn) const +{ + DEBUG_ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + return true; + } + } + return false; +} + +/* operand is only defined in startBB, and only used in endBB. + * so traverse from endBB to startBB, all paths reach startBB finally. + * startBB and endBB are different, and call insns in both of them are not counted. + * whether startBB and endBB are in a loop is not counted. 
+ */ +bool ReachingDefinition::HasCallInPath(const BB &startBB, const BB &endBB, std::vector &visitedBB) const +{ + DEBUG_ASSERT(&startBB != &endBB, "startBB and endBB are not counted"); + std::queue bbQueue; + bbQueue.push(&endBB); + visitedBB[endBB.GetId()] = true; + while (!bbQueue.empty()) { + const BB *bb = bbQueue.front(); + bbQueue.pop(); + for (auto predBB : bb->GetPreds()) { + if (predBB == &startBB || visitedBB[predBB->GetId()]) { + continue; + } + if (predBB->HasCall()) { + return true; + } + visitedBB[predBB->GetId()] = true; + bbQueue.push(predBB); + } + for (auto ehPredBB : bb->GetEhPreds()) { + if (ehPredBB == &startBB || visitedBB[ehPredBB->GetId()]) { + continue; + } + if (ehPredBB->HasCall()) { + return true; + } + visitedBB[ehPredBB->GetId()] = true; + bbQueue.push(ehPredBB); + } + } + return false; +} + +/* because of time cost, this function is not precise, BB in loop is not counted */ +bool ReachingDefinition::HasCallBetweenDefUse(const Insn &defInsn, const Insn &useInsn) const +{ + if (defInsn.GetBB()->GetId() == useInsn.GetBB()->GetId()) { + if (&useInsn == defInsn.GetNext()) { + return false; + } + if (useInsn.GetId() > defInsn.GetId()) { + return HasCallBetweenInsnInSameBB(defInsn, *useInsn.GetPrev()); + } + /* useInsn is in front of defInsn, we think there is call insn between them conservatively */ + return true; + } + /* check defInsn->GetBB() */ + if (&defInsn != defInsn.GetBB()->GetLastInsn() && defInsn.GetBB()->HasCall() && + HasCallBetweenInsnInSameBB(*defInsn.GetNext(), *defInsn.GetBB()->GetLastInsn())) { + return true; + } + /* check useInsn->GetBB() */ + if (&useInsn != useInsn.GetBB()->GetFirstInsn() && useInsn.GetBB()->HasCall() && + HasCallBetweenInsnInSameBB(*useInsn.GetBB()->GetFirstInsn(), *useInsn.GetPrev())) { + return true; + } + std::vector visitedBB(kMaxBBNum, false); + return HasCallInPath(*defInsn.GetBB(), *useInsn.GetBB(), visitedBB); +} + +void ReachingDefinition::EnlargeRegCapacity(uint32 size) +{ + FOR_ALL_BB(bb, cgFunc) { + regIn[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regOut[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regGen[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regUse[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + } +} + +void ReachingDefinition::DumpInfo(const BB &bb, DumpType flag) const +{ + const DataInfo *info = nullptr; + switch (flag) { + case kDumpRegGen: + LogInfo::MapleLogger() << " regGen:\n"; + info = regGen[bb.GetId()]; + break; + case kDumpRegUse: + LogInfo::MapleLogger() << " regUse:\n"; + info = regUse[bb.GetId()]; + break; + case kDumpRegIn: + LogInfo::MapleLogger() << " regIn:\n"; + info = regIn[bb.GetId()]; + break; + case kDumpRegOut: + LogInfo::MapleLogger() << " regOut:\n"; + info = regOut[bb.GetId()]; + break; + case kDumpMemGen: + LogInfo::MapleLogger() << " memGen:\n"; + info = memGen[bb.GetId()]; + break; + case kDumpMemIn: + LogInfo::MapleLogger() << " memIn:\n"; + info = memIn[bb.GetId()]; + break; + case kDumpMemOut: + LogInfo::MapleLogger() << " memOut:\n"; + info = memOut[bb.GetId()]; + break; + case kDumpMemUse: + LogInfo::MapleLogger() << " memUse:\n"; + info = memUse[bb.GetId()]; + break; + default: + return; + } + DEBUG_ASSERT(info != nullptr, "null ptr check"); + uint32 count = 1; + LogInfo::MapleLogger() << " "; + for (uint32 i = 0; i != info->Size(); ++i) { + if (info->TestBit(i)) { + count += 1; + if (kDumpMemGen <= flag && flag <= kDumpMemUse) { + /* Each element i means a 4 byte stack slot. 
*/ + LogInfo::MapleLogger() << (i * 4) << " "; + } else { + LogInfo::MapleLogger() << i << " "; + } + /* 10 output per line */ + if (count % 10 == 0) { + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << " "; + } + } + } + + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::DumpBBCGIR(const BB &bb) const +{ + if (bb.IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb.IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb.GetSuccs().size()) { + LogInfo::MapleLogger() << " succs: "; + for (auto *succBB : bb.GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (bb.GetEhSuccs().size()) { + LogInfo::MapleLogger() << " eh_succs: "; + for (auto *ehSuccBB : bb.GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + + FOR_BB_INSNS_CONST(insn, &bb) { + LogInfo::MapleLogger() << " "; + insn->Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::Dump(uint32 flag) const +{ + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "get symbol in function failed in ReachingDefinition::Dump"); + LogInfo::MapleLogger() << "\n---- Reaching definition analysis for " << mirSymbol->GetName(); + LogInfo::MapleLogger() << " ----\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " ===\n"; + + if (flag & kDumpBBCGIR) { + DumpBBCGIR(*bb); + } + + if (flag & kDumpRegIn) { + DumpInfo(*bb, kDumpRegIn); + } + + if (flag & kDumpRegUse) { + DumpInfo(*bb, kDumpRegUse); + } + + if (flag & kDumpRegGen) { + DumpInfo(*bb, kDumpRegGen); + } + + if (flag & kDumpRegOut) { + DumpInfo(*bb, kDumpRegOut); + } + + if (flag & kDumpMemIn) { + DumpInfo(*bb, kDumpMemIn); + } + + if (flag & kDumpMemGen) { + DumpInfo(*bb, kDumpMemGen); + } + + if (flag & kDumpMemOut) { + DumpInfo(*bb, kDumpMemOut); + } + + if (flag & kDumpMemUse) { + DumpInfo(*bb, kDumpMemUse); + } + } + LogInfo::MapleLogger() << "------------------------------------------------------\n"; +} + +bool CgReachingDefinition::PhaseRun(maplebe::CGFunc &f) +{ +#if TARGAARCH64 || TARGRISCV64 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif +#if TARGARM32 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif + reachingDef->SetAnalysisMode(kRDAllAnalysis); + reachingDef->AnalysisStart(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgReachingDefinition, reachingdefinition) + +bool CgClearRDInfo::PhaseRun(maplebe::CGFunc &f) +{ + if (f.GetRDStatus()) { + f.GetRD()->ClearDefUseInfo(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgClearRDInfo, clearrdinfo) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d6bb36b676dba4718a777a5954242c5c28c7d266 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc.cpp @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reg_alloc.h"
+#include "live.h"
+#include "loop.h"
+#include "cg_dominance.h"
+#include "mir_lower.h"
+#include "securec.h"
+#include "reg_alloc_basic.h"
+#include "reg_alloc_lsra.h"
+#include "cg.h"
+#if TARGAARCH64
+#include "aarch64_color_ra.h"
+#endif
+
+namespace maplebe {
+void CgRegAlloc::GetAnalysisDependence(AnalysisDep &aDep) const
+{
+    if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevelLiteCG ||
+        CGOptions::GetInstance().DoLinearScanRegisterAllocation()) {
+        aDep.AddRequired<CgBBSort>();
+    }
+    if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) {
+        aDep.AddRequired<CgLiveAnalysis>();
+        aDep.AddRequired<CgLoopAnalysis>();
+        aDep.PreservedAllExcept<CgLiveAnalysis>();
+    }
+#if TARGAARCH64
+    if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0 &&
+        CGOptions::GetInstance().DoColoringBasedRegisterAllocation()) {
+        aDep.AddRequired<CgDomAnalysis>();
+    }
+#endif
+}
+
+bool CgRegAlloc::PhaseRun(maplebe::CGFunc &f)
+{
+    bool success = false;
+    while (!success) {
+        MemPool *phaseMp = GetPhaseMemPool();
+        /* create the register allocator */
+        RegAllocator *regAllocator = nullptr;
+        if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) {
+            regAllocator = phaseMp->New<DefaultO0RegAllocator>(f, *phaseMp);
+        } else if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevelLiteCG) {
+            Bfs *bfs = GET_ANALYSIS(CgBBSort, f);
+            CHECK_FATAL(bfs != nullptr, "null ptr check");
+            regAllocator = phaseMp->New<LSRALinearScanRegAllocator>(f, *phaseMp, bfs);
+        } else {
+#if TARGAARCH64
+            if (f.GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) {
+                Bfs *bfs = GET_ANALYSIS(CgBBSort, f);
+                CHECK_FATAL(bfs != nullptr, "null ptr check");
+                regAllocator = phaseMp->New<LSRALinearScanRegAllocator>(f, *phaseMp, bfs);
+            } else if (f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) {
+                MaplePhase *it = GetAnalysisInfoHook()->ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(
+                    &CgLiveAnalysis::id, f);
+                LiveAnalysis *live = static_cast<CgLiveAnalysis*>(it)->GetResult();
+                CHECK_FATAL(live != nullptr, "null ptr check");
+                /* revert liveanalysis result container.
*/ + live->ResetLiveSet(); + DomAnalysis *dom = GET_ANALYSIS(CgDomAnalysis, f); + CHECK_FATAL(dom != nullptr, "null ptr check"); + regAllocator = phaseMp->New(f, *phaseMp, *dom); + } else { + maple::LogInfo::MapleLogger(kLlErr) + << "Warning: We only support Linear Scan and GraphColor register allocation\n"; + } +#elif TARGX86_64 + LogInfo::MapleLogger(kLlErr) << "Error: We only support -O0, and -LiteCG for x64.\n"; +#endif + } + /* do register allocation */ + CHECK_FATAL(regAllocator != nullptr, "regAllocator is null in CgDoRegAlloc::Run"); + f.SetIsAfterRegAlloc(); + success = regAllocator->AllocateRegisters(); + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + } + } + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + } + return false; +} +} /* namespace maplebe */ \ No newline at end of file diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_basic.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_basic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b47f0a3c185deefcb797cf540a5f877015db624 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_basic.cpp @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reg_alloc_basic.h" +#include "cg.h" + +namespace maplebe { +/* + * NB. As an optimization we can use X8 as a scratch (temporary) + * register if the return value is not returned through memory. 
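+ * (On AArch64, X8 is the indirect result location register: it only holds
+ * the address of an aggregate returned through memory, so it is otherwise
+ * free at O0.)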
+ */
+Operand *DefaultO0RegAllocator::HandleRegOpnd(Operand &opnd)
+{
+    DEBUG_ASSERT(opnd.IsRegister(), "Operand should be register operand");
+    auto &regOpnd = static_cast<RegOperand&>(opnd);
+    if (regOpnd.IsOfCC()) {
+        return &opnd;
+    }
+    if (!regInfo->IsVirtualRegister(regOpnd)) {
+        availRegSet[regOpnd.GetRegisterNumber()] = false;
+        (void)liveReg.insert(regOpnd.GetRegisterNumber());
+        return &regOpnd;
+    }
+    auto regMapIt = regMap.find(regOpnd.GetRegisterNumber());
+    if (regMapIt != regMap.end()) { /* already allocated this register */
+        DEBUG_ASSERT(regMapIt->second < regInfo->GetAllRegNum(), "must be a physical register");
+        regno_t newRegNO = regMapIt->second;
+        availRegSet[newRegNO] = false; /* make sure the physical register cannot be re-allocated while live */
+        (void)liveReg.insert(newRegNO);
+        (void)allocatedSet.insert(&opnd);
+        return &cgFunc->GetOpndBuilder()->CreatePReg(newRegNO, regOpnd.GetSize(), regOpnd.GetRegisterType());
+    }
+    if (AllocatePhysicalRegister(regOpnd)) {
+        (void)allocatedSet.insert(&opnd);
+        auto regMapItSecond = regMap.find(regOpnd.GetRegisterNumber());
+        DEBUG_ASSERT(regMapItSecond != regMap.end(), " ERROR: can not find register number in regmap ");
+        return &cgFunc->GetOpndBuilder()->CreatePReg(regMapItSecond->second, regOpnd.GetSize(),
+                                                     regOpnd.GetRegisterType());
+    }
+
+    /* use register 0 as the spill register */
+    regno_t regNO = 0;
+    return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType());
+}
+
+Operand *DefaultO0RegAllocator::HandleMemOpnd(Operand &opnd)
+{
+    DEBUG_ASSERT(opnd.IsMemoryAccessOperand(), "Operand should be memory access operand");
+    auto *memOpnd = static_cast<MemOperand*>(&opnd);
+    Operand *res = nullptr;
+    if (memOpnd->GetBaseRegister() != nullptr) {
+        res = AllocSrcOpnd(*memOpnd->GetBaseRegister());
+        memOpnd->SetBaseRegister(static_cast<RegOperand&>(*res));
+    }
+    if (memOpnd->GetIndexRegister() != nullptr) {
+        res = AllocSrcOpnd(*memOpnd->GetIndexRegister());
+        memOpnd->SetIndexRegister(static_cast<RegOperand&>(*res));
+    }
+    (void)allocatedSet.insert(&opnd);
+    return memOpnd;
+}
+
+Operand *DefaultO0RegAllocator::AllocSrcOpnd(Operand &opnd)
+{
+    if (opnd.IsRegister()) {
+        if (regInfo->IsUnconcernedReg(static_cast<RegOperand&>(opnd))) {
+            return &opnd;
+        }
+        return HandleRegOpnd(opnd);
+    } else if (opnd.IsMemoryAccessOperand()) {
+        return HandleMemOpnd(opnd);
+    }
+    DEBUG_ASSERT(false, "NYI");
+    return nullptr;
+}
+
+Operand *DefaultO0RegAllocator::AllocDestOpnd(Operand &opnd, const Insn &insn)
+{
+    if (!opnd.IsRegister()) {
+        DEBUG_ASSERT(false, "result operand must be of type register");
+        return nullptr;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(opnd);
+    if (regInfo->IsUnconcernedReg(regOpnd)) {
+        return &opnd;
+    }
+    if (!regInfo->IsVirtualRegister(regOpnd)) {
+        auto reg = regOpnd.GetRegisterNumber();
+        availRegSet[reg] = true;
+        uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber());
+        if (id != 0 && id <= insn.GetId()) {
+            ReleaseReg(reg);
+        }
+        return &opnd;
+    }
+
+    auto regMapIt = regMap.find(regOpnd.GetRegisterNumber());
+    if (regMapIt != regMap.end()) {
+        regno_t reg = regMapIt->second;
+        if (!insn.IsCondDef()) {
+            uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber());
+            if (id != 0 && id <= insn.GetId()) {
+                ReleaseReg(reg);
+            }
+        }
+    } else {
+        /* AllocatePhysicalRegister inserts a mapping from vreg no to phy reg no into regMap */
+        if (AllocatePhysicalRegister(regOpnd)) {
+            regMapIt = regMap.find(regOpnd.GetRegisterNumber());
+            if (!insn.IsCondDef()) {
+                uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber());
+                if (id != 0 && id <= insn.GetId()) {
+                    ReleaseReg(regMapIt->second);
+                }
+            }
+        } else {
+            /* for register spill, use register 0 as the spill register */
+            regno_t regNO = 0;
+            return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType());
+        }
+    }
+    (void)allocatedSet.insert(&opnd);
+    return &cgFunc->GetOpndBuilder()->CreatePReg(regMapIt->second, regOpnd.GetSize(), regOpnd.GetRegisterType());
+}
+
+void DefaultO0RegAllocator::GetPhysicalRegisterBank(RegType regTy, regno_t &begin, regno_t &end) const
+{
+    switch (regTy) {
+        case kRegTyVary:
+        case kRegTyCc:
+            break;
+        case kRegTyInt:
+            begin = *regInfo->GetIntRegs().begin();
+            end = *regInfo->GetIntRegs().rbegin();
+            break;
+        case kRegTyFloat:
+            begin = *regInfo->GetFpRegs().begin();
+            end = *regInfo->GetFpRegs().rbegin();
+            break;
+        default:
+            DEBUG_ASSERT(false, "NYI");
+            break;
+    }
+}
+
+void DefaultO0RegAllocator::InitAvailReg()
+{
+    for (auto it : regInfo->GetAllRegs()) {
+        availRegSet[it] = true;
+    }
+}
+
+/* these registers cannot be allocated */
+bool DefaultO0RegAllocator::IsSpecialReg(regno_t reg) const
+{
+    return regInfo->IsSpecialReg(reg);
+}
+
+void DefaultO0RegAllocator::ReleaseReg(const RegOperand &regOpnd)
+{
+    ReleaseReg(regMap[regOpnd.GetRegisterNumber()]);
+}
+
+void DefaultO0RegAllocator::ReleaseReg(regno_t reg)
+{
+    DEBUG_ASSERT(reg < regInfo->GetAllRegNum(), "can't release virtual register");
+    liveReg.erase(reg);
+    if (!IsSpecialReg(reg)) {
+        availRegSet[reg] = true;
+    }
+}
+
+/* try to allocate a physical register for opnd; return true on success */
+bool DefaultO0RegAllocator::AllocatePhysicalRegister(const RegOperand &opnd)
+{
+    RegType regType = opnd.GetRegisterType();
+    regno_t regNo = opnd.GetRegisterNumber();
+    regno_t regStart = 0;
+    regno_t regEnd = 0;
+    GetPhysicalRegisterBank(regType, regStart, regEnd);
+
+    const auto opndRegIt = regLiveness.find(regNo);
+    for (regno_t reg = regStart; reg <= regEnd; ++reg) {
+        if (!availRegSet[reg]) {
+            continue;
+        }
+
+        if (opndRegIt != regLiveness.end()) {
+            const auto regIt = regLiveness.find(reg);
+            DEBUG_ASSERT(opndRegIt->second.size() == 1, "NIY, opnd reg liveness range must be 1.");
+            if (regIt != regLiveness.end() && CheckRangesOverlap(opndRegIt->second.front(), regIt->second)) {
+                continue;
+            }
+        }
+
+        regMap[opnd.GetRegisterNumber()] = reg;
+        availRegSet[reg] = false;
+        (void)liveReg.insert(reg); /* this register is live now */
+        return true;
+    }
+    return false;
+}
+
+/* If opnd is a callee saved register, save it in the prolog and restore it in the epilog */
+void DefaultO0RegAllocator::SaveCalleeSavedReg(const RegOperand &regOpnd)
+{
+    regno_t regNO = regOpnd.GetRegisterNumber();
+    auto phyReg = regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO;
+    /* when yieldpoint is enabled, skip the reserved register for yieldpoint. */
+    if (cgFunc->GetCG()->GenYieldPoint() && (regInfo->IsYieldPointReg(phyReg))) {
+        return;
+    }
+
+    if (regInfo->IsCalleeSavedReg(phyReg)) {
+        calleeSaveUsed.insert(phyReg);
+    }
+}
+
+uint32 DefaultO0RegAllocator::GetRegLivenessId(regno_t regNo)
+{
+    auto regIt = regLiveness.find(regNo);
+    return ((regIt == regLiveness.end()) ? 0 : regIt->second.back().second);
+}
+
+bool DefaultO0RegAllocator::CheckRangesOverlap(const std::pair<uint32, uint32> &range1,
+                                               const MapleVector<std::pair<uint32, uint32>> &ranges2) const
+{
+    /*
+     * Check whether range1 overlaps any range in ranges2.
+     * ranges2 is sorted, so std::lower_bound locates the first range that does
+     * not end at or before range1's start; if anything overlaps, that candidate does.
+     */
+    auto pos = std::lower_bound(
+        ranges2.begin(), ranges2.end(), range1,
+        [](const std::pair<uint32, uint32> &r2, const std::pair<uint32, uint32> &r1) {
+            return r1.first >= r2.second;
+        });
+    if (pos == ranges2.end()) {
+        return false;
+    }
+    auto &range2 = *pos;
+    if (std::max(range1.first, range2.first) <= std::min(range1.second, range2.second)) {
+        return true;
+    }
+    return false;
+}
+
+void DefaultO0RegAllocator::SetupRegLiveness(BB *bb)
+{
+    regLiveness.clear();
+
+    uint32 id = 1;
+    FOR_BB_INSNS_REV(insn, bb) {
+        if (!insn->IsMachineInstruction()) {
+            continue;
+        }
+        insn->SetId(id);
+        id++;
+        uint32 opndNum = insn->GetOperandSize();
+        const InsnDesc *curMd = insn->GetDesc();
+        for (uint32 i = 0; i < opndNum; i++) {
+            Operand &opnd = insn->GetOperand(i);
+            const OpndDesc *opndDesc = curMd->GetOpndDes(i);
+            if (opnd.IsRegister()) {
+                /* def-use is processed by use */
+                SetupRegLiveness(static_cast<RegOperand&>(opnd), insn->GetId(), !opndDesc->IsUse());
+            } else if (opnd.IsMemoryAccessOperand()) {
+                SetupRegLiveness(static_cast<MemOperand&>(opnd), insn->GetId());
+            } else if (opnd.IsList()) {
+                SetupRegLiveness(static_cast<ListOperand&>(opnd), insn->GetId(), opndDesc->IsDef());
+            }
+        }
+    }
+
+    /* clear the last empty range */
+    for (auto &regLivenessIt : regLiveness) {
+        auto &regLivenessRanges = regLivenessIt.second;
+        if (regLivenessRanges.back().first == 0) {
+            regLivenessRanges.pop_back();
+        }
+    }
+}
+
+void DefaultO0RegAllocator::SetupRegLiveness(MemOperand &opnd, uint32 insnId)
+{
+    /* base regOpnd is a use in O0 */
+    if (opnd.GetBaseRegister()) {
+        SetupRegLiveness(*opnd.GetBaseRegister(), insnId, false);
+    }
+    /* index regOpnd must be a use */
+    if (opnd.GetIndexRegister()) {
+        SetupRegLiveness(*opnd.GetIndexRegister(), insnId, false);
+    }
+}
+
+void DefaultO0RegAllocator::SetupRegLiveness(ListOperand &opnd, uint32 insnId, bool isDef)
+{
+    for (RegOperand *regOpnd : opnd.GetOperands()) {
+        SetupRegLiveness(*regOpnd, insnId, isDef);
+    }
+}
+
+void DefaultO0RegAllocator::SetupRegLiveness(RegOperand &opnd, uint32 insnId, bool isDef)
+{
+    MapleVector<std::pair<uint32, uint32>> ranges(alloc.Adapter());
+    auto regLivenessIt = regLiveness.emplace(opnd.GetRegisterNumber(), ranges).first;
+    auto &regLivenessRanges = regLivenessIt->second;
+    if (regLivenessRanges.empty()) {
+        regLivenessRanges.push_back(std::make_pair(0, 0));
+    }
+    auto &regLivenessLastRange = regLivenessRanges.back();
+    if (regLivenessLastRange.first == 0) {
+        regLivenessLastRange.first = insnId;
+    }
+    regLivenessLastRange.second = insnId;
+
+    /* create new range; only a phyReg needs to be segmented */
+    if (isDef && regInfo->IsAvailableReg(opnd.GetRegisterNumber())) {
+        regLivenessRanges.push_back(std::make_pair(0, 0));
+    }
+}
+
+void DefaultO0RegAllocator::AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx)
+{
+    if (!opnd.IsList()) {
+        return;
+    }
+    auto *listOpnds = &static_cast<ListOperand&>(opnd);
+    auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList();
+    for (auto *dstOpnd : listOpnds->GetOperands()) {
+        if (allocatedSet.find(dstOpnd) != allocatedSet.end()) {
+            auto &regOpnd = static_cast<RegOperand&>(*dstOpnd);
+            SaveCalleeSavedReg(regOpnd);
+            listOpndsNew->PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(regMap[regOpnd.GetRegisterNumber()],
+                                                                        regOpnd.GetSize(), regOpnd.GetRegisterType()));
+            continue; /* already allocated */
+        }
+        RegOperand *regOpnd = static_cast<RegOperand*>(AllocDestOpnd(*dstOpnd, insn));
+        DEBUG_ASSERT(regOpnd != nullptr, "null ptr check");
+        auto physRegno = regOpnd->GetRegisterNumber();
+        availRegSet[physRegno] = false;
+        (void)liveReg.insert(physRegno);
+        listOpndsNew->PushOpnd(
+            cgFunc->GetOpndBuilder()->CreatePReg(physRegno, regOpnd->GetSize(), regOpnd->GetRegisterType()));
+    }
+    insn.SetOperand(idx, *listOpndsNew);
+    for (auto *dstOpnd : listOpndsNew->GetOperands()) {
+        uint32 id = GetRegLivenessId(dstOpnd->GetRegisterNumber());
+        if (id != 0 && id <= insn.GetId()) {
+            ReleaseReg(*dstOpnd);
+        }
+    }
+}
+
+void DefaultO0RegAllocator::AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx)
+{
+    if (allocatedSet.find(&opnd) != allocatedSet.end()) {
+        /* free the live range of this register */
+        auto &regOpnd = static_cast<RegOperand&>(opnd);
+        SaveCalleeSavedReg(regOpnd);
+        if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) {
+            /* remember the physical machine register assigned */
+            regno_t regNO = regOpnd.GetRegisterNumber();
+            rememberRegs.push_back(regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO);
+        } else if (!insn.IsCondDef()) {
+            uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber());
+            if (id != 0 && id <= insn.GetId()) {
+                ReleaseReg(regOpnd);
+            }
+        }
+        insn.SetOperand(idx, cgFunc->GetOpndBuilder()->CreatePReg(regMap[regOpnd.GetRegisterNumber()],
+                                                                  regOpnd.GetSize(), regOpnd.GetRegisterType()));
+        return; /* already allocated */
+    }
+
+    if (opnd.IsRegister()) {
+        insn.SetOperand(idx, *AllocDestOpnd(opnd, insn));
+        SaveCalleeSavedReg(static_cast<RegOperand&>(opnd));
+    }
+}
+
+void DefaultO0RegAllocator::AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx)
+{
+    if (!opnd.IsList()) {
+        return;
+    }
+    auto *listOpnds = &static_cast<ListOperand&>(opnd);
+    auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList();
+    for (auto *srcOpnd : listOpnds->GetOperands()) {
+        if (allocatedSet.find(srcOpnd) != allocatedSet.end()) {
+            auto *regOpnd = static_cast<RegOperand*>(srcOpnd);
+            regno_t reg = regMap[regOpnd->GetRegisterNumber()];
+            availRegSet[reg] = false;
+            (void)liveReg.insert(reg); /* this register is live now */
+            listOpndsNew->PushOpnd(
+                cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType()));
+            continue; /* already allocated */
+        }
+        RegOperand *regOpnd = static_cast<RegOperand*>(AllocSrcOpnd(*srcOpnd));
+        CHECK_NULL_FATAL(regOpnd);
+        listOpndsNew->PushOpnd(*regOpnd);
+    }
+    insn.SetOperand(idx, *listOpndsNew);
+}
+
+void DefaultO0RegAllocator::AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx)
+{
+    if (allocatedSet.find(&opnd) != allocatedSet.end() && opnd.IsRegister()) {
+        auto *regOpnd = &static_cast<RegOperand&>(opnd);
+        regno_t reg = regMap[regOpnd->GetRegisterNumber()];
+        availRegSet[reg] = false;
+        (void)liveReg.insert(reg); /* this register is live now */
+        insn.SetOperand(idx, cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType()));
+    } else {
+        Operand *srcOpnd = AllocSrcOpnd(opnd);
+        CHECK_NULL_FATAL(srcOpnd);
+        insn.SetOperand(idx, *srcOpnd);
+    }
+}
+
+bool DefaultO0RegAllocator::AllocateRegisters()
+{
+    regInfo->Init();
+    InitAvailReg();
+    cgFunc->SetIsAfterRegAlloc();
+
+    FOR_ALL_BB_REV(bb, cgFunc) {
+        if (bb->IsEmpty()) {
+            continue;
+        }
+
+        SetupRegLiveness(bb);
+        FOR_BB_INSNS_REV(insn, bb) {
+            if (!insn->IsMachineInstruction()) {
+                continue;
+            }
+
+            /* handle inline assembly first due to specific def&use order */
+            if (insn->IsAsmInsn()) {
+                AllocHandleDestList(*insn, insn->GetOperand(kAsmClobberListOpnd), kAsmClobberListOpnd);
+                AllocHandleDestList(*insn, insn->GetOperand(kAsmOutputListOpnd), kAsmOutputListOpnd);
+                AllocHandleSrcList(*insn, insn->GetOperand(kAsmInputListOpnd), kAsmInputListOpnd);
+            }
+
+            const InsnDesc *curMd = insn->GetDesc();
+
+            for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the dest registers */
+                Operand &opnd =
insn->GetOperand(i); + if (!(opnd.IsRegister() && curMd->GetOpndDes(i)->IsDef())) { + continue; + } + if (opnd.IsList()) { + AllocHandleDestList(*insn, opnd, i); + } else { + AllocHandleDest(*insn, opnd, i); + } + } + + for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the src registers */ + Operand &opnd = insn->GetOperand(i); + if (!((opnd.IsRegister() && curMd->GetOpndDes(i)->IsUse()) || opnd.IsMemoryAccessOperand())) { + continue; + } + if (opnd.IsList()) { + AllocHandleSrcList(*insn, opnd, i); + } else { + AllocHandleSrc(*insn, opnd, i); + } + } + + /* hack. a better way to handle intrinsics? */ + for (auto rememberReg : rememberRegs) { + DEBUG_ASSERT(rememberReg != regInfo->GetInvalidReg(), "not a valid register"); + ReleaseReg(rememberReg); + } + rememberRegs.clear(); + } + } + /* + * we store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + * notice the order here : the first callee saved reg is expected to be RFP. + */ + regInfo->Fini(); + regInfo->SaveCalleeSavedReg(calleeSaveUsed); + return true; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_lsra.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_lsra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..06d1b8a1b6dfc4e35a1a03ccd3a52a13e4cdfc67 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_alloc_lsra.cpp @@ -0,0 +1,2447 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "reg_alloc_lsra.h"
+#include <iostream>
+#include <iomanip>
+#include <fstream>
+#include <algorithm>
+#include "loop.h"
+
+namespace maplebe {
+/*
+ * ==================
+ * = Linear Scan RA
+ * ==================
+ */
+#define LSRA_DUMP (CG_DEBUG_FUNC(*cgFunc))
+namespace {
+constexpr uint32 kSpilled = 1;
+constexpr uint32 kMinLiveIntervalLength = 20;
+constexpr uint32 kPrintedActiveListLength = 10;
+/* Here, kLoopWeight is a fine-tuned empirical parameter */
+constexpr uint32 kLoopWeight = 4;
+}  // namespace
+
+#define IN_SPILL_RANGE                                                                                  \
+    (cgFunc->GetName().find(CGOptions::GetDumpFunc()) != std::string::npos && ++debugSpillCnt &&        \
+     (CGOptions::GetSpillRangesBegin() < debugSpillCnt) && (debugSpillCnt < CGOptions::GetSpillRangesEnd()))
+
+#ifdef RA_PERF_ANALYSIS
+static long bfsUS = 0;
+static long liveIntervalUS = 0;
+static long holesUS = 0;
+static long lsraUS = 0;
+static long finalizeUS = 0;
+static long totalUS = 0;
+
+extern void printLSRATime()
+{
+    std::cout << "============================================================\n";
+    std::cout << "    LSRA sub-phase time information    \n";
+    std::cout << "============================================================\n";
+    std::cout << "BFS BB sorting cost: " << bfsUS << "us \n";
+    std::cout << "live interval computing cost: " << liveIntervalUS << "us \n";
+    std::cout << "live range approximation cost: " << holesUS << "us \n";
+    std::cout << "LSRA cost: " << lsraUS << "us \n";
+    std::cout << "finalize cost: " << finalizeUS << "us \n";
+    std::cout << "LSRA total cost: " << totalUS << "us \n";
+    std::cout << "============================================================\n";
+}
+#endif
+
+/*
+ * This LSRA implementation is an interpretation of the [Poletto97] paper.
+ * BFS BB ordering is used to order the instructions. The live intervals are based on
+ * this instruction order. All vreg defines should come before any use, otherwise a
+ * warning is given.
+ * Live intervals are traversed in order, from lower instruction order to higher.
+ * When a live interval is encountered for the first time, it is assumed to be live and
+ * placed inside the 'active' structure until the vreg's last access. While a vreg is in
+ * 'active', it occupies a physical register and no other vreg can be allocated the same
+ * physical register.
+ */
+void LSRALinearScanRegAllocator::PrintRegSet(const MapleSet<uint32> &set, const std::string &str) const
+{
+    LogInfo::MapleLogger() << str;
+    for (auto reg : set) {
+        LogInfo::MapleLogger() << " " << reg;
+    }
+    LogInfo::MapleLogger() << "\n";
+}
+
+bool LSRALinearScanRegAllocator::CheckForReg(Operand &opnd, const Insn &insn, const LiveInterval &li, regno_t regNO,
+                                             bool isDef) const
+{
+    if (!opnd.IsRegister()) {
+        return false;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(opnd);
+    if (regOpnd.GetRegisterType() == kRegTyCc || regOpnd.GetRegisterType() == kRegTyVary) {
+        return false;
+    }
+    if (regOpnd.GetRegisterNumber() == regNO) {
+        LogInfo::MapleLogger() << "set object circle at " << insn.GetId() << "," << li.GetRegNO()
+                               << " size 5 fillcolor rgb \"";
+        if (isDef) {
+            LogInfo::MapleLogger() << "black\"\n";
+        } else {
+            LogInfo::MapleLogger() << "orange\"\n";
+        }
+    }
+    return true;
+}
+
+void LSRALinearScanRegAllocator::PrintLiveRanges(const LiveInterval &li) const
+{
+    if (li.GetAssignedReg() != 0) {
+        uint32 base = (li.GetRegType() == kRegTyInt) ?
firstIntReg : firstFpReg; + LogInfo::MapleLogger() << "(assigned R" << (li.GetAssignedReg() - base) << ")"; + } + if (li.GetStackSlot() == kSpilled) { + LogInfo::MapleLogger() << "(spill)"; + } + for (auto range : li.GetRanges()) { + LogInfo::MapleLogger() << "[" << range.GetStart() << ", " << range.GetEnd() << "]" + << " "; + } + if (li.GetSplitNext() != nullptr) { + LogInfo::MapleLogger() << "### SPLIT ### "; + PrintLiveRanges(*li.GetSplitNext()); + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintAllLiveRanges() const +{ + LogInfo::MapleLogger() << "func: " << cgFunc->GetName() << "\n"; + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + LogInfo::MapleLogger() << "vreg" << li->GetRegNO() << ": "; + if (li->GetSplitParent() != nullptr) { + PrintLiveRanges(*li->GetSplitParent()); + } else { + PrintLiveRanges(*li); + } + } +} + +/* + * This is a support routine to compute the overlapping live intervals in graph form. + * The output file can be viewed by gnuplot. + * Despite the function name of LiveRanges, it is using live intervals. + */ +void LSRALinearScanRegAllocator::PrintLiveRangesGraph() const +{ + /* ================= Output to plot.pg =============== */ + std::ofstream out("plot.pg"); + CHECK_FATAL(out.is_open(), "Failed to open output file: plot.pg"); + std::streambuf *coutBuf = LogInfo::MapleLogger().rdbuf(); /* old buf */ + LogInfo::MapleLogger().rdbuf(out.rdbuf()); /* new buf */ + + LogInfo::MapleLogger() << "#!/usr/bin/gnuplot\n"; + LogInfo::MapleLogger() << "#maxInsnNum " << maxInsnNum << "\n"; + LogInfo::MapleLogger() << "#minVregNum " << minVregNum << "\n"; + LogInfo::MapleLogger() << "#maxVregNum " << maxVregNum << "\n"; + LogInfo::MapleLogger() << "reset\nset terminal png\n"; + LogInfo::MapleLogger() << "set xrange [1:" << maxInsnNum << "]\n"; + LogInfo::MapleLogger() << "set grid\nset style data linespoints\n"; + LogInfo::MapleLogger() << "set datafile missing '0'\n"; + std::vector> graph(maxVregNum, std::vector(maxInsnNum, 0)); + + uint32 minY = 0xFFFFFFFF; + uint32 maxY = 0; + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + uint32 regNO = li->GetRegNO(); + if ((li->GetLastUse() - li->GetFirstDef()) < kMinLiveIntervalLength) { + continue; + } + if (regNO < minY) { + minY = regNO; + } + if (regNO > maxY) { + maxY = regNO; + } + uint32 n; + for (n = 0; n <= (li->GetFirstDef() - 1); ++n) { + graph[regNO - minVregNum][n] = 0; + } + if (li->GetLastUse() >= n) { + for (; n <= (li->GetLastUse() - 1); ++n) { + graph[regNO - minVregNum][n] = regNO; + } + } + for (; n < maxInsnNum; ++n) { + graph[regNO - minVregNum][n] = 0; + } + + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS(insn, bb) { + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 iSecond = 0; iSecond < opndNum; ++iSecond) { + Operand &opnd = insn->GetOperand(iSecond); + const OpndDesc *regProp = md->GetOpndDes(iSecond); + DEBUG_ASSERT(regProp != nullptr, + "pointer is null in LSRALinearScanRegAllocator::PrintLiveRangesGraph"); + bool isDef = regProp->IsRegDef(); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + (void)CheckForReg(*op, *insn, *li, regNO, isDef); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && !CheckForReg(*base, 
*insn, *li, regNO, false)) { + continue; + } + if (offset != nullptr && !CheckForReg(*offset, *insn, *li, regNO, false)) { + continue; + } + } else { + (void)CheckForReg(opnd, *insn, *li, regNO, isDef); + } + } + } + } + } + LogInfo::MapleLogger() << "set yrange [" << (minY - 1) << ":" << (maxY + 1) << "]\n"; + + LogInfo::MapleLogger() << "plot \"plot.dat\" using 1:2 title \"R" << minVregNum << "\""; + for (uint32 i = 1; i < ((maxVregNum - minVregNum) + 1); ++i) { + LogInfo::MapleLogger() << ", \\\n\t\"\" using 1:" << (i + kDivide2) << " title \"R" << (minVregNum + i) << "\""; + } + LogInfo::MapleLogger() << ";\n"; + + /* ================= Output to plot.dat =============== */ + std::ofstream out2("plot.dat"); + CHECK_FATAL(out2.is_open(), "Failed to open output file: plot.dat"); + LogInfo::MapleLogger().rdbuf(out2.rdbuf()); /* new buf */ + LogInfo::MapleLogger() << "##reg"; + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " R" << i; + } + LogInfo::MapleLogger() << "\n"; + for (uint32 n = 0; n < maxInsnNum; ++n) { + LogInfo::MapleLogger() << (n + 1); + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " " << graph[i - minVregNum][n]; + } + LogInfo::MapleLogger() << "\n"; + } + LogInfo::MapleLogger().rdbuf(coutBuf); +} + +void LSRALinearScanRegAllocator::SpillStackMapInfo() +{ + const auto &referenceMapInsns = cgFunc->GetStackMapInsns(); + + for (auto *li : liveIntervalsArray) { + if (li == nullptr) { + continue; + } + + int32 regNO = li->GetRegNO(); + bool toSpill = false; + for (auto *insn : referenceMapInsns) { + auto &deoptInfo = insn->GetStackMap()->GetDeoptInfo(); + const auto &deoptBundleInfo = deoptInfo.GetDeoptBundleInfo(); + for (const auto &item : deoptBundleInfo) { + const auto *opnd = item.second; + if (opnd->IsRegister() && (regNO == static_cast(opnd)->GetRegisterNumber())) { + li->SetStackSlot(kSpilled); + li->SetShouldSave(false); + toSpill = true; + break; + } + } + if (toSpill) { + break; + } + if (!cgFunc->IsRegReference(regNO)) { + continue; + } + auto *stackMapLiveIn = insn->GetStackMapLiveIn(); + if (stackMapLiveIn->GetInfo().count(regNO) != 0) { + auto itr = dereivedRef2Base.find(regNO); + if (itr != dereivedRef2Base.end()) { + DEBUG_ASSERT(liveIntervalsArray[itr->second] != nullptr, "empty li"); + liveIntervalsArray[itr->second]->SetStackSlot(kSpilled); + liveIntervalsArray[itr->second]->SetShouldSave(false); + } + li->SetStackSlot(kSpilled); + li->SetShouldSave(false); + break; + } + } + } +} + +void LSRALinearScanRegAllocator::PrintLiveInterval(const LiveInterval &li, const std::string &str) const +{ + LogInfo::MapleLogger() << str << "\n"; + if (li.GetIsCall() != nullptr) { + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " isCall"; + } else if (li.GetPhysUse()) { + LogInfo::MapleLogger() << "\tregNO " << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " physUse " << li.GetPhysUse(); + LogInfo::MapleLogger() << " endByCall " << li.IsEndByCall(); + } else { + /* show regno/firstDef/lastUse with 5/8/8 width respectively */ + LogInfo::MapleLogger() << "\tregNO " << std::setw(5) << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << std::setw(8) << li.GetFirstDef(); + LogInfo::MapleLogger() << " lastUse " << std::setw(8) << li.GetLastUse(); + LogInfo::MapleLogger() << " assigned " << li.GetAssignedReg(); + LogInfo::MapleLogger() << " refCount " << li.GetRefCount(); + LogInfo::MapleLogger() << " priority 
" << li.GetPriority(); + } + LogInfo::MapleLogger() << " object_address 0x" << std::hex << &li << std::dec << "\n"; +} + +void LSRALinearScanRegAllocator::PrintParamQueue(const std::string &str) +{ + LogInfo::MapleLogger() << str << "\n"; + for (SingleQue &que : intParamQueue) { + if (que.empty()) { + continue; + } + LiveInterval *li = que.front(); + LiveInterval *last = que.back(); + PrintLiveInterval(*li, ""); + while (li != last) { + que.pop_front(); + que.push_back(li); + li = que.front(); + PrintLiveInterval(*li, ""); + } + que.pop_front(); + que.push_back(li); + } +} + +void LSRALinearScanRegAllocator::PrintCallQueue(const std::string &str) const +{ + LogInfo::MapleLogger() << str << "\n"; + for (auto callInsnID : callQueue) { + LogInfo::MapleLogger() << callInsnID << " "; + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintActiveList(const std::string &str, uint32 len) const +{ + uint32 count = 0; + LogInfo::MapleLogger() << str << " " << active.size() << "\n"; + for (auto *li : active) { + PrintLiveInterval(*li, ""); + ++count; + if ((len != 0) && (count == len)) { + break; + } + } +} + +void LSRALinearScanRegAllocator::PrintActiveListSimple() const +{ + for (const auto *li : active) { + uint32 assignedReg = li->GetAssignedReg(); + LogInfo::MapleLogger() << li->GetRegNO() << "(" << assignedReg << ", "; + if (li->GetPhysUse()) { + LogInfo::MapleLogger() << "p) "; + } else { + LogInfo::MapleLogger() << li->GetFirstAcrossedCall(); + } + LogInfo::MapleLogger() << "<" << li->GetFirstDef() << "," << li->GetLastUse() << ">) "; + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintLiveIntervals() const +{ + /* vreg LogInfo */ + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + PrintLiveInterval(*li, ""); + } + LogInfo::MapleLogger() << "\n"; + /* preg LogInfo */ + for (auto param : intParamQueue) { + for (auto *li : param) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + PrintLiveInterval(*li, ""); + } + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::DebugCheckActiveList() const +{ + LiveInterval *prev = nullptr; + for (auto *li : active) { + if (prev != nullptr) { + if ((li->GetRegNO() <= regInfo->GetLastParamsFpReg()) && + (prev->GetRegNO() > regInfo->GetLastParamsFpReg())) { + if (li->GetFirstDef() < prev->GetFirstDef()) { + LogInfo::MapleLogger() << "ERRer: active list with out of order phys + vreg\n"; + PrintLiveInterval(*prev, "prev"); + PrintLiveInterval(*li, "current"); + PrintActiveList("Active", kPrintedActiveListLength); + } + } + if ((li->GetRegNO() <= regInfo->GetLastParamsFpReg()) && + (prev->GetRegNO() <= regInfo->GetLastParamsFpReg())) { + if (li->GetFirstDef() < prev->GetFirstDef()) { + LogInfo::MapleLogger() << "ERRer: active list with out of order phys reg use\n"; + PrintLiveInterval(*prev, "prev"); + PrintLiveInterval(*li, "current"); + PrintActiveList("Active", kPrintedActiveListLength); + } + } + } else { + prev = li; + } + } +} + +/* + * Prepare the free physical register pool for allocation. + * When a physical register is allocated, it is removed from the pool. + * The physical register is re-inserted into the pool when the associated live + * interval has ended. 
+ */ +void LSRALinearScanRegAllocator::InitFreeRegPool() +{ + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (!regInfo->IsAvailableReg(regNO)) { + continue; + } + if (regInfo->IsGPRegister(regNO)) { + if (regInfo->IsYieldPointReg(regNO)) { + continue; + } + /* ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + intSpillRegSet.push_back(regNO - firstIntReg); + continue; + } + if (regInfo->IsCalleeSavedReg(regNO)) { + /* callee-saved registers */ + (void)intCalleeRegSet.insert(regNO - firstIntReg); + intCalleeMask |= 1u << (regNO - firstIntReg); + } else { + /* caller-saved registers */ + (void)intCallerRegSet.insert(regNO - firstIntReg); + intCallerMask |= 1u << (regNO - firstIntReg); + } + } else { + /* fp ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + fpSpillRegSet.push_back(regNO - firstFpReg); + continue; + } + if (regInfo->IsCalleeSavedReg(regNO)) { + /* fp callee-saved registers */ + (void)fpCalleeRegSet.insert(regNO - firstFpReg); + fpCalleeMask |= 1u << (regNO - firstFpReg); + } else { + /* fp caller-saved registers */ + (void)fpCallerRegSet.insert(regNO - firstFpReg); + fpCallerMask |= 1u << (regNO - firstFpReg); + } + } + } + + if (LSRA_DUMP) { + PrintRegSet(intCallerRegSet, "ALLOCATABLE_INT_CALLER"); + PrintRegSet(intCalleeRegSet, "ALLOCATABLE_INT_CALLEE"); + PrintRegSet(intParamRegSet, "ALLOCATABLE_INT_PARAM"); + PrintRegSet(fpCallerRegSet, "ALLOCATABLE_FP_CALLER"); + PrintRegSet(fpCalleeRegSet, "ALLOCATABLE_FP_CALLEE"); + PrintRegSet(fpParamRegSet, "ALLOCATABLE_FP_PARAM"); + LogInfo::MapleLogger() << "INT_SPILL_REGS"; + for (uint32 intSpillRegNO : intSpillRegSet) { + LogInfo::MapleLogger() << " " << intSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "FP_SPILL_REGS"; + for (uint32 fpSpillRegNO : fpSpillRegSet) { + LogInfo::MapleLogger() << " " << fpSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::hex; + LogInfo::MapleLogger() << "INT_CALLER_MASK " << intCallerMask << "\n"; + LogInfo::MapleLogger() << "INT_CALLEE_MASK " << intCalleeMask << "\n"; + LogInfo::MapleLogger() << "INT_PARAM_MASK " << intParamMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLER_FP_MASK " << fpCallerMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLEE_FP_MASK " << fpCalleeMask << "\n"; + LogInfo::MapleLogger() << "FP_PARAM_FP_MASK " << fpParamMask << "\n"; + LogInfo::MapleLogger() << std::dec; + } +} + +void LSRALinearScanRegAllocator::RecordPhysRegs(const RegOperand ®Opnd, uint32 insnNum, bool isDef) +{ + RegType regType = regOpnd.GetRegisterType(); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + if (regInfo->IsUntouchableReg(regNO)) { + return; + } + if (!regInfo->IsPreAssignedReg(regNO)) { + return; + } + if (isDef) { + /* parameter/return register def is assumed to be live until a call. 
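+         * Each such def opens a short fixed interval (firstDef == physUse until a
+         * later use advances physUse), queued per physical register in
+         * intParamQueue / fpParamQueue.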
*/ + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetRegType(regType); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regType == kRegTyInt) { + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + } else { + if (regType == kRegTyInt) { + CHECK_FATAL(!intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } else { + CHECK_FATAL(!fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } + } +} + +void LSRALinearScanRegAllocator::UpdateLiveIntervalState(const BB &bb, LiveInterval &li) const +{ + if (bb.IsCatch()) { + li.SetInCatchState(); + } else { + li.SetNotInCatchState(); + } + + if (bb.GetInternalFlag1()) { + li.SetInCleanupState(); + } else { + li.SetNotInCleanupState(bb.GetId() == 1); + } +} + +void LSRALinearScanRegAllocator::UpdateRegUsedInfo(LiveInterval &li, regno_t regNO) +{ + uint32 index = regNO / (sizeof(uint64) * k8ByteSize); + uint64 bit = regNO % (sizeof(uint64) * k8ByteSize); + if ((regUsedInBB[index] & (static_cast(1) << bit)) != 0) { + li.SetMultiUseInBB(true); + } + regUsedInBB[index] |= (static_cast(1) << bit); + + if (minVregNum > regNO) { + minVregNum = regNO; + } + if (maxVregNum < regNO) { + maxVregNum = regNO; + } +} + +/* main entry function for live interval computation. */ +void LSRALinearScanRegAllocator::SetupLiveInterval(Operand &opnd, Insn &insn, bool isDef, uint32 &nUses) +{ + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + uint32 insnNum = insn.GetId(); + if (regOpnd.IsPhysicalRegister()) { + RecordPhysRegs(regOpnd, insnNum, isDef); + return; + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + + LiveInterval *li = nullptr; + uint32 regNO = regOpnd.GetRegisterNumber(); + if (liveIntervalsArray[regNO] == nullptr) { + li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + liveIntervalsArray[regNO] = li; + liQue.push_back(li); + } else { + li = liveIntervalsArray[regNO]; + } + li->SetRegType(regType); + + BB *curBB = insn.GetBB(); + if (isDef) { + /* never set to 0 before, why consider this condition ? */ + if (li->GetFirstDef() == 0) { + li->SetFirstDef(insnNum); + li->SetLastUse(insnNum + 1); + } else if (!curBB->IsUnreachable()) { + if (li->GetLastUse() < insnNum || li->IsUseBeforeDef()) { + li->SetLastUse(insnNum + 1); + } + } + /* + * try-catch related + * Not set when extending live interval with bb's livein in ComputeLiveInterval. + */ + li->SetResultCount(li->GetResultCount() + 1); + } else { + if (li->GetFirstDef() == 0) { + DEBUG_ASSERT(false, "SetupLiveInterval: use before def"); + } + /* + * In ComputeLiveInterval when extending live interval using + * live-out information, li created does not have a type. 
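+         * The register type is therefore refreshed on every reference above,
+         * and the last-use position only advances for reachable blocks.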
+ */ + if (!curBB->IsUnreachable()) { + li->SetLastUse(insnNum); + } + ++nUses; + } + UpdateLiveIntervalState(*curBB, *li); + + li->SetRefCount(li->GetRefCount() + 1); + li->AddUsePositions(insnNum); + UpdateRegUsedInfo(*li, regNO); + + /* setup the def/use point for it */ + DEBUG_ASSERT(regNO < liveIntervalsArray.size(), "out of range of vector liveIntervalsArray"); +} + +/* + * Support 'hole' in LSRA. + * For a live interval, there might be multiple segments of live ranges, + * and between these segments a 'hole'. + * Some other short lived vreg can go into these 'holes'. + * + * from : starting instruction sequence id + * to : ending instruction sequence id + */ +void LSRALinearScanRegAllocator::LiveInterval::AddRange(uint32 from, uint32 to) +{ + if (ranges.empty()) { + ranges.emplace_back(LinearRange(from, to)); + return; + } + /* create a new range */ + if (to < ranges.front().GetStart()) { + (void)ranges.insert(ranges.begin(), LinearRange(from, to)); + return; + } + DEBUG_ASSERT(from <= ranges.front().GetEnd(), "No possible on reverse traverse."); + if (to >= ranges.front().GetEnd() && from < ranges.front().GetStart()) { + ranges.front().SetStart(from); + ranges.front().SetEnd(to); + return; + } + /* extend it's range forward. e.g. def-use opnd */ + if (to >= ranges.front().GetStart() && from < ranges.front().GetStart()) { + ranges.front().SetStart(from); + return; + } + return; +} + +/* See if a vreg can fit in one of the holes of a longer live interval. */ +uint32 LSRALinearScanRegAllocator::FillInHole(const LiveInterval &li) +{ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); ++it) { + auto *ili = static_cast(*it); + + /* + * If ili is part in cleanup, the hole info will be not correct, + * since cleanup bb do not have edge to normal func bb, and the + * live-out info will not correct. 
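+         * Skipping such intervals below keeps the hole information
+         * conservative rather than wrong.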
+         */
+        if (!ili->IsAllOutCleanup() || ili->IsAllInCatch()) {
+            continue;
+        }
+
+        if (ili->GetRegType() != li.GetRegType() || ili->GetStackSlot() != 0xFFFFFFFF ||
+            ili->GetLiChild() != nullptr || ili->GetAssignedReg() == 0) {
+            continue;
+        }
+        /* todo: find available holes in ili->GetRanges() */
+    }
+    return 0;
+}
+
+uint32 LSRALinearScanRegAllocator::LiveInterval::GetUsePosAfter(uint32 pos) const
+{
+    for (auto usePos : usePositions) {
+        if (usePos > pos) {
+            return usePos;
+        }
+    }
+    return 0;
+}
+
+MapleVector<LSRALinearScanRegAllocator::LinearRange>::iterator LSRALinearScanRegAllocator::LiveInterval::FindPosRange(
+    uint32 pos)
+{
+    while (rangeFinder != ranges.end()) {
+        if (rangeFinder->GetEnd() > pos) {
+            break;
+        }
+        ++rangeFinder;
+    }
+    return rangeFinder;
+}
+
+void LSRALinearScanRegAllocator::SetupIntervalRangesByOperand(Operand &opnd, const Insn &insn, uint32 blockFrom,
+                                                              bool isDef)
+{
+    auto &regOpnd = static_cast<RegOperand&>(opnd);
+    RegType regType = regOpnd.GetRegisterType();
+    if (regType == kRegTyCc || regType == kRegTyVary) {
+        return;
+    }
+    regno_t regNO = regOpnd.GetRegisterNumber();
+    if (regNO <= regInfo->GetAllRegNum()) {
+        return;
+    }
+    if (!isDef) {
+        liveIntervalsArray[regNO]->AddRange(blockFrom, insn.GetId());
+        return;
+    }
+    if (liveIntervalsArray[regNO]->GetRanges().empty()) {
+        liveIntervalsArray[regNO]->AddRange(insn.GetId(), insn.GetId());
+    } else {
+        liveIntervalsArray[regNO]->GetRanges().front().SetStart(insn.GetId());
+    }
+    liveIntervalsArray[regNO]->AddUsePositions(insn.GetId());
+}
+
+void LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand(const Insn &insn, uint32 blockFrom)
+{
+    const InsnDesc *md = insn.GetDesc();
+    uint32 opndNum = insn.GetOperandSize();
+    for (uint32 i = 0; i < opndNum; ++i) {
+        Operand &opnd = insn.GetOperand(i);
+        if (opnd.IsMemoryAccessOperand()) {
+            auto &memOpnd = static_cast<MemOperand&>(opnd);
+            Operand *base = memOpnd.GetBaseRegister();
+            Operand *offset = memOpnd.GetIndexRegister();
+            if (base != nullptr && base->IsRegister()) {
+                SetupIntervalRangesByOperand(*base, insn, blockFrom, false);
+            }
+            if (offset != nullptr && offset->IsRegister()) {
+                SetupIntervalRangesByOperand(*offset, insn, blockFrom, false);
+            }
+        } else if (opnd.IsRegister()) {
+            const OpndDesc *regProp = md->GetOpndDes(i);
+            DEBUG_ASSERT(regProp != nullptr,
+                         "pointer is null in LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand");
+            bool isDef = !regProp->IsRegUse();
+            SetupIntervalRangesByOperand(opnd, insn, blockFrom, isDef);
+        }
+    }
+}
+
+/* Support finding holes by searching for ranges where holes exist. */
+void LSRALinearScanRegAllocator::BuildIntervalRanges()
+{
+    size_t bbIdx = bfs->sortedBBs.size();
+    if (bbIdx == 0) {
+        return;
+    }
+
+    do {
+        --bbIdx;
+        BB *bb = bfs->sortedBBs[bbIdx];
+        if (bb->GetFirstInsn() == nullptr || bb->GetLastInsn() == nullptr) {
+            continue;
+        }
+        uint32 blockFrom = bb->GetFirstInsn()->GetId();
+        uint32 blockTo = bb->GetLastInsn()->GetId() + 1;
+
+        for (auto regNO : bb->GetLiveOutRegNO()) {
+            if (regNO < regInfo->GetAllRegNum()) {
+                /* Do not consider physical regs. */
+                continue;
+            }
+            liveIntervalsArray[regNO]->AddRange(blockFrom, blockTo);
+        }
+
+        FOR_BB_INSNS_REV(insn, bb) {
+            BuildIntervalRangesForEachOperand(*insn, blockFrom);
+        }
+    } while (bbIdx != 0);
+}
+
+/* Extend live interval with live-in info */
+void LSRALinearScanRegAllocator::UpdateLiveIntervalByLiveIn(const BB &bb, uint32 insnNum)
+{
+    for (const auto &regNO : bb.GetLiveInRegNO()) {
+        if (!regInfo->IsVirtualRegister(regNO)) {
+            /* Do not consider physical regs.
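+             * Live-in physical registers are modelled separately via the
+             * parameter queues (see UpdateParamLiveIntervalByLiveIn).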
*/ + continue; + } + DEBUG_ASSERT(regNO < liveIntervalsArray.size(), "index out of range."); + LiveInterval *liOuter = liveIntervalsArray[regNO]; + if (liOuter != nullptr || (bb.IsEmpty() && bb.GetId() != 1)) { + continue; + } + /* + * try-catch related + * Since it is livein but not seen before, its a use before def + * spill it temporarily + */ + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(kSpilled); + liveIntervalsArray[regNO] = li; + li->SetFirstDef(insnNum); + liQue.push_back(li); + + li->SetUseBeforeDef(true); + + if (!bb.IsUnreachable()) { + if (bb.GetId() != 1) { + LogInfo::MapleLogger() << "ERROR: " << regNO << " use before def in bb " << bb.GetId() << " : " + << cgFunc->GetName() << "\n"; + DEBUG_ASSERT(false, "There should only be [use before def in bb 1], temporarily."); + } + LogInfo::MapleLogger() << "WARNING: " << regNO << " use before def in bb " << bb.GetId() << " : " + << cgFunc->GetName() << "\n"; + } + UpdateLiveIntervalState(bb, *li); + } +} + +/* traverse live in regNO, for each live in regNO create a new liveinterval */ +void LSRALinearScanRegAllocator::UpdateParamLiveIntervalByLiveIn(const BB &bb, uint32 insnNum) +{ + for (const auto ®NO : bb.GetLiveInRegNO()) { + if (!regInfo->IsPreAssignedReg(regNO)) { + continue; + } + auto *li = memPool->New(*memPool); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regInfo->IsGPRegister(regNO)) { + li->SetRegType(kRegTyInt); + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + li->SetRegType(kRegTyFloat); + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + UpdateLiveIntervalState(bb, *li); + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIn(BB &bb, uint32 insnNum) +{ + if (bb.IsEmpty() && bb.GetId() != 1) { + return; + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEOUT:"; + for (const auto &liveOutRegNO : bb.GetLiveOutRegNO()) { + LogInfo::MapleLogger() << " " << liveOutRegNO; + } + LogInfo::MapleLogger() << ".\n"; + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEIN:"; + for (const auto &liveInRegNO : bb.GetLiveInRegNO()) { + LogInfo::MapleLogger() << " " << liveInRegNO; + } + LogInfo::MapleLogger() << ".\n"; + } + + UpdateLiveIntervalByLiveIn(bb, insnNum); + + if (bb.GetFirstInsn() == nullptr) { + return; + } + if (!bb.GetEhPreds().empty()) { + bb.InsertLiveInRegNO(firstIntReg); + bb.InsertLiveInRegNO(firstIntReg + 1); + } + UpdateParamLiveIntervalByLiveIn(bb, insnNum); + if (!bb.GetEhPreds().empty()) { + bb.EraseLiveInRegNO(firstIntReg); + bb.EraseLiveInRegNO(firstIntReg + 1); + } +} + +void LSRALinearScanRegAllocator::ComputeLiveOut(BB &bb, uint32 insnNum) +{ + /* + * traverse live out regNO + * for each live out regNO if the last corresponding live interval is created within this bb + * update this lastUse of li to the end of BB + */ + for (const auto ®NO : bb.GetLiveOutRegNO()) { + if (regInfo->IsPreAssignedReg(static_cast(regNO))) { + LiveInterval *liOut = nullptr; + if (regInfo->IsGPRegister(regNO)) { + if (intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty()) { + continue; + } + liOut = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } else { + if (fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty()) { + continue; + } + liOut = 
fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } + } + /* Extend live interval with live-out info */ + LiveInterval *li = liveIntervalsArray[regNO]; + if (li != nullptr && !bb.IsEmpty()) { + li->SetLastUse(bb.GetLastInsn()->GetId()); + UpdateLiveIntervalState(bb, *li); + if (bb.GetKind() == BB::kBBRangeGoto) { + li->SetSplitForbid(true); + } + } + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIntervalForEachOperand(Insn &insn) +{ + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + /* + * we need to process src opnd first just in case the src/dest vreg are the same and the src vreg belongs to the + * last interval. + */ + for (int32 i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + const OpndDesc *opndDesc = md->GetOpndDes(i); + DEBUG_ASSERT(opndDesc != nullptr, "ptr null check."); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + SetupLiveInterval(*op, insn, opndDesc->IsDef(), numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + bool isDef = false; + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveInterval(*base, insn, isDef, numUses); + } + if (offset != nullptr) { + SetupLiveInterval(*offset, insn, isDef, numUses); + } + } else { + /* Specifically, the "use-def" opnd is treated as a "use" opnd */ + bool isUse = opndDesc->IsRegUse(); + SetupLiveInterval(opnd, insn, !isUse, numUses); + } + } + if (numUses >= regInfo->GetNormalUseOperandNum()) { + needExtraSpillReg = true; + } +} + +void LSRALinearScanRegAllocator::ComputeLoopLiveIntervalPriority(const CGFuncLoops &loop) +{ + for (const auto *lp : loop.GetInnerLoops()) { + /* handle nested Loops */ + ComputeLoopLiveIntervalPriority(*lp); + } + for (auto *bb : loop.GetLoopMembers()) { + if (bb->IsEmpty()) { + continue; + } + FOR_BB_INSNS(insn, bb) { + ComputeLoopLiveIntervalPriorityInInsn(*insn); + } + loopBBRegSet.clear(); + } +} + +void LSRALinearScanRegAllocator::ComputeLoopLiveIntervalPriorityInInsn(const Insn &insn) +{ + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsPhysicalRegister()) { + continue; + } + uint32 regNo = regOpnd.GetRegisterNumber(); + LiveInterval *li = liveIntervalsArray[regNo]; + if (li == nullptr || loopBBRegSet.find(regNo) != loopBBRegSet.end()) { + continue; + } + li->SetPriority(kLoopWeight * li->GetPriority()); + (void)loopBBRegSet.insert(regNo); + } + return; +} + +void LSRALinearScanRegAllocator::ComputeLiveInterval() +{ + liQue.clear(); + uint32 regUsedInBBSz = (cgFunc->GetMaxVReg() / (sizeof(uint64) * k8ByteSize) + 1); + regUsedInBB.resize(regUsedInBBSz, 0); + uint32 insnNum = 1; + for (BB *bb : bfs->sortedBBs) { + ComputeLiveIn(*bb, insnNum); + FOR_BB_INSNS(insn, bb) { + insn->SetId(insnNum); + /* skip comment and debug insn */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + + /* RecordCall, remember calls for caller/callee allocation. 
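+             * Note: a throw-only call with no EH successor is not queued, so
+             * it does not force caller-save handling in NeedSaveAcrossCall.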
*/ + if (insn->IsCall()) { + if (!insn->GetIsThrow() || !bb->GetEhSuccs().empty()) { + callQueue.emplace_back(insn->GetId()); + } + } + + ComputeLiveIntervalForEachOperand(*insn); + + /* handle return value for call insn */ + if (insn->IsCall()) { + /* For all backend architectures so far, adopt all RetRegs as Def via this insn, + * and then their live begins. + * next optimization, you can determine which registers are actually used. + */ + RegOperand *retReg = nullptr; + if (insn->GetRetType() == Insn::kRegInt) { + for (int i = 0; i < regInfo->GetIntRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetIntRetReg(i), k64BitSize, kRegTyInt); + RecordPhysRegs(*retReg, insnNum, true); + } + } else { + for (int i = 0; i < regInfo->GetFpRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetFpRetReg(i), k64BitSize, kRegTyFloat); + RecordPhysRegs(*retReg, insnNum, true); + } + } + } + ++insnNum; + } + + ComputeLiveOut(*bb, insnNum); + } + + maxInsnNum = insnNum - 1; /* insn_num started from 1 */ + regUsedInBB.clear(); + /* calculate Live Interval weight */ + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + if (li->GetIsCall() != nullptr || li->GetPhysUse()) { + continue; + } + if (li->GetLastUse() > li->GetFirstDef()) { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetLastUse() - li->GetFirstDef())); + } else { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetFirstDef() - li->GetLastUse())); + } + } + + /* enhance loop Live Interval Priority */ + if (!cgFunc->GetLoops().empty()) { + for (const auto *lp : cgFunc->GetLoops()) { + ComputeLoopLiveIntervalPriority(*lp); + } + } + + if (LSRA_DUMP) { + PrintLiveIntervals(); + } +} + +/* Calculate the weight of a live interval for pre-spill and flexible spill */ +void LSRALinearScanRegAllocator::LiveIntervalAnalysis() +{ + for (uint32 bbIdx = 0; bbIdx < bfs->sortedBBs.size(); ++bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx]; + + FOR_BB_INSNS(insn, bb) { + /* 1 calculate live interfere */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + /* New instruction inserted by reg alloc (ie spill) */ + continue; + } + /* 1.1 simple retire from active */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *li = static_cast(*it); + if (li->GetLastUse() > insn->GetId()) { + break; + } + it = active.erase(it); + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::LiveIntervalAnalysis"); + bool isDef = regProp->IsRegDef(); + Operand &opnd = insn->GetOperand(i); + if (isDef) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsVirtualRegister() && regOpnd.GetRegisterType() != kRegTyCc) { + /* 1.2 simple insert to active */ + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveInterval *li = liveIntervalsArray[regNO]; + // set the base reference of derived reference for stackmap + if (regOpnd.GetBaseRefOpnd() != nullptr) { + dereivedRef2Base[regNO] = regOpnd.GetBaseRefOpnd()->GetRegisterNumber(); + } + if (li->GetFirstDef() == insn->GetId()) { + (void)active.insert(li); + } + } + } + } + + /* 2 get interfere info, and analysis */ + uint32 interNum = active.size(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "In insn " << insn->GetId() 
<< ", " << interNum + << " overlap live intervals.\n"; + LogInfo::MapleLogger() << "\n"; + } + + /* 2.2 interfere with each other, analysis which to spill */ + while (interNum > CGOptions::GetOverlapNum()) { + LiveInterval *lowestLi = nullptr; + FindLowestPrioInActive(lowestLi); + if (lowestLi != nullptr) { + if (LSRA_DUMP) { + PrintLiveInterval(*lowestLi, "Pre spilled: "); + } + lowestLi->SetStackSlot(kSpilled); + lowestLi->SetShouldSave(false); + active.erase(itFinded); + interNum = active.size(); + } else { + break; + } + } + } + } + active.clear(); +} + +void LSRALinearScanRegAllocator::UpdateCallQueueAtRetirement(uint32 insnID) +{ + /* + * active list is sorted based on increasing lastUse + * any operand whose use is greater than current + * instruction number is still in use. + * If the use is less than or equal to instruction number + * then it is possible to retire this live interval and + * reclaim the physical register associated with it. + */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "RetireActiveByInsn instr_num " << insnID << "\n"; + } + /* Retire invalidated call from call queue */ + while (!callQueue.empty() && callQueue.front() <= insnID) { + callQueue.pop_front(); + } +} + +/* update allocate info by active queue */ +void LSRALinearScanRegAllocator::UpdateActiveAllocateInfo(const LiveInterval &li) +{ + uint32 start = li.GetFirstDef(); + uint32 end = li.GetLastUse(); + if (li.GetSplitParent() != nullptr || li.IsUseBeforeDef()) { + --start; + } + for (auto *activeLi : active) { + uint32 regNO = activeLi->GetAssignedReg(); + uint32 rangeStartPos; + auto posRange = activeLi->FindPosRange(start); + if (posRange == activeLi->GetRanges().end()) { + /* handle splited li */ + uint32 splitSafePos = activeLi->GetSplitPos(); + if (splitSafePos == li.GetFirstDef() && (li.GetSplitParent() != nullptr || li.IsUseBeforeDef())) { + rangeStartPos = 0; + } else if (splitSafePos > li.GetFirstDef()) { + rangeStartPos = splitSafePos - 1; + } else { + rangeStartPos = 0XFFFFFFFUL; + } + } else if (posRange->GetEhStart() != 0 && posRange->GetEhStart() < posRange->GetStart()) { + rangeStartPos = posRange->GetEhStart(); + } else { + rangeStartPos = posRange->GetStart(); + } + if (rangeStartPos > li.GetFirstDef()) { + if (rangeStartPos < end) { + blockForbiddenMask |= (1UL << activeLi->GetAssignedReg()); + } + if (rangeStartPos < freeUntilPos[regNO]) { + freeUntilPos[regNO] = rangeStartPos; + } + } else { + freeUntilPos[regNO] = 0; + } + } +} + +/* update allocate info by param queue */ +void LSRALinearScanRegAllocator::UpdateParamAllocateInfo(const LiveInterval &li) +{ + bool isInt = (li.GetRegType() == kRegTyInt); + MapleVector ¶mQueue = isInt ? intParamQueue : fpParamQueue; + uint32 baseReg = isInt ? firstIntReg : firstFpReg; + uint32 paramNum = isInt ? 
regInfo->GetIntRegs().size() : regInfo->GetFpRegs().size(); + uint32 start = li.GetFirstDef(); + uint32 end = li.GetLastUse(); + for (uint32 i = 0; i < paramNum; ++i) { + while (!paramQueue[i].empty() && paramQueue[i].front()->GetPhysUse() <= start) { + if (paramQueue[i].front()->GetPhysUse() == start && li.GetSplitParent() != nullptr) { + break; + } + paramQueue[i].pop_front(); + } + if (paramQueue[i].empty()) { + continue; + } + auto regNo = paramQueue[i].front()->GetRegNO(); + uint32 startPos = paramQueue[i].front()->GetFirstDef(); + if (startPos <= start) { + freeUntilPos[regNo] = 0; + } else { + if (startPos < end) { + blockForbiddenMask |= (1UL << (i + baseReg)); + } + if (startPos < freeUntilPos[regNo]) { + freeUntilPos[regNo] = startPos; + } + } + } +} + +/* update active in retire */ +void LSRALinearScanRegAllocator::RetireActive(LiveInterval &li, uint32 insnID) +{ + /* Retire live intervals from active list */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *activeLi = static_cast(*it); + if (activeLi->GetLastUse() > insnID) { + break; + } + if (activeLi->GetLastUse() == insnID) { + if (li.GetSplitParent() != nullptr || activeLi->GetSplitNext() != nullptr) { + ++it; + continue; + } + if (activeLi->IsEndByMov() && activeLi->GetRegType() == li.GetRegType()) { + li.SetPrefer(activeLi->GetAssignedReg()); + } + } + /* reserve split li in active */ + if (activeLi->GetSplitPos() >= insnID) { + ++it; + continue; + } + /* + * live interval ended for this reg in active + * release physical reg assigned to free reg pool + */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Removing " + << "(" << activeLi->GetAssignedReg() << ")" + << "from regmask\n"; + PrintLiveInterval(*activeLi, "\tRemoving virt_reg li\n"); + } + it = active.erase(it); + } +} + +/* find the best physical reg by freeUntilPos */ +uint32 LSRALinearScanRegAllocator::GetRegFromMask(uint32 mask, regno_t offset, const LiveInterval &li) +{ + uint32 prefer = li.GetPrefer(); + if (prefer != 0) { + uint32 preg = li.GetPrefer() - offset; + if ((mask & (1u << preg)) != 0 && freeUntilPos[prefer] == 0XFFFFFFFUL) { + return prefer; + } + } + uint32 bestReg = 0; + uint32 maxFreeUntilPos = 0; + for (uint32 preg = 0; preg < k32BitSize; ++preg) { + if ((mask & (1u << preg)) == 0) { + continue; + } + uint32 regNO = preg + offset; + if (freeUntilPos[regNO] >= li.GetLastUse()) { + return regNO; + } + if (freeUntilPos[regNO] > maxFreeUntilPos) { + maxFreeUntilPos = freeUntilPos[regNO]; + bestReg = regNO; + } + } + return bestReg; +} + +/* Handle adrp register assignment. Use the same register for the next instruction. */ +uint32 LSRALinearScanRegAllocator::GetSpecialPhysRegPattern(const LiveInterval &li) +{ + /* li's first def point */ + Insn *nInsn = nullptr; + if (nInsn == nullptr || !nInsn->IsMachineInstruction() || nInsn->IsDMBInsn() || li.GetLastUse() > nInsn->GetId()) { + return 0; + } + + const InsnDesc *md = nInsn->GetDesc(); + if (!md->GetOpndDes(0)->IsRegDef()) { + return 0; + } + Operand &opnd = nInsn->GetOperand(0); + if (!opnd.IsRegister()) { + return 0; + } + auto ®Opnd = static_cast(opnd); + if (!regOpnd.IsPhysicalRegister()) { + return 0; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (!regInfo->IsPreAssignedReg(regNO)) { + return 0; + } + + /* next insn's dest is a physical param reg 'regNO'. 
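+ * For example (illustrative sketch only; actual operands depend on the target):
+ *     adrp v200, :symbol:      // li's vreg is defined here
+ *     mov  x0, v200            // next insn moves it into param reg x0
+ * Assigning x0 to v200 makes the copy trivially removable later.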
return 'regNO' if dest of adrp is src of next insn */ + uint32 opndNum = nInsn->GetOperandSize(); + for (uint32 i = 1; i < opndNum; ++i) { + Operand &src = nInsn->GetOperand(i); + if (src.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(src); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + auto *regSrc = static_cast(base); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + return regNO; + } + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + auto *regSrc = static_cast(offset); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + return regNO; + } + } + } else if (src.IsRegister()) { + auto ®Src = static_cast(src); + uint32 srcRegNO = regSrc.GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, + "pointer is null in LSRALinearScanRegAllocator::GetSpecialPhysRegPattern"); + bool srcIsDef = regProp->IsRegDef(); + if (srcIsDef) { + break; + } + return regNO; + } + } + } + return 0; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyRegByFastAlloc(LiveInterval &li) +{ + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + regNO = GetRegFromMask(intCalleeMask, firstIntReg, li); + li.SetShouldSave(false); + if (regNO == 0 || freeUntilPos[regNO] < li.GetLastUse()) { + regNO = GetRegFromMask(intCallerMask, firstIntReg, li); + li.SetShouldSave(true); + } + } else if (li.GetRegType() == kRegTyFloat) { + regNO = GetRegFromMask(fpCalleeMask, firstFpReg, li); + li.SetShouldSave(false); + if (regNO == 0 || freeUntilPos[regNO] < li.GetLastUse()) { + regNO = GetRegFromMask(fpCallerMask, firstFpReg, li); + li.SetShouldSave(true); + } + } + return regNO; +} + +/* Determine if live interval crosses the call */ +bool LSRALinearScanRegAllocator::NeedSaveAcrossCall(LiveInterval &li) +{ + bool saveAcrossCall = false; + for (uint32 callInsnID : callQueue) { + if (callInsnID > li.GetLastUse()) { + break; + } + if (callInsnID < li.GetFirstDef()) { + continue; + } + /* Need to spill/fill around this call */ + for (auto range : li.GetRanges()) { + uint32 start; + if (range.GetEhStart() != 0 && range.GetEhStart() < range.GetStart()) { + start = range.GetEhStart(); + } else { + start = range.GetStart(); + } + if (callInsnID >= start && callInsnID < range.GetEnd()) { + saveAcrossCall = true; + break; + } + } + if (saveAcrossCall) { + break; + } + } + if (LSRA_DUMP) { + if (saveAcrossCall) { + LogInfo::MapleLogger() << "\t\tlive interval crosses a call\n"; + } else { + LogInfo::MapleLogger() << "\t\tlive interval does not cross a call\n"; + } + } + return saveAcrossCall; +} + +/* Return a phys register number for the live interval. */ +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li) +{ + if (fastAlloc) { + return FindAvailablePhyRegByFastAlloc(li); + } + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + regNO = FindAvailablePhyReg(li, true); + } else { + DEBUG_ASSERT(li.GetRegType() == kRegTyFloat, "impossible register type"); + regNO = FindAvailablePhyReg(li, false); + } + return regNO; +} + +/* Spill and reload for caller saved registers. 
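+ * A vreg that is live across a call but was assigned a caller-saved physical
+ * register must be stored to its spill slot after each definition and loaded
+ * back before a later use, because the callee may clobber the register.
+ * Illustrative sketch (mnemonics and register names are target-dependent):
+ *     str  w1, [fp, #slot]     // " SPILL for caller_save" after the def
+ *     bl   callee              // the call may clobber caller-saved w1
+ *     ldr  w1, [fp, #slot]     // " RELOAD for caller_save" before the use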
*/ +void LSRALinearScanRegAllocator::InsertCallerSave(Insn &insn, Operand &opnd, bool isDef) +{ + auto &regOpnd = static_cast<RegOperand&>(opnd); + uint32 vRegNO = regOpnd.GetRegisterNumber(); + if (vRegNO >= liveIntervalsArray.size()) { + CHECK_FATAL(false, "index out of range in LSRALinearScanRegAllocator::InsertCallerSave"); + } + LiveInterval *rli = liveIntervalsArray[vRegNO]; + RegType regType = regOpnd.GetRegisterType(); + + isSpillZero = false; + if (!isDef) { + uint32 mask; + uint32 regBase; + if (regType == kRegTyInt) { + mask = intBBDefMask; + regBase = firstIntReg; + } else { + mask = fpBBDefMask; + regBase = firstFpReg; + } + if (mask & (1u << (rli->GetAssignedReg() - regBase))) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << rli->GetAssignedReg() + << " skipping due to local def\n"; + } + return; + } + } + + if (!rli->IsShouldSave()) { + return; + } + + uint32 regSize = regOpnd.GetSize(); + PrimType spType; + + if (regType == kRegTyInt) { + spType = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + intBBDefMask |= (1u << (rli->GetAssignedReg() - firstIntReg)); + } else { + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + fpBBDefMask |= (1u << (rli->GetAssignedReg() - firstFpReg)); + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << vRegNO << "\n"; + } + + if (!isDef && !rli->IsCallerSpilled()) { + LogInfo::MapleLogger() << "WARNING: " << vRegNO << " caller restore without spill in bb " + << insn.GetBB()->GetId() << " : " << cgFunc->GetName() << "\n"; + } + rli->SetIsCallerSpilled(true); + + MemOperand *memOpnd = nullptr; + RegOperand *phyOpnd = nullptr; + + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast<regno_t>(rli->GetAssignedReg()), regSize, regType); + std::string comment; + bool isOutOfRange = false; + if (isDef) { + Insn *nextInsn = insn.GetNext(); + memOpnd = GetSpillMem(vRegNO, true, insn, static_cast<regno_t>(intSpillRegSet[0] + firstIntReg), isOutOfRange, + regSize); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " SPILL for caller_save " + std::to_string(vRegNO); + ++callerSaveSpillCount; + if (rli->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(vRegNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange && nextInsn != nullptr) { + insn.GetBB()->InsertInsnBefore(*nextInsn, *stInsn); + } else if (isOutOfRange && nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + memOpnd = GetSpillMem(vRegNO, false, insn, static_cast<regno_t>(intSpillRegSet[0] + firstIntReg), isOutOfRange, + regSize); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " RELOAD for caller_save " + std::to_string(vRegNO); + ++callerSaveReloadCount; + if (rli->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(vRegNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +MemOperand *LSRALinearScanRegAllocator::GetSpillMem(uint32 vRegNO, bool isDest, Insn &insn, regno_t regNO, + bool &isOutOfRange, uint32 bitSize) const +{ + MemOperand *memOpnd = regInfo->GetOrCreatSpillMem(vRegNO, bitSize); + return regInfo->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vRegNO, isDest, insn, regNO, isOutOfRange); +} + +/* Set a vreg in live interval as being marked for spill. 
*/ +void LSRALinearScanRegAllocator::SetOperandSpill(Operand &opnd) +{ + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SetOperandSpill " << regNO; + LogInfo::MapleLogger() << "(" << liveIntervalsArray[regNO]->GetFirstAcrossedCall(); + LogInfo::MapleLogger() << ", refCount " << liveIntervalsArray[regNO]->GetRefCount() << ")\n"; + } + + DEBUG_ASSERT(regNO < liveIntervalsArray.size(), + "index out of vector size in LSRALinearScanRegAllocator::SetOperandSpill"); + LiveInterval *li = liveIntervalsArray[regNO]; + li->SetStackSlot(kSpilled); + li->SetShouldSave(false); +} + +/* + * Generate spill/reload for an operand. + * spill_idx : one of 3 phys regs set aside for the purpose of spills. + */ +void LSRALinearScanRegAllocator::SpillOperand(Insn &insn, Operand &opnd, bool isDef, uint32 spillIdx) +{ + /* + * Insert spill (def) and fill (use) instructions for the operand. + * Keep track of the 'slot' (base 0). The actual slot on the stack + * will be some 'base_slot_offset' + 'slot' off FP. + * For simplification, entire 64bit register is spilled/filled. + * + * For example, a virtual register home 'slot' on the stack is location 5. + * This represents a 64bit slot (8bytes). The base_slot_offset + * from the base 'slot' determined by whoever is added, off FP. + * stack address is ( FP - (5 * 8) + base_slot_offset ) + * So the algorithm is simple, for each virtual register that is not + * allocated, it has to have a home address on the stack (a slot). + * A class variable is used, start from 0, increment by 1. + * Since LiveInterval already represent unique regNO information, + * just add a slot number to it. Subsequent reference to a regNO + * will either get an allocated physical register or a slot number + * for computing the stack location. + * + * This function will also determine the operand to be a def or use. + * For def, spill instruction(s) is appended after the insn. + * For use, spill instruction(s) is prepended before the insn. + * Use FP - (slot# *8) for now. Will recompute if base_slot_offset + * is not 0. + * + * The total number of slots used will be used to compute the stack + * frame size. This will require some interface external to LSRA. + * + * For normal instruction, two spill regs should be enough. The caller + * controls which ones to use. + * For more complex operations, need to break down the instruction. + * eg. store v1 -> [v2 + v3] // 3 regs needed + * => p1 <- v2 // address part 1 + * p2 <- v3 // address part 2 + * p1 <- p1 + p2 // freeing up p2 + * p2 <- v1 + * store p2 -> [p1] + * or we can allocate more registers to the spill register set + * For store multiple, need to break it down into two or more instr. + */ + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n"; + } + + regno_t spReg; + PrimType spType; + CHECK_FATAL(regNO < liveIntervalsArray.size(), "index out of range in LSRALinearScanRegAllocator::SpillOperand"); + LiveInterval *li = liveIntervalsArray[regNO]; + DEBUG_ASSERT(!li->IsShouldSave(), "SpillOperand: Should not be caller"); + uint32 regSize = regOpnd.GetSize(); + RegType regType = regOpnd.GetRegisterType(); + + if (li->GetRegType() == kRegTyInt) { + DEBUG_ASSERT((spillIdx < intSpillRegSet.size()), "SpillOperand: ran out int spill reg"); + spReg = intSpillRegSet[spillIdx] + firstIntReg; + spType = (regSize <= k32BitSize) ? 
PTY_i32 : PTY_i64; + } else if (li->GetRegType() == kRegTyFloat) { + DEBUG_ASSERT((spillIdx < fpSpillRegSet.size()), "SpillOperand: ran out fp spill reg"); + spReg = fpSpillRegSet[spillIdx] + firstFpReg; + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + } else { + CHECK_FATAL(false, "SpillOperand: Should be int or float type"); + } + + bool isOutOfRange = false; + RegOperand *phyOpnd = nullptr; + if (isSpillZero) { + phyOpnd = &cgFunc->GetZeroOpnd(regSize); + } else { + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast(spReg), regSize, regType); + } + li->SetAssignedReg(phyOpnd->GetRegisterNumber()); + + MemOperand *memOpnd = nullptr; + if (isDef) { + /* + * Need to assign spReg (one of the two spill reg) to the destination of the insn. + * spill_vreg <- opn1 op opn2 + * to + * spReg <- opn1 op opn2 + * store spReg -> spillmem + */ + li->SetStackSlot(kSpilled); + + ++spillCount; + Insn *nextInsn = insn.GetNext(); + memOpnd = GetSpillMem(regNO, true, insn, static_cast(intSpillRegSet[spillIdx + 1] + firstIntReg), + isOutOfRange, regSize); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " SPILL vreg:" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(regNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange && nextInsn != nullptr) { + insn.GetBB()->InsertInsnBefore(*nextInsn, *stInsn); + } else if (isOutOfRange && nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + /* Here, reverse of isDef, change either opn1 or opn2 to the spReg. */ + if (li->GetStackSlot() == 0xFFFFFFFF) { + LogInfo::MapleLogger() << "WARNING: " << regNO << " assigned " << li->GetAssignedReg() + << " restore without spill in bb " << insn.GetBB()->GetId() << " : " + << cgFunc->GetName() << "\n"; + } + ++reloadCount; + memOpnd = GetSpillMem(regNO, false, insn, static_cast(intSpillRegSet[spillIdx] + firstIntReg), + isOutOfRange, regSize); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " RELOAD vreg" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + regInfo->FreeSpillRegMem(regNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +/* find the lowest li that meets the constraints related to li0 form current active */ +void LSRALinearScanRegAllocator::FindLowestPrioInActive(LiveInterval *&targetLi, LiveInterval *li0, RegType regType) +{ + std::map activeLiAssignedRegCnt; + for (auto *li : active) { + if (li->GetAssignedReg() != 0) { + ++activeLiAssignedRegCnt[li->GetAssignedReg()]; + } + } + + float lowestPrio = 1000.0; + bool found = false; + bool hintCalleeSavedReg = li0 && NeedSaveAcrossCall(*li0); + MapleSet::iterator lowestIt; + for (auto it = active.begin(); it != active.end(); ++it) { + LiveInterval *li = static_cast(*it); + regno_t regNO = li->GetAssignedReg(); + /* 1. Basic Constraints */ + if (li->GetPriority() >= lowestPrio || li->GetRegType() != regType || li->GetLiParent() || li->GetLiChild()) { + continue; + } + /* 2. If li is pre-assigned to Physical register primitively, ignore it. */ + if (regInfo->IsPreAssignedReg(li->GetRegNO())) { + continue; + } + /* 3. CalleeSavedReg is preferred here. If li is assigned to Non-CalleeSavedReg, ignore it. */ + if (hintCalleeSavedReg && !regInfo->IsCalleeSavedReg(regNO - firstIntReg)) { + continue; + } + /* 4. 
Checkinterference. If li is assigned to li0's OverlapPhyReg, ignore it. */ + if (li0 && li0->IsOverlapPhyReg(regNO)) { + continue; + } + /* 5. if regNO is assigned to multiple active li, ignore it. */ + if (activeLiAssignedRegCnt[regNO] > 1) { + continue; + } + lowestPrio = li->GetPriority(); + lowestIt = it; + found = true; + } + if (found) { + targetLi = *lowestIt; + itFinded = lowestIt; + } + return; +} + +/* Set a vreg in live interval as being marked for spill. */ +void LSRALinearScanRegAllocator::SetLiSpill(LiveInterval &li) +{ + uint32 regNO = li.GetRegNO(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SetLiSpill " << regNO; + LogInfo::MapleLogger() << "(" << li.GetFirstAcrossedCall(); + LogInfo::MapleLogger() << ", refCount " << li.GetRefCount() << ")\n"; + } + li.SetStackSlot(kSpilled); + li.SetShouldSave(false); +} + +uint32 LSRALinearScanRegAllocator::HandleSpillForLi(LiveInterval &li) +{ + /* choose the lowest priority li to spill */ + RegType regType = li.GetRegType(); + LiveInterval *spillLi = nullptr; + FindLowestPrioInActive(spillLi, &li, regType); + + /* + * compare spill_li with current li + * spill_li is null and li->SetStackSlot(Spilled) when the li is spilled due to LiveIntervalAnalysis + */ + if (!li.IsMustAllocate()) { + if (spillLi == nullptr || li.GetStackSlot() == kSpilled || li.GetRefCount() <= spillLi->GetRefCount()) { + /* spill current li */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: still spill " << li.GetRegNO() << ".\n"; + } + SetLiSpill(li); + return 0; + } + } + DEBUG_ASSERT(spillLi != nullptr, "spillLi is null in LSRALinearScanRegAllocator::HandleSpillForLi"); + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: " << spillLi->GetRegNO() << " instead of " << li.GetRegNO() << ".\n"; + PrintLiveInterval(*spillLi, "TO spill: "); + PrintLiveInterval(li, "Instead of: "); + } + + uint32 newRegNO = spillLi->GetAssignedReg(); + li.SetAssignedReg(newRegNO); + + /* spill this live interval */ + (void)active.erase(itFinded); + SetLiSpill(*spillLi); + spillLi->SetAssignedReg(0); + + (void)active.insert(&li); + return newRegNO; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li, bool isIntReg) +{ + uint32 &callerRegMask = isIntReg ? intCallerMask : fpCallerMask; + uint32 &calleeRegMask = isIntReg ? intCalleeMask : fpCalleeMask; + regno_t reg0 = isIntReg ? 
firstIntReg : firstFpReg; + regno_t bestReg = 0; + regno_t secondReg = 0; + + /* See if the register is live across a call */ + if (NeedSaveAcrossCall(li)) { + if (!li.IsAllInCatch() && !li.IsAllInCleanupOrFirstBB()) { + /* call in live interval, use callee if available */ + bestReg = GetRegFromMask(calleeRegMask, reg0, li); + if (bestReg != 0 && freeUntilPos[bestReg] >= li.GetLastUse()) { + li.SetShouldSave(false); + return bestReg; + } + } + /* could be optimized: handle multiple uses between calls rather than within a BB */ + if (bestReg == 0 || li.IsMultiUseInBB()) { + secondReg = GetRegFromMask(callerRegMask, reg0, li); + if (freeUntilPos[secondReg] >= li.GetLastUse()) { + li.SetShouldSave(true); + return secondReg; + } + } + } else { + /* Get forced register */ + uint32 forcedReg = GetSpecialPhysRegPattern(li); + if (forcedReg != 0) { + return forcedReg; + } + + bestReg = GetRegFromMask(callerRegMask, reg0, li); + if (bestReg == 0) { + bestReg = GetRegFromMask(calleeRegMask, reg0, li); + } else if (freeUntilPos[bestReg] < li.GetLastUse()) { + secondReg = GetRegFromMask(calleeRegMask, reg0, li); + if (secondReg != 0) { + bestReg = (freeUntilPos[bestReg] > freeUntilPos[secondReg]) ? bestReg : secondReg; + } + } + } + if (bestReg != 0 && freeUntilPos[bestReg] < li.GetLastUse()) { + DEBUG_ASSERT(freeUntilPos[bestReg] != 0, "impossible"); + bestReg = 0; + } + /* todo : try to fill in holes */ + /* todo : try first split if no hole exists */ + return bestReg; +} + +/* Shell function to find a physical register for an operand. */ +uint32 LSRALinearScanRegAllocator::AssignPhysRegs(LiveInterval &li) +{ + if (spillAll && !li.IsMustAllocate()) { + return 0; + } + + /* pre spilled: */ + if (li.GetStackSlot() != 0xFFFFFFFF && !li.IsMustAllocate()) { + return 0; + } + + if (LSRA_DUMP) { + uint32 activeSz = active.size(); + LogInfo::MapleLogger() << "\tAssignPhysRegs-active_sz " << activeSz << "\n"; + } + + // aarch64 adds fp/lr to the callee-saved area + regInfo->Fini(); + + uint32 regNO = FindAvailablePhyReg(li); + if (regNO != 0) { + li.SetAssignedReg(regNO); + if (regInfo->IsCalleeSavedReg(regNO)) { + if (!CGOptions::DoCalleeToSpill()) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() + << "\tCallee-save register for save/restore in prologue/epilogue: " << regNO << "\n"; + } + cgFunc->AddtoCalleeSaved(regNO); + } + ++calleeUseCnt[regNO]; + } + } + return regNO; +} + +void LSRALinearScanRegAllocator::AssignPhysRegsForLi(LiveInterval &li) +{ + uint32 newRegNO = AssignPhysRegs(li); + if (newRegNO == 0) { + newRegNO = HandleSpillForLi(li); + } + + if (newRegNO != 0) { + (void)active.insert(&li); + } +} + +/* Replace Use-Def Opnd */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceUdOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx) +{ + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast<const RegOperand*>(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + DEBUG_ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceUdOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, false); + } else if (li->GetStackSlot() 
== kSpilled) { + SpillOperand(insn, opnd, false, spillIdx); + SpillOperand(insn, opnd, true, spillIdx); + ++spillIdx; + } + RegOperand *phyOpnd = + regInfo->GetOrCreatePhyRegOperand(static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* + * Create an operand with physical register assigned, or a spill register + * in the case where a physical register cannot be assigned. + */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx, bool isDef) +{ + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + DEBUG_ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, isDef); + } else if (li->GetStackSlot() == kSpilled) { + spillIdx = isDef ? 0 : spillIdx; + SpillOperand(insn, opnd, isDef, spillIdx); + if (!isDef) { + ++spillIdx; + } + } + RegOperand *phyOpnd = + regInfo->GetOrCreatePhyRegOperand(static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* Try to estimate if spill callee should be done based on even/odd for stp in prolog. */ +void LSRALinearScanRegAllocator::CheckSpillCallee() +{ + if (CGOptions::DoCalleeToSpill()) { + uint32 pairCnt = 0; + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((intCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptIntCallee = true; + } + + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((fpCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptFpCallee = true; + } + } +} + +/* Iterate through all instructions and change the vreg to preg. 
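+ * Operands are rewritten in three passes per insn: plain uses first (so any
+ * reload is inserted before the insn), then use-def operands, then defs last
+ * (so any spill store is inserted after the insn). Sketch, with illustrative
+ * register numbers (v100 -> w0, v101 -> w1):
+ *     add v100, v101, #1   =>   add w0, w1, #1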
*/ +void LSRALinearScanRegAllocator::FinalizeRegisters() +{ + CheckSpillCallee(); + for (BB *bb : bfs->sortedBBs) { + intBBDefMask = 0; + fpBBDefMask = 0; + + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || insn->GetId() == 0) { + continue; + } + if (!insn->IsMachineInstruction()) { + continue; + } + + uint32 spillIdx = 0; + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + + /* Handle source(use) opernads first */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + bool isDef = regProp->IsRegDef(); + if (isDef) { + continue; + } + Operand &opnd = insn->GetOperand(i); + RegOperand *phyOpnd = nullptr; + if (opnd.IsList()) { + /* For arm32, not arm64 */ + } else if (opnd.IsMemoryAccessOperand()) { + auto *memOpnd = + static_cast(static_cast(opnd).Clone(*cgFunc->GetMemoryPool())); + DEBUG_ASSERT(memOpnd != nullptr, + "memopnd is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + insn->SetOperand(i, *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetIndexRegister(); + if (base != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *base, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + } + if (offset != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *offset, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + } else { + phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, false); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + } + + /* Handle ud(use-def) opernads */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUseDef = regProp->IsRegDef() && regProp->IsRegUse(); + if (!isUseDef) { + continue; + } + RegOperand *phyOpnd = GetReplaceUdOpnd(*insn, opnd, spillIdx); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + + /* Handle dest(def) opernads last */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + DEBUG_ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUse = (regProp->IsRegUse()) || (opnd.IsMemoryAccessOperand()); + if (isUse) { + continue; + } + isSpillZero = false; + RegOperand *phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, true); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + if (isSpillZero) { + insn->GetBB()->RemoveInsn(*insn); + } + } + } + + if (insn->IsCall()) { + intBBDefMask = 0; + fpBBDefMask = 0; + } + } + } +} + +void LSRALinearScanRegAllocator::CollectReferenceMap() +{ + const auto &referenceMapInsns = cgFunc->GetStackMapInsns(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "===========reference map stack info================\n"; + } + + for (auto *insn : referenceMapInsns) { + auto *stackMapLiveIn = insn->GetStackMapLiveIn(); + for (auto regNO : stackMapLiveIn->GetInfo()) { + if (!cgFunc->IsRegReference(regNO)) { + continue; + } + + auto *li = liveIntervalsArray[regNO]; + if (li == nullptr) { + continue; + } + + if (li->IsShouldSave() || li->GetStackSlot() == kSpilled) { + auto itr = dereivedRef2Base.find(regNO); + if (itr != dereivedRef2Base.end()) { + MemOperand *baseRegMemOpnd = 
cgFunc->GetOrCreatSpillMem(itr->second, k64BitSize); + int64 baseRefMemoffset = baseRegMemOpnd->GetOffsetImmediate()->GetOffsetValue(); + insn->GetStackMap()->GetReferenceMap().ReocordStackRoots(baseRefMemoffset); + } + MemOperand *memOperand = cgFunc->GetOrCreatSpillMem(regNO, k64BitSize); + int64 offset = memOperand->GetOffsetImmediate()->GetOffsetValue(); + insn->GetStackMap()->GetReferenceMap().ReocordStackRoots(offset); + if (itr == dereivedRef2Base.end()) { + insn->GetStackMap()->GetReferenceMap().ReocordStackRoots(offset); + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "--------insn id: " << insn->GetId() << " regNO: " << regNO << "offset: " + << offset << std::endl; + } + } else { + // TODO: li->GetAssignedReg - R0/RAX? + CHECK_FATAL(false, "not support currently"); + insn->GetStackMap()->GetReferenceMap().ReocordRegisterRoots(li->GetAssignedReg()); + } + } + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "===========reference map stack info end================\n"; + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "===========reference map info================\n"; + for (auto *insn : referenceMapInsns) { + LogInfo::MapleLogger() << " referenceMap insn: "; + insn->Dump(); + insn->GetStackMap()->GetReferenceMap().Dump(); + } + } +} + +void LSRALinearScanRegAllocator::SolveRegOpndDeoptInfo(const RegOperand ®Opnd, DeoptInfo &deoptInfo, + int32 deoptVregNO) const +{ + if (regOpnd.IsPhysicalRegister()) { + // TODO: Get Register No + deoptInfo.RecordDeoptVreg2LocationInfo(deoptVregNO, LocationInfo({kInRegister, 0})); + return; + } + // process virtual RegOperand + regno_t vRegNO = regOpnd.GetRegisterNumber(); + // TODO: LiveInterval *li = GetLiveIntervalAt(vRegNO, insn->GetId()); + LiveInterval *li = liveIntervalsArray[vRegNO]; + if (li->IsShouldSave() || li->GetStackSlot() == kSpilled) { + MemOperand *memOpnd = cgFunc->GetOrCreatSpillMem(vRegNO, regOpnd.GetSize()); + SolveMemOpndDeoptInfo(*(static_cast(memOpnd)), deoptInfo, deoptVregNO); + } else { + // TODO: Get Register NO + deoptInfo.RecordDeoptVreg2LocationInfo(deoptVregNO, LocationInfo({kInRegister, li->GetAssignedReg()})); + } +} + +void LSRALinearScanRegAllocator::SolveMemOpndDeoptInfo(const MemOperand &memOpnd, DeoptInfo &deoptInfo, + int32 deoptVregNO) const +{ + int64 offset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + deoptInfo.RecordDeoptVreg2LocationInfo(deoptVregNO, LocationInfo({kOnStack, offset})); +} + +void LSRALinearScanRegAllocator::CollectDeoptInfo() +{ + const auto referenceMapInsns = cgFunc->GetStackMapInsns(); + for (auto *insn : referenceMapInsns) { + auto &deoptInfo = insn->GetStackMap()->GetDeoptInfo(); + const auto &deoptBundleInfo = deoptInfo.GetDeoptBundleInfo(); + if (deoptBundleInfo.empty()) { + continue; + } + for (const auto &item : deoptBundleInfo) { + const auto *opnd = item.second; + if (opnd->IsRegister()) { + SolveRegOpndDeoptInfo(*static_cast(opnd), deoptInfo, item.first); + continue; + } + if (opnd->IsImmediate()) { + const auto *immOpnd = static_cast(opnd); + deoptInfo.RecordDeoptVreg2LocationInfo(item.first, LocationInfo({kInConstValue, immOpnd->GetValue()})); + continue; + } + if (opnd->IsMemoryAccessOperand()) { + SolveMemOpndDeoptInfo(*(static_cast(opnd)), deoptInfo, item.first); + continue; + } + DEBUG_ASSERT(false, "can't reach here!"); + } + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "===========deopt info================\n"; + for (auto *insn : referenceMapInsns) { + LogInfo::MapleLogger() << "---- deoptInfo insn: "; + insn->Dump(); + 
insn->GetStackMap()->GetDeoptInfo().Dump(); + } + } +} + +void LSRALinearScanRegAllocator::SetAllocMode() +{ + if (CGOptions::IsFastAlloc()) { + if (CGOptions::GetFastAllocMode() == 0) { + fastAlloc = true; + } else { + spillAll = true; + } + /* In-Range spill range can still be specified (only works with --dump-func=). */ + } else if (cgFunc->NumBBs() > CGOptions::GetLSRABBOptSize()) { + /* instruction size is checked in ComputeLieveInterval() */ + fastAlloc = true; + } + + if (LSRA_DUMP) { + if (fastAlloc) { + LogInfo::MapleLogger() << "fastAlloc mode on\n"; + } + if (spillAll) { + LogInfo::MapleLogger() << "spillAll mode on\n"; + } + } +} + +void LSRALinearScanRegAllocator::LinearScanRegAllocator() +{ + if (LSRA_DUMP) { + PrintParamQueue("Initial param queue"); + PrintCallQueue("Initial call queue"); + } + freeUntilPos.resize(regInfo->GetAllRegNum(), 0XFFFFFFFUL); + MapleVector initialPosVec(freeUntilPos); + uint32 curInsnID = 0; + + while (!liQue.empty()) { + LiveInterval *li = liQue.front(); + liQue.pop_front(); + if (li->GetRangesSize() == 0) { + /* range building has been skiped */ + li->AddRange(li->GetFirstDef(), li->GetLastUse()); + } + li->InitRangeFinder(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "======Alloc R" << li->GetRegNO() << "======" + << "\n"; + } + blockForbiddenMask = 0; + freeUntilPos = initialPosVec; + DEBUG_ASSERT(li->GetFirstDef() >= curInsnID, "wrong li order"); + curInsnID = li->GetFirstDef(); + RetireActive(*li, curInsnID); + UpdateCallQueueAtRetirement(curInsnID); + UpdateActiveAllocateInfo(*li); + UpdateParamAllocateInfo(*li); + if (LSRA_DUMP) { + DebugCheckActiveList(); + } + AssignPhysRegsForLi(*li); + } +} + +/* Main entrance for the LSRA register allocator */ +bool LSRALinearScanRegAllocator::AllocateRegisters() +{ + cgFunc->SetIsAfterRegAlloc(); + calleeUseCnt.resize(regInfo->GetAllRegNum()); + liveIntervalsArray.resize(cgFunc->GetMaxVReg()); + SetAllocMode(); +#ifdef RA_PERF_ANALYSIS + auto begin = std::chrono::system_clock::now(); +#endif + if (LSRA_DUMP) { + const MIRModule &mirModule = cgFunc->GetMirModule(); + DotGenerator::GenerateDot("RA", *cgFunc, mirModule); + DotGenerator::GenerateDot("RAe", *cgFunc, mirModule, true); + LogInfo::MapleLogger() << "Entering LinearScanRegAllocator: " << cgFunc->GetName() << "\n"; + } +/* ================= LiveInterval =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + ComputeLiveInterval(); + + if (LSRA_DUMP) { + PrintLiveRangesGraph(); + } + + bool enableDoLSRAPreSpill = true; + if (enableDoLSRAPreSpill) { + LiveIntervalAnalysis(); + } + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + liveIntervalUS += std::chrono::duration_cast(end - start).count(); +#endif + +/* ================= LiveRange =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + + bool enableDoLSRAHole = true; + if (enableDoLSRAHole) { + BuildIntervalRanges(); + } + + SpillStackMapInfo(); + + if (LSRA_DUMP) { + PrintAllLiveRanges(); + } +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + holesUS += std::chrono::duration_cast(end - start).count(); +#endif + /* ================= InitFreeRegPool =============== */ + InitFreeRegPool(); + +/* ================= LinearScanRegAllocator =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + LinearScanRegAllocator(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + lsraUS += std::chrono::duration_cast(end - 
start).count(); +#endif + + if (LSRA_DUMP) { + PrintAllLiveRanges(); + } + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + FinalizeRegisters(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + finalizeUS += std::chrono::duration_cast<std::chrono::microseconds>(end - start).count(); +#endif + + CollectReferenceMap(); + CollectDeoptInfo(); + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Total " << spillCount << " spillCount in " << cgFunc->GetName() << " \n"; + LogInfo::MapleLogger() << "Total " << reloadCount << " reloadCount\n"; + LogInfo::MapleLogger() << "Total " + << "(" << spillCount << "+ " << callerSaveSpillCount + << ") = " << (spillCount + callerSaveSpillCount) << " SPILL\n"; + LogInfo::MapleLogger() << "Total " + << "(" << reloadCount << "+ " << callerSaveReloadCount + << ") = " << (reloadCount + callerSaveReloadCount) << " RELOAD\n"; + uint32_t insertInsn = spillCount + callerSaveSpillCount + reloadCount + callerSaveReloadCount; + float rate = static_cast<float>(insertInsn) / static_cast<float>(maxInsnNum); + LogInfo::MapleLogger() << "insn num before RA: " << maxInsnNum << ", inserted " << insertInsn << " insns" + << ", insertInsn/insnNumBeforeRA: " << rate << "\n"; + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + totalUS += std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count(); +#endif + + return true; +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_coalesce.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..746d125f6af4cdae85eb26c56cba4c9a8a9495c4 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/reg_coalesce.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "reg_coalesce.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_reg_coalesce.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#endif +#include "cg.h" + +/* + * This file implements register coalescing based on live-interval analysis, + * which merges the live intervals of move-related virtual registers so the copies can be removed. + */ +namespace maplebe { + +void LiveIntervalAnalysis::Run() +{ + Analysis(); + CoalesceRegisters(); + ClearBFS(); +} + +void LiveIntervalAnalysis::DoAnalysis() +{ + runAnalysis = true; + Analysis(); +} + +void LiveIntervalAnalysis::Analysis() +{ + bfs = memPool->New<Bfs>(*cgFunc, *memPool); + bfs->ComputeBlockOrder(); + ComputeLiveIntervals(); +} + +/* bfs is not utilized outside the function. 
*/ +void LiveIntervalAnalysis::ClearBFS() +{ + bfs = nullptr; +} + +void LiveIntervalAnalysis::Dump() +{ + for (auto it : vregIntervals) { + LiveInterval *li = it.second; + li->Dump(); + li->DumpDefs(); + li->DumpUses(); + } +} + +void LiveIntervalAnalysis::CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc) +{ + if (cgFunc->IsExtendReg(lrDest.GetRegNO())) { + cgFunc->InsertExtendSet(lrSrc.GetRegNO()); + } + cgFunc->RemoveFromExtendSet(lrDest.GetRegNO()); + /* merge lrDest into lrSrc */ + lrSrc.MergeRanges(lrDest); + /* update conflicts */ + lrSrc.MergeConflict(lrDest); + for (auto reg : lrDest.GetConflict()) { + LiveInterval *conf = GetLiveInterval(reg); + if (conf) { + conf->AddConflict(lrSrc.GetRegNO()); + } + } + /* merge reference points */ + lrSrc.MergeRefPoints(lrDest); + vregIntervals.erase(lrDest.GetRegNO()); +} + +bool CGliveIntervalAnalysis::PhaseRun(maplebe::CGFunc &f) +{ + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + live->ResetLiveSet(); + MemPool *memPool = GetPhaseMemPool(); + liveInterval = f.GetCG()->CreateLLAnalysis(*memPool, f); + liveInterval->DoAnalysis(); + return false; +} +void CGliveIntervalAnalysis::GetAnalysisDependence(AnalysisDep &aDep) const +{ + aDep.AddRequired<CgLiveAnalysis>(); + aDep.AddRequired<CgLoopAnalysis>(); +} +MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(CGliveIntervalAnalysis, cgliveintervalananlysis) + +bool CgRegCoalesce::PhaseRun(maplebe::CGFunc &f) +{ + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + live->ResetLiveSet(); + MemPool *memPool = GetPhaseMemPool(); + LiveIntervalAnalysis *ll = f.GetCG()->CreateLLAnalysis(*memPool, f); + ll->Run(); + /* the live range info may have changed, so invalidate it. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return false; +} + +void CgRegCoalesce::GetAnalysisDependence(maple::AnalysisDep &aDep) const +{ + aDep.AddRequired<CgLiveAnalysis>(); + aDep.AddRequired<CgLoopAnalysis>(); + aDep.PreservedAllExcept<CgLiveAnalysis>(); +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegCoalesce, cgregcoalesce) + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/regsaves.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/regsaves.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f53e785e84cc95a08e25c6cfd8d9976be0500ded --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/regsaves.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "cgfunc.h" +#if TARGAARCH64 +#include "aarch64_regsaves.h" +#elif TARGRISCV64 +#include "riscv64_regsaves.h" +#endif + +namespace maplebe { +using namespace maple; + +bool CgRegSavesOpt::PhaseRun(maplebe::CGFunc &f) +{ + if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1) { + return false; + } + + /* Perform loop analysis, result to be obtained in CGFunc */ + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + + /* Perform live analysis, result to be obtained in CGFunc */ + LiveAnalysis *live = nullptr; + MaplePhase *it = + GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLiveAnalysis::id, f); + live = static_cast(it)->GetResult(); + CHECK_FATAL(live != nullptr, "null ptr check"); + /* revert liveanalysis result container. */ + live->ResetLiveSet(); + + /* Perform dom analysis, result to be inserted into AArch64RegSavesOpt object */ + DomAnalysis *dom = nullptr; + PostDomAnalysis *pdom = nullptr; + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1 && + f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) { + MaplePhase *phase = + GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgDomAnalysis::id, f); + dom = static_cast(phase)->GetResult(); + CHECK_FATAL(dom != nullptr, "null ptr check"); + phase = + GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgPostDomAnalysis::id, f); + pdom = static_cast(phase)->GetResult(); + CHECK_FATAL(pdom != nullptr, "null ptr check"); + } + + MemPool *memPool = GetPhaseMemPool(); + RegSavesOpt *regSavesOpt = nullptr; +#if TARGAARCH64 + regSavesOpt = memPool->New(f, *memPool, *dom, *pdom); +#elif || TARGRISCV64 + regSavesOpt = memPool->New(f, *memPool); +#endif + + if (regSavesOpt) { + regSavesOpt->SetEnabledDebug(false); /* To turn on debug trace */ + if (regSavesOpt->GetEnabledDebug()) { + dom->Dump(); + } + regSavesOpt->Run(); + } + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegSavesOpt, regsaves) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/schedule.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..316948f3a48b8ace294d88eb8352a86195d7f972 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/schedule.cpp @@ -0,0 +1,997 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if TARGAARCH64 +#include "aarch64_schedule.h" +#elif TARGRISCV64 +#include "riscv64_schedule.h" +#endif +#if TARGARM32 +#include "arm32_schedule.h" +#endif +#include "cg.h" +#include "optimize_common.h" + +#undef PRESCHED_DEBUG + +namespace maplebe { +/* pressure standard value; pressure under this value will not lead to spill operation */ +static constexpr int g_pressureStandard = 27; +/* optimistic scheduling option */ +static constexpr bool g_optimisticScheduling = false; +/* brute maximum count limit option */ +static constexpr bool g_bruteMaximumLimit = true; +/* brute maximum count */ +static constexpr int g_schedulingMaximumCount = 20000; + +/* ---- RegPressureSchedule function ---- */ +void RegPressureSchedule::InitBBInfo(BB &b, MemPool &memPool, const MapleVector &nodes) +{ + bb = &b; + liveReg.clear(); + scheduledNode.clear(); + readyList.clear(); + maxPriority = 0; + maxPressure = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + curPressure = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + physicalRegNum = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + for (auto node : nodes) { + node->SetState(kNormal); + } +} + +/* return register type according to register number */ +RegType RegPressureSchedule::GetRegisterType(regno_t reg) const +{ + return cgFunc.GetRegisterType(reg); +} + +/* Get amount of every physical register */ +void RegPressureSchedule::BuildPhyRegInfo(const std::vector ®NumVec) +{ + FOR_ALL_REGCLASS(i) + { + physicalRegNum[i] = regNumVec[i]; + } +} + +/* Initialize pre-scheduling split point in BB */ +void RegPressureSchedule::initPartialSplitters(const MapleVector &nodes) +{ + bool addFirstAndLastNodeIndex = false; + constexpr uint32 SecondLastNodeIndexFromBack = 2; + constexpr uint32 LastNodeIndexFromBack = 1; + constexpr uint32 FirstNodeIndex = 0; + constexpr uint32 minimumBBSize = 2; + /* Add split point for the last instruction in return BB */ + if (bb->GetKind() == BB::kBBReturn && nodes.size() > minimumBBSize) { + splitterIndexes.emplace_back(nodes.size() - SecondLastNodeIndexFromBack); + addFirstAndLastNodeIndex = true; + } + /* Add first and last node as split point if needed */ + if (addFirstAndLastNodeIndex) { + splitterIndexes.emplace_back(nodes.size() - LastNodeIndexFromBack); + splitterIndexes.emplace_back(FirstNodeIndex); + } + std::sort(splitterIndexes.begin(), splitterIndexes.end(), std::less {}); +} + +/* initialize register pressure information according to bb's live-in data. + * initialize node's valid preds size. 
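+ * Each def of a register raises the expected pressure of its register class
+ * and each last use lowers it (see CalculatePressure below); a def with no
+ * use list that is not live-out counts as a dead def.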
+ */ +void RegPressureSchedule::Init(const MapleVector<DepNode*> &nodes) +{ + readyList.clear(); + scheduledNode.clear(); + liveReg.clear(); + liveInRegNO.clear(); + liveOutRegNO.clear(); + liveInRegNO = bb->GetLiveInRegNO(); + liveOutRegNO = bb->GetLiveOutRegNO(); + + FOR_ALL_REGCLASS(i) + { + curPressure[i] = 0; + maxPressure[i] = 0; + } + + for (auto *node : nodes) { + /* calculate the register pressure of the node's uses */ + for (auto &useReg : node->GetUseRegnos()) { + CalculatePressure(*node, useReg, false); + } + + /* calculate the register pressure of the node's defs */ + size_t i = 0; + for (auto &defReg : node->GetDefRegnos()) { + CalculatePressure(*node, defReg, true); + RegType regType = GetRegisterType(defReg); + /* if there is no use list, the register is only defined, never used */ + if (node->GetRegDefs(i) == nullptr && liveOutRegNO.find(defReg) == liveOutRegNO.end()) { + node->IncDeadDefByIndex(regType); + } + ++i; + } + /* Calculate pred size of the node */ + CalculatePredSize(*node); + } + + DepNode *firstNode = nodes.front(); + readyList.emplace_back(firstNode); + firstNode->SetState(kReady); + scheduledNode.reserve(nodes.size()); + constexpr size_t readyListSize = 10; + readyList.reserve(readyListSize); +} + +void RegPressureSchedule::SortReadyList() +{ + std::sort(readyList.begin(), readyList.end(), DepNodePriorityCmp); +} + +/* return true if node1 should be scheduled first. */ +bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2) +{ + CHECK_NULL_FATAL(node1); + CHECK_NULL_FATAL(node2); + int32 priority1 = node1->GetPriority(); + int32 priority2 = node2->GetPriority(); + if (priority1 != priority2) { + return priority1 > priority2; + } + + int32 numCall1 = node1->GetNumCall(); + int32 numCall2 = node2->GetNumCall(); + if (node1->GetIncPressure() && node2->GetIncPressure()) { + if (numCall1 != numCall2) { + return numCall1 > numCall2; + } + } + + int32 near1 = node1->GetNear(); + int32 near2 = node2->GetNear(); + int32 depthS1 = node1->GetMaxDepth() + near1; + int32 depthS2 = node2->GetMaxDepth() + near2; + if (depthS1 != depthS2) { + return depthS1 > depthS2; + } + + if (near1 != near2) { + return near1 > near2; + } + + if (numCall1 != numCall2) { + return numCall1 > numCall2; + } + + size_t succsSize1 = node1->GetSuccs().size(); + size_t succsSize2 = node2->GetSuccs().size(); + if (succsSize1 != succsSize2) { + return succsSize1 < succsSize2; + } + + if (node1->GetHasPreg() != node2->GetHasPreg()) { + return node1->GetHasPreg(); + } + + return node1->GetInsn()->GetId() < node2->GetInsn()->GetId(); +} + +/* set a node's incPressure to true when the pressure of a register class increases */ +void RegPressureSchedule::ReCalculateDepNodePressure(DepNode &node) +{ + /* if any register class's pressure increases, set incPressure to true. */ + auto &pressures = node.GetPressure(); + node.SetIncPressure(pressures[kRegisterInt] > 0); +} + +/* calculate the maxDepth of every node in nodes. */ +void RegPressureSchedule::CalculateMaxDepth(const MapleVector<DepNode*> &nodes) +{ + /* from the last node to first node. */ + for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) { + /* init call count */ + if ((*it)->GetInsn()->IsCall()) { + (*it)->SetNumCall(1); + } + /* traversing each successor of it. 
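+ * Both values flow bottom-up, since nodes are visited in reverse order:
+ *     maxDepth(n) = max over successors s of (maxDepth(s) + 1)
+ *     numCall(n)  = max over successors s of (numCall(s) + (s is a call ? 1 : 0))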
*/ + for (auto succ : (*it)->GetSuccs()) { + DepNode &to = succ->GetTo(); + if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) { + (*it)->SetMaxDepth(to.GetMaxDepth() + 1); + } + + if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) { + (*it)->SetNumCall(to.GetNumCall() + 1); + } else if ((*it)->GetNumCall() < to.GetNumCall()) { + (*it)->SetNumCall(to.GetNumCall()); + } + } + } +} + +/* calculate the near of every successor of the node. */ +void RegPressureSchedule::CalculateNear(const DepNode &node) +{ + for (auto succ : node.GetSuccs()) { + DepNode &to = succ->GetTo(); + if (succ->GetDepType() == kDependenceTypeTrue && to.GetNear() < node.GetNear() + 1) { + to.SetNear(node.GetNear() + 1); + } + } +} + +/* return true if it is last time using the regNO. */ +bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO) +{ + size_t i = 0; + for (auto reg : node.GetUseRegnos()) { + if (reg == regNO) { + break; + } + ++i; + } + RegList *regList = node.GetRegUses(i); + + /* + * except the node, if there are insn that has no scheduled in regNO'sregList, + * then it is not the last time using the regNO, return false. + */ + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + DepNode *useNode = regList->insn->GetDepNode(); + DEBUG_ASSERT(useNode != nullptr, "get depend node failed in RegPressureSchedule::IsLastUse"); + if ((regList->insn != node.GetInsn()) && (useNode->GetState() != kScheduled)) { + return false; + } + regList = regList->next; + } + return true; +} + +void RegPressureSchedule::CalculatePressure(DepNode &node, regno_t reg, bool def) +{ + RegType regType = GetRegisterType(reg); + /* if def a register, register pressure increase. */ + if (def) { + node.IncPressureByIndex(regType); + } else { + /* if it is the last time using the reg, register pressure decrease. */ + if (IsLastUse(node, reg)) { + node.DecPressureByIndex(regType); + } + } +} + +/* update live reg information. */ +void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool def) +{ + if (def) { + if (liveReg.find(reg) == liveReg.end()) { + (void)liveReg.insert(reg); +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Add new def R" << reg << " to live reg list \n"; +#endif + } + /* if no use list, a register is only defined, not be used */ + size_t i = 1; + for (auto defReg : node.GetDefRegnos()) { + if (defReg == reg) { + break; + } + ++i; + } + if (node.GetRegDefs(i) == nullptr && liveOutRegNO.find(reg) == liveOutRegNO.end()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Remove dead def " << reg << " from live reg list \n"; +#endif + liveReg.erase(reg); + } else if (node.GetRegDefs(i) != nullptr) { +#ifdef PRESCHED_DEBUG + auto regList = node.GetRegDefs(i); + LogInfo::MapleLogger() << i << " Live def, dump use insn here \n"; + while (regList != nullptr) { + node.GetRegDefs(i)->insn->Dump(); + regList = regList->next; + } +#endif + } + } else { + if (IsLastUse(node, reg)) { + if (liveReg.find(reg) != liveReg.end() && liveOutRegNO.find(reg) == liveOutRegNO.end()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Remove last use R" << reg << " from live reg list\n"; +#endif + liveReg.erase(reg); + } + } + } +} + +/* update register pressure information. 
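+ * After a node is scheduled, the running pressure of each register class is
+ * adjusted by the node's pressure delta minus its dead-def count, and the
+ * per-class maximum is tracked so scheduling series can be compared.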
*/ +void RegPressureSchedule::UpdateBBPressure(const DepNode &node) +{ + size_t idx = 0; + for (auto ® : node.GetUseRegnos()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Use Reg : R" << reg << "\n"; + UpdateLiveReg(node, reg, false); + if (liveReg.find(reg) == liveReg.end()) { + ++idx; + continue; + } +#endif + + /* find all insn that use the reg, if a insn use the reg lastly, insn'pressure - 1 */ + RegList *regList = node.GetRegUses(idx); + + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + DepNode *useNode = regList->insn->GetDepNode(); + if (useNode->GetState() == kScheduled) { + regList = regList->next; + continue; + } + + if (IsLastUse(*useNode, reg)) { + RegType regType = GetRegisterType(reg); + useNode->DecPressureByIndex(regType); + } + break; + } + ++idx; + } + +#ifdef PRESCHED_DEBUG + for (auto &defReg : node.GetDefRegnos()) { + UpdateLiveReg(node, defReg, true); + } +#endif + + const auto &pressures = node.GetPressure(); + const auto &deadDefNum = node.GetDeadDefNum(); +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "\nnode's pressure: "; + for (auto pressure : pressures) { + LogInfo::MapleLogger() << pressure << " "; + } + LogInfo::MapleLogger() << "\n"; +#endif + + FOR_ALL_REGCLASS(i) + { + curPressure[i] += pressures[i]; + curPressure[i] -= deadDefNum[i]; + if (curPressure[i] > maxPressure[i]) { + maxPressure[i] = curPressure[i]; + } + } +} + +/* update node priority and try to update the priority of all node's ancestor. */ +void RegPressureSchedule::UpdatePriority(DepNode &node) +{ + std::vector workQueue; + workQueue.emplace_back(&node); + node.SetPriority(maxPriority++); + do { + DepNode *nowNode = workQueue.front(); + (void)workQueue.erase(workQueue.begin()); + for (auto pred : nowNode->GetPreds()) { + DepNode &from = pred->GetFrom(); + if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) { + from.SetPriority(maxPriority); + workQueue.emplace_back(&from); + } + } + } while (!workQueue.empty()); +} + +/* return true if all node's pred has been scheduled. */ +bool RegPressureSchedule::CanSchedule(const DepNode &node) const +{ + return node.GetValidPredsSize() == 0; +} + +/* + * delete node from readylist and + * add the successor of node to readyList when + * 1. successor has no been scheduled; + * 2. successor's has been scheduled or the dependence between node and successor is true-dependence. 
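+ * (that is, either every predecessor of the successor has already been
+ * scheduled, or the edge from node to successor is a true dependence, in
+ * which case the successor may enter the ready list early).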
+ */ +void RegPressureSchedule::UpdateReadyList(const DepNode &node) +{ + /* delete node from readylist */ + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &node) { + readyList.erase(it); + break; + } + } + /* update dependency information of the successors and add nodes into readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { + continue; + } + succNode.DescreaseValidPredsSize(); + if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && + (succNode.GetState() == kNormal)) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + } + } +} +/* + * Another version of UpdateReadyList for brute force ready list update + * The difference is to store the state change status for the successors for later restoring + */ +void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector &changedToReady) +{ + /* delete node from readylist */ + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &node) { + readyList.erase(it); + break; + } + } + /* update dependency information of the successors and add nodes into readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { + continue; + } + succNode.DescreaseValidPredsSize(); + if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && + (succNode.GetState() == kNormal)) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + changedToReady.emplace_back(true); + } else { + changedToReady.emplace_back(false); + } + } +} + +/* + * Restore the ready list status when finishing one brute scheduling series generation + */ +void RegPressureSchedule::RestoreReadyList(DepNode &node, std::vector &changedToReady) +{ + uint32 i = 0; + /* restore state information of the successors and delete them from readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + succNode.IncreaseValidPredsSize(); + if (changedToReady.at(i)) { + succNode.SetState(kNormal); + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &succNode) { + readyList.erase(it); + break; + } + } + } + ++i; + } + /* add the node back into the readyList */ + readyList.emplace_back(&node); +} +/* choose a node to schedule */ +DepNode *RegPressureSchedule::ChooseNode() +{ + DepNode *node = nullptr; + for (auto *it : readyList) { + if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) { + if (CanSchedule(*it)) { + return it; + } else if (node == nullptr) { + node = it; + } + } + } + if (node == nullptr) { + node = readyList.front(); + } + return node; +} + +void RegPressureSchedule::DumpBBLiveInfo() const +{ + LogInfo::MapleLogger() << "Live In: "; + for (auto reg : bb->GetLiveInRegNO()) { + LogInfo::MapleLogger() << "R" << reg << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << "Live Out: "; + for (auto reg : bb->GetLiveOutRegNO()) { + LogInfo::MapleLogger() << "R" << reg << " "; + } + LogInfo::MapleLogger() << "\n"; +} + +void RegPressureSchedule::DumpReadyList() const +{ + LogInfo::MapleLogger() << "readyList: " + << "\n"; + for (DepNode *it : readyList) { + if (CanSchedule(*it)) { + LogInfo::MapleLogger() << it->GetInsn()->GetId() << "CS "; + } else { + LogInfo::MapleLogger() << it->GetInsn()->GetId() << "NO "; + } + } + LogInfo::MapleLogger() << "\n"; +} 
+
+void RegPressureSchedule::DumpSelectInfo(const DepNode &node) const
+{
+    LogInfo::MapleLogger() << "select a node: "
+                           << "\n";
+    node.DumpSchedInfo();
+    node.DumpRegPressure();
+    node.GetInsn()->Dump();
+
+    LogInfo::MapleLogger() << "liveReg: ";
+    for (auto reg : liveReg) {
+        LogInfo::MapleLogger() << "R" << reg << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+
+    LogInfo::MapleLogger() << "\n";
+}
+
+void RegPressureSchedule::DumpDependencyInfo(const MapleVector<DepNode*> &nodes)
+{
+    LogInfo::MapleLogger() << "Dump Dependency Begin \n";
+    for (auto node : nodes) {
+        LogInfo::MapleLogger() << "Insn \n";
+        node->GetInsn()->Dump();
+        LogInfo::MapleLogger() << "Successors \n";
+        /* dump the successors of this node */
+        for (auto *succ : node->GetSuccs()) {
+            DepNode &succNode = succ->GetTo();
+            succNode.GetInsn()->Dump();
+        }
+    }
+    LogInfo::MapleLogger() << "Dump Dependency End \n";
+}
+
+void RegPressureSchedule::ReportScheduleError() const
+{
+    LogInfo::MapleLogger() << "Error No Equal Length for Series"
+                           << "\n";
+    DumpDependencyInfo(originalNodeSeries);
+    for (auto node : scheduledNode) {
+        node->GetInsn()->Dump();
+    }
+    LogInfo::MapleLogger() << "Original One"
+                           << "\n";
+    for (auto node : originalNodeSeries) {
+        node->GetInsn()->Dump();
+    }
+    LogInfo::MapleLogger() << "Error No Equal Length for End"
+                           << "\n";
+}
+
+void RegPressureSchedule::ReportScheduleOutput() const
+{
+    LogInfo::MapleLogger() << "Original Pressure : " << originalPressure << " \n";
+    LogInfo::MapleLogger() << "Scheduled Pressure : " << scheduledPressure << " \n";
+    if (originalPressure > scheduledPressure) {
+        LogInfo::MapleLogger() << "Pressure Reduced by : " << (originalPressure - scheduledPressure) << " \n";
+        return;
+    } else if (originalPressure == scheduledPressure) {
+        LogInfo::MapleLogger() << "Pressure Not Changed \n";
+    } else {
+        LogInfo::MapleLogger() << "Pressure Increased by : " << (scheduledPressure - originalPressure) << " \n";
+    }
+    LogInfo::MapleLogger() << "Pressure Not Reduced, Restore Node Series \n";
+}
+
+void RegPressureSchedule::DumpBBPressureInfo() const
+{
+    LogInfo::MapleLogger() << "curPressure: ";
+    FOR_ALL_REGCLASS(i)
+    {
+        LogInfo::MapleLogger() << curPressure[i] << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+
+    LogInfo::MapleLogger() << "maxPressure: ";
+    FOR_ALL_REGCLASS(i)
+    {
+        LogInfo::MapleLogger() << maxPressure[i] << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+}
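The `DoScheduling` driver that follows picks one of three strategies from the measured pressure. Reduced to a hedged sketch, where the names and the threshold parameter are placeholders standing in for `g_pressureStandard`/`g_optimisticScheduling`, not the real tunables:

```cpp
enum class Strategy { kSkip, kHeuristic, kBruteForce, kPartial };

// Pick a scheduling strategy from the measured pressure and block shape.
Strategy PickStrategy(int pressure, int threshold, bool hasSplitters, bool enumerationFeasible)
{
    if (pressure < threshold) {
        return Strategy::kSkip;     // pre-scheduling cannot pay off
    }
    if (hasSplitters) {
        return Strategy::kPartial;  // schedule each sub-range independently
    }
    return enumerationFeasible ? Strategy::kBruteForce : Strategy::kHeuristic;
}
```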
+void RegPressureSchedule::DoScheduling(MapleVector<DepNode*> &nodes)
+{
+    /* Store the original series */
+    originalNodeSeries.clear();
+    for (auto node : nodes) {
+        originalNodeSeries.emplace_back(node);
+    }
+    initPartialSplitters(nodes);
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "\n Calculate Pressure Info for Schedule Input Series \n";
+#endif
+    originalPressure = CalculateRegisterPressure(nodes);
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "Original pressure : " << originalPressure << "\n";
+#endif
+    /* The original pressure is small enough, skip pre-scheduling */
+    if (originalPressure < g_pressureStandard) {
+#ifdef PRESCHED_DEBUG
+        LogInfo::MapleLogger() << "Original pressure is small enough, skip pre-scheduling \n";
+#endif
+        return;
+    }
+    if (splitterIndexes.empty()) {
+#ifdef PRESCHED_DEBUG
+        LogInfo::MapleLogger() << "No splitter, normal scheduling \n";
+#endif
+        if (!g_optimisticScheduling) {
+            HeuristicScheduling(nodes);
+        } else {
+            InitBruteForceScheduling(nodes);
+            BruteForceScheduling();
+            if (optimisticScheduledNodes.size() == nodes.size() && minPressure < originalPressure) {
+                nodes.clear();
+                for (auto node : optimisticScheduledNodes) {
+                    nodes.emplace_back(node);
+                }
+            }
+        }
+    } else {
+        /* Split the node list into multiple parts based on the split points and schedule each part */
+        PartialScheduling(nodes);
+    }
+    scheduledPressure = CalculateRegisterPressure(nodes);
+    EmitSchedulingSeries(nodes);
+}
+
+void RegPressureSchedule::HeuristicScheduling(MapleVector<DepNode*> &nodes)
+{
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "--------------- bb " << bb->GetId() << " begin scheduling -------------"
+                           << "\n";
+    DumpBBLiveInfo();
+#endif
+
+    /* initialize register pressure information and the readyList. */
+    Init(nodes);
+    CalculateMaxDepth(nodes);
+    while (!readyList.empty()) {
+        /* calculate register pressure */
+        for (DepNode *it : readyList) {
+            ReCalculateDepNodePressure(*it);
+        }
+        if (readyList.size() > 1) {
+            SortReadyList();
+        }
+
+        /* choose a node that can be scheduled now. */
+        DepNode *node = ChooseNode();
+#ifdef PRESCHED_DEBUG
+        DumpBBPressureInfo();
+        DumpReadyList();
+        LogInfo::MapleLogger() << "first tmp select node: " << node->GetInsn()->GetId() << "\n";
+#endif
+
+        while (!CanSchedule(*node)) {
+            UpdatePriority(*node);
+            SortReadyList();
+            node = readyList.front();
+#ifdef PRESCHED_DEBUG
+            LogInfo::MapleLogger() << "update ready list: "
+                                   << "\n";
+            DumpReadyList();
+#endif
+        }
+
+        scheduledNode.emplace_back(node);
+        /* mark the node as scheduled */
+        node->SetState(kScheduled);
+        UpdateBBPressure(*node);
+        CalculateNear(*node);
+        UpdateReadyList(*node);
+#ifdef PRESCHED_DEBUG
+        DumpSelectInfo(*node);
+#endif
+    }
+
+#ifdef PRESCHED_DEBUG
+    LogInfo::MapleLogger() << "---------------------------------- end --------------------------------"
+                           << "\n";
+#endif
+    /* update nodes according to scheduledNode. */
+    nodes.clear();
+    for (auto node : scheduledNode) {
+        nodes.emplace_back(node);
+    }
+}
+/*
+ * Calculate the register pressure for the current BB based on an instruction series
+ */
+int RegPressureSchedule::CalculateRegisterPressure(MapleVector<DepNode*> &nodes)
+{
+    /* Initialize the live set and the live-in/live-out register information */
+    liveReg.clear();
+    liveInRegNO = bb->GetLiveInRegNO();
+    liveOutRegNO = bb->GetLiveOutRegNO();
+    std::vector<ScheduleState> restoreStateSeries;
+    int maximumPressure = 0;
+    /* Mock all the nodes to kScheduled status for the pressure calculation */
+    for (auto node : nodes) {
+        restoreStateSeries.emplace_back(node->GetState());
+        node->SetState(kScheduled);
+    }
+    /* Update the live register set according to the instruction series */
+    for (auto node : nodes) {
+        for (auto &reg : node->GetUseRegnos()) {
+            UpdateLiveReg(*node, reg, false);
+        }
+        for (auto &defReg : node->GetDefRegnos()) {
+            UpdateLiveReg(*node, defReg, true);
+        }
+        int currentPressure = static_cast<int>(liveReg.size());
+        if (currentPressure > maximumPressure) {
+            maximumPressure = currentPressure;
+        }
+#ifdef PRESCHED_DEBUG
+        node->GetInsn()->Dump();
+        LogInfo::MapleLogger() << "Dump Live Reg : "
+                               << "\n";
+        for (auto reg : liveReg) {
+            LogInfo::MapleLogger() << "R" << reg << " ";
+        }
+        LogInfo::MapleLogger() << "\n";
+#endif
+    }
+    /* Restore the schedule state */
+    uint32 i = 0;
+    for (auto node : nodes) {
+        node->SetState(restoreStateSeries.at(i));
+        ++i;
+    }
+    return maximumPressure;
+}
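`CalculateRegisterPressure` above temporarily marks every node `kScheduled` and replays the series through `UpdateLiveReg`, recording the live set's high-water mark. The same idea in a self-contained form, under the simplifying assumption that a value dies right after its last use in the series (`MockInsn` is hypothetical, not the maplebe `DepNode`):

```cpp
#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include <vector>

struct MockInsn {
    std::vector<uint32_t> uses;  // registers read
    std::vector<uint32_t> defs;  // registers written
};

// Replay the series once with a live set; pressure is its high-water mark.
int MaxPressure(const std::vector<MockInsn> &series)
{
    // lastUse[r] = index of the last instruction reading r
    std::map<uint32_t, size_t> lastUse;
    for (size_t i = 0; i < series.size(); ++i) {
        for (uint32_t r : series[i].uses) {
            lastUse[r] = i;
        }
    }
    std::set<uint32_t> live;
    int maxPressure = 0;
    for (size_t i = 0; i < series.size(); ++i) {
        for (uint32_t r : series[i].uses) live.insert(r);  // operands are live here
        for (uint32_t r : series[i].defs) live.insert(r);  // results become live
        maxPressure = std::max(maxPressure, static_cast<int>(live.size()));
        for (uint32_t r : series[i].uses) {
            if (lastUse[r] == i) {
                live.erase(r);  // last use: the value dies
            }
        }
    }
    return maxPressure;
}
```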
+/*
+ * Split the series into multiple parts and conduct pre-scheduling in every part
+ */
+void RegPressureSchedule::PartialScheduling(MapleVector<DepNode*> &nodes)
+{
+    for (size_t i = 0; i < splitterIndexes.size() - 1; ++i) {
+        constexpr uint32 lastTwoNodeIndex = 2;
+        auto begin = static_cast<uint32>(splitterIndexes.at(i));
+        auto end = static_cast<uint32>(splitterIndexes.at(i + 1));
+        for (uint32 j = begin; j < end; ++j) {
+            partialList.emplace_back(nodes.at(j));
+        }
+        if (i == splitterIndexes.size() - lastTwoNodeIndex) {
+            partialList.emplace_back(nodes.at(end));
+        }
+        for (auto node : partialList) {
+            partialSet.insert(node);
+        }
+        HeuristicScheduling(partialList);
+        for (auto node : partialList) {
+            partialScheduledNode.emplace_back(node);
+        }
+        partialList.clear();
+        partialSet.clear();
+    }
+    nodes.clear();
+    /* Construct the overall scheduling output */
+    for (auto node : partialScheduledNode) {
+        nodes.emplace_back(node);
+    }
+}
+
+/*
+ * Brute-force scheduling algorithm
+ * It enumerates all the possible schedule series and picks the best one
+ */
+void RegPressureSchedule::BruteForceScheduling()
+{
+    /* stop brute-force scheduling when the series count exceeds the limit */
+    if (g_bruteMaximumLimit && (scheduleSeriesCount > g_schedulingMaximumCount)) {
+        return;
+    }
+    int defaultPressureValue = -1;
+    /* the readyList is empty, scheduling is over */
+    if (readyList.empty()) {
+        if (originalNodeSeries.size() != scheduledNode.size()) {
+#ifdef PRESCHED_DEBUG
+            ReportScheduleError();
+#endif
+            return;
+        }
+        ++scheduleSeriesCount;
+        int currentPressure = CalculateRegisterPressure(scheduledNode);
+        if (minPressure == defaultPressureValue || currentPressure < minPressure) {
+            minPressure = currentPressure;
+            /* update the better scheduled series */
+            optimisticScheduledNodes.clear();
+            for (auto node : scheduledNode) {
+                optimisticScheduledNodes.emplace_back(node);
+            }
+            return;
+        }
+        return;
+    }
+    /* store the current status of the ready list */
+    std::vector<DepNode*> innerList;
+    for (auto tempNode : readyList) {
+        innerList.emplace_back(tempNode);
+    }
+    for (auto *node : innerList) {
+        if (CanSchedule(*node)) {
+            /* update the readyList and the node dependency info */
+            std::vector<bool> changedToReady;
+            BruteUpdateReadyList(*node, changedToReady);
+            scheduledNode.emplace_back(node);
+            node->SetState(kScheduled);
+            BruteForceScheduling();
+            node->SetState(kReady);
+            /* restore the readyList and the node dependency info */
+            RestoreReadyList(*node, changedToReady);
+            scheduledNode.pop_back();
+        }
+    }
+}
+
+/*
+ * Calculate the pred size based on the dependency information
+ */
+void RegPressureSchedule::CalculatePredSize(DepNode &node)
+{
+    constexpr uint32 emptyPredsSize = 0;
+    node.SetValidPredsSize(emptyPredsSize);
+    for (auto pred : node.GetPreds()) {
+        DepNode &from = pred->GetFrom();
+        if (!partialSet.empty() && (partialSet.find(&from) == partialSet.end())) {
+            continue;
+        } else {
+            node.IncreaseValidPredsSize();
+        }
+    }
+}
+
+void RegPressureSchedule::InitBruteForceScheduling(MapleVector<DepNode*> &nodes)
+{
+    /* Calculate the pred size of each node */
+    for (auto node : nodes) {
+        CalculatePredSize(*node);
+    }
+    readyList.clear();
+    optimisticScheduledNodes.clear();
+    scheduledNode.clear();
+    DepNode *firstNode = nodes.front();
+    firstNode->SetState(kReady);
+    readyList.emplace_back(firstNode);
+}
+
+/*
+ * Give out the pre-scheduling output based on the new register pressure
+ */
+void RegPressureSchedule::EmitSchedulingSeries(MapleVector<DepNode*> &nodes)
+{
+#ifdef PRESCHED_DEBUG
+    ReportScheduleOutput();
+#endif
+    if (originalPressure <= scheduledPressure) {
+        /* Restore the original series */
+        nodes.clear();
+        for (auto node : originalNodeSeries) {
+            nodes.emplace_back(node);
+        }
+    }
+}
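A note on why `BruteForceScheduling` above caps `scheduleSeriesCount`: the number of candidate series grows factorially with block size, so exhaustive enumeration is only feasible for small blocks (20 instructions already allow roughly 2.4e18 orderings before dependences prune anything). A compile-time illustration of the bound:

```cpp
#include <cstdint>

// n! is an upper bound on the linearizations of an n-node dependence DAG;
// dependences prune this, but not enough to avoid a hard cap.
constexpr uint64_t SeriesUpperBound(uint64_t n)
{
    uint64_t total = 1;
    for (uint64_t i = 2; i <= n; ++i) {
        total *= i;
    }
    return total;
}
static_assert(SeriesUpperBound(10) == 3628800, "10! candidate series at most");
```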
+/*
+ * ------------- Schedule function ----------
+ * calculate and mark each insn id, each BB's firstLoc and lastLoc.
+ */
+void Schedule::InitIDAndLoc()
+{
+    uint32 id = 0;
+    FOR_ALL_BB(bb, &cgFunc) {
+        bb->SetLastLoc(bb->GetPrev() ? bb->GetPrev()->GetLastLoc() : nullptr);
+        FOR_BB_INSNS(insn, bb) {
+            insn->SetId(id++);
+#if DEBUG
+            insn->AppendComment(" Insn id: " + std::to_string(insn->GetId()));
+#endif
+            if (insn->IsImmaterialInsn() && !insn->IsComment()) {
+                bb->SetLastLoc(insn);
+            } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) {
+                bb->SetFirstLoc(*bb->GetLastLoc());
+            }
+        }
+    }
+}
+
+/* === new pm === */
+bool CgPreScheduling::PhaseRun(maplebe::CGFunc &f)
+{
+    if (f.HasAsm()) {
+        return true;
+    }
+    if (LIST_SCHED_DUMP_NEWPM) {
+        LogInfo::MapleLogger() << "Before CgDoPreScheduling : " << f.GetName() << "\n";
+        DotGenerator::GenerateDot("preschedule", f, f.GetMirModule(), true);
+    }
+    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    /* reset the live analysis result container. */
+    DEBUG_ASSERT(live != nullptr, "nullptr check");
+    live->ResetLiveSet();
+
+    Schedule *schedule = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+#if TARGARM32
+    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+    schedule->ListScheduling(true);
+    live->ClearInOutDataInfo();
+
+    return true;
+}
+
+void CgPreScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreScheduling, prescheduling)
+
+bool CgScheduling::PhaseRun(maplebe::CGFunc &f)
+{
+    if (f.HasAsm()) {
+        return true;
+    }
+    if (LIST_SCHED_DUMP_NEWPM) {
+        LogInfo::MapleLogger() << "Before CgDoScheduling : " << f.GetName() << "\n";
+        DotGenerator::GenerateDot("scheduling", f, f.GetMirModule(), true);
+    }
+    auto *live = GET_ANALYSIS(CgLiveAnalysis, f);
+    /* reset the live analysis result container. */
+    DEBUG_ASSERT(live != nullptr, "nullptr check");
+    live->ResetLiveSet();
+
+    Schedule *schedule = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    schedule = GetPhaseAllocator()->New<AArch64Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+#if TARGARM32
+    schedule = GetPhaseAllocator()->New<Arm32Schedule>(f, *GetPhaseMemPool(), *live, PhaseName());
+#endif
+    schedule->ListScheduling(false);
+    live->ClearInOutDataInfo();
+
+    return true;
+}
+
+void CgScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgLiveAnalysis>();
+    aDep.PreservedAllExcept<CgLiveAnalysis>();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgScheduling, scheduling)
+} /* namespace maplebe */
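The two phase wrappers above follow the same contract: declare the analyses they require, run, and let the pass manager invalidate whatever was not preserved. A minimal sketch of that contract, with hypothetical names rather than the real maple_phase API:

```cpp
#include <functional>
#include <map>
#include <set>
#include <string>

// Sketch of the pass-manager side: required analyses are (re)computed on
// demand; after the phase body runs, non-preserved results are invalidated.
struct PassManagerSketch {
    std::map<std::string, bool> analysisValid;

    void RunPhase(const std::set<std::string> &required,
                  const std::set<std::string> &notPreserved,
                  const std::function<void()> &body)
    {
        for (const auto &a : required) {
            if (!analysisValid[a]) {
                analysisValid[a] = true;  // recompute the stale analysis
            }
        }
        body();
        for (const auto &a : notPreserved) {
            analysisValid[a] = false;  // e.g. CgLiveAnalysis after scheduling
        }
    }
};
```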
+ */ + +#include "isel.h" +#include "standardize.h" +namespace maplebe { + +void Standardize::DoStandardize() +{ + /* two address mapping first */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (NeedAddressMapping(*insn)) { + AddressMapping(*insn); + } + } + } + + /* standardize for each op */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (insn->IsMove()) { + StdzMov(*insn); + } else if (insn->IsStore() || insn->IsLoad()) { + StdzStrLdr(*insn); + } else if (insn->IsBasicOp()) { + StdzBasicOp(*insn); + } else if (insn->IsUnaryOp()) { + StdzUnaryOp(*insn, *cgFunc); + } else if (insn->IsConversion()) { + StdzCvtOp(*insn, *cgFunc); + } else if (insn->IsShift()) { + StdzShiftOp(*insn, *cgFunc); + } else { + LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; + CHECK_FATAL(false, "NIY"); + } + } + } +} + +void Standardize::AddressMapping(Insn &insn) +{ + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src1 = insn.GetOperand(kInsnSecondOpnd); + uint32 destSize = dest.GetSize(); + bool isInt = (static_cast(dest).GetRegisterType() == kRegTyInt); + MOperator mOp = abstract::MOP_undef; + switch (destSize) { + case k8BitSize: + mOp = isInt ? abstract::MOP_copy_rr_8 : abstract::MOP_undef; + break; + case k16BitSize: + mOp = isInt ? abstract::MOP_copy_rr_16 : abstract::MOP_undef; + break; + case k32BitSize: + mOp = isInt ? abstract::MOP_copy_rr_32 : abstract::MOP_copy_ff_32; + break; + case k64BitSize: + mOp = isInt ? abstract::MOP_copy_rr_64 : abstract::MOP_copy_ff_64; + break; + default: + break; + } + CHECK_FATAL(mOp != abstract::MOP_undef, "do two address mapping failed"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)newInsn.AddOpndChain(dest).AddOpndChain(src1); + (void)insn.GetBB()->InsertInsnBefore(insn, newInsn); +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e5ad075fecd94b60080934ccc59fa7abf30100e3 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e5ad075fecd94b60080934ccc59fa7abf30100e3 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/strldr.cpp @@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if TARGAARCH64
+#include "aarch64_strldr.h"
+#elif TARGRISCV64
+#include "riscv64_strldr.h"
+#endif
+#if TARGARM32
+#include "arm32_strldr.h"
+#endif
+#include "reaching.h"
+#include "cg.h"
+#include "optimize_common.h"
+
+namespace maplebe {
+using namespace maple;
+#define SCHD_DUMP_NEWPM CG_DEBUG_FUNC(f)
+bool CgStoreLoadOpt::PhaseRun(maplebe::CGFunc &f)
+{
+    if (SCHD_DUMP_NEWPM) {
+        DotGenerator::GenerateDot("storeloadopt", f, f.GetMirModule(), true);
+    }
+    ReachingDefinition *reachingDef = nullptr;
+    if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) {
+        reachingDef = GET_ANALYSIS(CgReachingDefinition, f);
+    }
+    if (reachingDef == nullptr || !f.GetRDStatus()) {
+        GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id);
+        return false;
+    }
+    (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase<MapleFunctionPhase<CGFunc>, CGFunc>(&CgLoopAnalysis::id, f);
+
+    StoreLoadOpt *storeLoadOpt = nullptr;
+#if TARGAARCH64 || TARGRISCV64
+    storeLoadOpt = GetPhaseMemPool()->New<AArch64StoreLoadOpt>(f, *GetPhaseMemPool());
+#endif
+#if TARGARM32
+    storeLoadOpt = GetPhaseMemPool()->New<Arm32StoreLoadOpt>(f, *GetPhaseMemPool());
+#endif
+    storeLoadOpt->Run();
+    return true;
+}
+void CgStoreLoadOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const
+{
+    aDep.AddRequired<CgReachingDefinition>();
+    aDep.SetPreservedAll();
+}
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgStoreLoadOpt, storeloadopt)
+} /* namespace maplebe */
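The AsmAssembler added in the next file builds AT&T-syntax text the same way throughout: tab, mnemonic, size suffix, tab, then source operand before destination. A toy version of that emit pipeline (hypothetical helper, not the real Emit/EmitInsnSuffix API):

```cpp
#include <iostream>
#include <string>

// AT&T syntax: mnemonic + size suffix, '%' register prefix, source first.
void EmitBinary(std::ostream &os, const std::string &mnemonic, char sizeSuffix,
                const std::string &src, const std::string &dst)
{
    os << '\t' << mnemonic << sizeSuffix  // e.g. "movq" for a 64-bit move
       << '\t' << '%' << src << ", %" << dst << '\n';
}

int main()
{
    EmitBinary(std::cout, "mov", 'q', "rax", "rbx");  // prints: movq %rax, %rbx
    EmitBinary(std::cout, "add", 'l', "ecx", "edx");  // prints: addl %ecx, %edx
    return 0;
}
```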
+ */ + +#include "assembler/asm_assembler.h" +#include +#include +#include "dwarf.h" + +namespace assembler { +void AsmAssembler::InitialFileInfo(const std::string &inputFileName) +{ + std::string curDirName = get_current_dir_name(); + assert(curDirName != "" && "InitialFileInfo: curDirName is nullptr"); + std::string path(curDirName); + std::string cgFile(path.append("/mplcg")); + EmitComment(cgFile); + EmitComment("Compiling"); + EmitComment("Be options"); + + path = curDirName; + (void)path.append("/").append(inputFileName); + std::string irFile("\""); + (void)irFile.append(path).append("\""); + EmitDirective(kFile); + Emit(irFile); + Emit("\n"); +} + +void AsmAssembler::EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName) +{ + if (secName != nullptr) { + EmitDirective(kSection); + Emit(*secName); + Emit(",\"ax\",@progbits\n"); + } else { + EmitSectionDirective(kSText); + } + EmitDirective(kAlign, 0, false, k8Bits); + + EmitSymbolAttrDirective(funcAttr, symIdx); + + EmitDirective(kFuncType, symIdx); + EmitDirective(kName, symIdx); + Emit("\t.cfi_startproc\n"); +} + +void AsmAssembler::EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName) +{ + std::string bbLabel = GetNameFromSymMap(labelSymIdx); + if (genVerboseInfo) { + Emit("// freq:"); + Emit(freq); + Emit("\n"); + + Emit(bbLabel); + Emit(":"); + if (mirName != nullptr) { + Emit("\t// MIR: @"); + Emit(*mirName); + } + Emit("\n"); + } else { + EmitDirective(kName, labelSymIdx); + } +} + +void AsmAssembler::EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr) +{ + (void)funcAttr; + Emit("\t.cfi_endproc\n"); + EmitDirective(kSize, symIdx); + Emit("\n"); +} + +void AsmAssembler::PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte, bool belongsToTextSec) +{ + (void)sizeInByte; + bool isLocal = false; + if (symAttr == kSALocal) { + isLocal = true; + } + EmitDirective(kSize, symIdx, isLocal); + Emit("\n"); +} + +void AsmAssembler::EmitFloatValue(int64 symIdx, int64 value, size_t valueSize) +{ + (void) symIdx; + EmitSizeDirective(valueSize, value, false, true); +} + +void AsmAssembler::EmitJmpTableElem(int64 jmpLabelIdx, const std::vector &labelIdxs) +{ + EmitDirective(kAlign, 0, false, k8Bits); + EmitDirective(kName, jmpLabelIdx); + for (int64 labelIdx : labelIdxs) { + EmitSizeDirective(k8Bytes, labelIdx, true); + } +} + +void AsmAssembler::EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr, + SectionKind sectionKind) +{ + bool isLocal = false; + if (symAttr == kSALocal) { + isLocal = true; + } + + if (sectionKind == kSComm || sectionKind == kSBss) { + EmitSectionDirective(kSData); + EmitSymbolAttrDirective(symAttr, symIdx, isLocal); + EmitDirective(kAlign, 0, isLocal, alignInByte); + Emit("\t.comm\t"); + std::string name = GetNameFromSymMap(symIdx, isLocal); + Emit(name); + Emit(", "); + Emit(sizeInByte); + Emit(", "); + Emit(alignInByte); + Emit("\n"); + } else { + EmitDirective(kObjType, symIdx, isLocal); + EmitSectionDirective(sectionKind); + EmitSymbolAttrDirective(symAttr, symIdx, isLocal); + EmitDirective(kAlign, 0, isLocal, alignInByte); + EmitDirective(kName, symIdx, isLocal); + } +} + +void AsmAssembler::EmitDirectString(const std::string &ustr, bool belongsToDataSec, int64 strSymIdx, bool emitAscii) +{ + (void)belongsToDataSec; + if (strSymIdx != 0) { + EmitSectionDirective(kSData); + EmitDirective(kAlign, 0, false, k8Bits); + EmitDirective(kName, strSymIdx); + } + + if (emitAscii) { + Emit("\t.ascii\t\""); + } 
else { + Emit("\t.string\t\""); + } + + const char *str = ustr.c_str(); + size_t len = ustr.size(); + /* Rewrite special char with \\ */ + for (size_t i = 0; i < len; i++) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + Emit(buf); + } else if (*str == '\b') { + Emit("\\b"); + } else if (*str == '\n') { + Emit("\\n"); + } else if (*str == '\r') { + Emit("\\r"); + } else if (*str == '\t') { + Emit("\\t"); + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + Emit(buf); + } else { + /* all others, print as number */ + (void)snprintf_s(buf, sizeof(buf), 4, "\\%03o", (*str) & 0xFF); /* 4: max store chars */ + buf[kLastChar] = '\0'; + Emit(buf); + } + str++; + } + Emit("\"\n"); +} + +void AsmAssembler::EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) +{ + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, strSymIdx, true); +} + +void AsmAssembler::EmitIntValue(int64 value, size_t elemSize, bool belongsToDataSec) +{ + (void)belongsToDataSec; + EmitSizeDirective(elemSize, value, false); +} + +void AsmAssembler::EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) +{ + (void)belongsToDataSec; + Emit("\t.quad\t"); + std::string name = GetNameFromSymMap(symIdx); + Emit(name); + if (symAddrOfs != 0) { + Emit(" + "); + Emit(symAddrOfs); + } + if (structFieldOfs != 0) { + Emit(" + "); + Emit(structFieldOfs); + } + Emit("\n"); +} + +void AsmAssembler::EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) +{ + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, symIdx, true); +} + +void AsmAssembler::EmitLabelValue(int64 symIdx, bool belongsToDataSec) +{ + (void)belongsToDataSec; + EmitSizeDirective(k8Bytes, symIdx, true); +} + +void AsmAssembler::EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) +{ + (void)belongsToDataSec; + EmitSizeDirective(k1Byte, combineBitFieldValue, false); +} + +/* emit debug info */ +void AsmAssembler::EmitHexUnsigned(uint64 num) +{ + std::ios::fmtflags flag(this->outStream.flags()); + this->outStream << "0x" << std::hex << num; + (void)this->outStream.flags(flag); +} + +void AsmAssembler::EmitDecUnsigned(uint64 num) +{ + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + (void)outStream.flags(flag); +} + +void AsmAssembler::EmitDecSigned(int64 num) +{ + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + (void)outStream.flags(flag); +} + +void AsmAssembler::EmitDIHeader() +{ + Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + Emit(".L" XSTR(TEXT_BEGIN) ":\n"); +} + +void AsmAssembler::EmitDIFooter() +{ + Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + Emit(".L" XSTR(TEXT_END) ":\n"); +} + +void AsmAssembler::EmitDIHeaderFileInfo() +{ + Emit("// dummy header file 1\n"); + Emit("// dummy header file 2\n"); + Emit("// dummy header file 3\n"); +} + +void AsmAssembler::EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) +{ + /* From DWARF Standard Specification V4. 
7.5.1 + collect section size */ + Emit("\t.section\t.debug_info,\"\",@progbits\n"); + /* label to mark start of the .debug_info section */ + Emit(".L" XSTR(DEBUG_INFO_0) ":\n"); + /* $ 7.5.1.1 */ + Emit("\t.4byte\t"); + EmitHexUnsigned(debugInfoLength); + Emit(CMNT "section length\n"); + /* DWARF version. uhalf. */ + Emit("\t.2byte\t"); + /* 4 for version 4. */ + EmitHexUnsigned(kDwarfVersion); + Emit("\n"); + /* debug_abbrev_offset. 4byte for 32-bit, 8byte for 64-bit */ + Emit("\t.4byte\t.L" XSTR(DEBUG_ABBREV_0) "\n"); + /* address size. ubyte */ + Emit("\t.byte\t"); + EmitHexUnsigned(kSizeOfPTR); + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName, + uint32 offset, uint32 size) +{ + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(abbrevId); + if (verbose) { + Emit(CMNT); + Emit(dieTagName); + Emit(" Offset= "); + EmitHexUnsigned(offset); + Emit(" ("); + EmitDecUnsigned(offset); + Emit(" ), Size= "); + EmitHexUnsigned(size); + Emit(" ("); + EmitDecUnsigned(size); + Emit(" )\n"); + } else { + Emit("\n"); + } +} + +void AsmAssembler::EmitDIFormSpecification(unsigned int dwform) +{ + Emit("\t"); + switch (dwform) { + case DW_FORM_string: + Emit(".string"); + break; + case DW_FORM_strp: + case DW_FORM_data4: + case DW_FORM_ref4: + Emit(".4byte "); + break; + case DW_FORM_data1: + Emit(".byte "); + break; + case DW_FORM_data2: + Emit(".2byte "); + break; + case DW_FORM_data8: + Emit(".8byte "); + break; + case DW_FORM_sec_offset: + Emit(".4byte "); + break; + /* if DWARF64, should be .8byte? */ + case DW_FORM_addr: /* Should we use DWARF64? for now, we generate .8byte as gcc does for DW_FORM_addr */ + Emit(".8byte "); + break; + case DW_FORM_exprloc: + Emit(".uleb128 "); + break; + default: + assert(0 && "NYI"); + break; + } +} + +void AsmAssembler::EmitDwFormString(const std::string &name) +{ + Emit("\""); + Emit(name); + Emit("\""); + Emit(CMNT "len = "); + EmitDecUnsigned(name.length() + 1); +} + +void AsmAssembler::EmitDwFormStrp(uint32 strLabelId, size_t strTableSize) +{ + Emit(".L" XSTR(DEBUG_STR_LABEL)); + outStream << strLabelId; +} + +void AsmAssembler::EmitDwFormData(int32 attrValue, uint8 sizeInByte) +{ + EmitHexUnsigned(attrValue); +} + +void AsmAssembler::EmitDwFormData8() +{ + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); +} + +void AsmAssembler::EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx, uint32 endLabelIdx, + uint32 startLabelIdx) +{ + outStream << ".L." << endLabelFuncPuIdx << "__" << endLabelIdx; + Emit("-"); + outStream << ".L." << startLabelFuncPuIdx << "__" << startLabelIdx; +} + +void AsmAssembler::EmitLabel(uint32 funcPuIdx, uint32 labIdx) +{ + outStream << ".L." 
<< funcPuIdx << "__" << labIdx; +} + +void AsmAssembler::EmitDwFormSecOffset() +{ + Emit(".L"); + Emit(XSTR(DEBUG_LINE_0)); +} + +void AsmAssembler::EmitDwFormAddr(bool emitTextBegin) +{ + if (emitTextBegin) { + Emit(".L" XSTR(TEXT_BEGIN)); + } else { + Emit("XXX--ADDR--XXX"); + } +} + +void AsmAssembler::EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) +{ + if (emitOffset) { + Emit(" OFFSET "); + } + EmitHexUnsigned(offsetOrValue); + if (unknownType) { + Emit(CMNT "Warning: dummy type used"); + } +} + +void AsmAssembler::EmitDwFormExprlocCfa(uint32 dwOp) +{ + EmitHexUnsigned(1); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); +} + +void AsmAssembler::EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) +{ + EmitHexUnsigned(k9ByteSize); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit("\n\t.8byte "); + Emit(addrStr); +} + +void AsmAssembler::EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) +{ + EmitHexUnsigned(1 + sleb128Size); + Emit(CMNT "uleb128 size"); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit("\n\t.sleb128 "); + EmitDecSigned(fboffset); +} + +void AsmAssembler::EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) +{ + EmitHexUnsigned(k2Bytes); + Emit(CMNT "size"); + Emit("\n\t.byte "); + EmitHexUnsigned(dwOp); + Emit(CMNT); + Emit(dwOpName); + Emit("\n\t.sleb128 "); + EmitDecSigned(0); + Emit(CMNT "offset"); +} + +void AsmAssembler::EmitDwFormExprloc(uintptr elp) +{ + EmitHexUnsigned(elp); +} + +void AsmAssembler::EmitDIDwName(const std::string &dwAtName, const std::string &dwForName) +{ + Emit(CMNT); + Emit(dwAtName); + Emit(" : "); + Emit(dwForName); +} + +void AsmAssembler::EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName, + bool withChildren) +{ + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(abbrevId); + if (verbose) { + Emit(CMNT "Abbrev Entry ID"); + } + Emit("\n"); + /* TAG */ + Emit("\t.uleb128 "); + EmitHexUnsigned(tag); + if (verbose) { + Emit(CMNT); + Emit(dwTagName); + } + Emit("\n"); + /* children? */ + Emit("\t.byte "); + EmitHexUnsigned(static_cast(withChildren)); + if (verbose) { + Emit(withChildren ? 
CMNT "DW_CHILDREN_yes" : CMNT "DW_CHILDREN_no"); + } + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, + const std::string &dwAtName, const std::string &dwFromName) +{ + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + Emit("\t.uleb128 "); + EmitHexUnsigned(aplAt); + if (verbose) { + Emit(CMNT); + Emit(dwAtName); + } + Emit("\n"); + Emit("\t.uleb128 "); + EmitHexUnsigned(aplFrom); + if (verbose) { + Emit(CMNT); + Emit(dwFromName); + } + Emit("\n"); +} + +void AsmAssembler::EmitDIDebugStrSection(const std::vector &strps, const std::vector &debugStrs, + uint64 size, size_t strTableSize) +{ + Emit("\t.section\t.debug_str,\"MS\",@progbits,1\n"); + for (int i = 0; i < static_cast(debugStrs.size()); i++) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + this->outStream << strps[i]; + Emit(":\n"); + Emit("\t.string \""); + Emit(debugStrs[i]); + Emit("\"\n"); + } +} + +void AsmAssembler::EmitNull(uint64 sizeInByte) +{ + EmitDirective(kZero); + Emit(sizeInByte); + Emit("\n"); +} + +/* start of X64 instructions */ +/* mov */ +void AsmAssembler::Mov(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tmov"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* floating point mov */ +void AsmAssembler::Mov(Reg srcReg, Reg destReg, bool isMovD) { + if (isMovD) { + Emit("\tmovd\t"); + } else { + Emit("\tmovq\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::MovF(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Emit("\tmovss\t"); + } else { + Emit("\tmovsd\t"); + } + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::MovF(Reg reg, const Mem &mem, bool isSingle) { + if (isSingle) { + Emit("\tmovss\t"); + } else { + Emit("\tmovsd\t"); + } + EmitRegMem(reg, mem); + Emit("\n"); +} + +/* movabs */ +void AsmAssembler::Movabs(const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tmovabs"); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Movabs(int64 symIdx, Reg reg) +{ + Emit("\tmovabs"); + Emit("\t"); + EmitLabelReg(symIdx, reg); + Emit("\n"); +} + +/* push */ +void AsmAssembler::Push(InsnSize insnSize, Reg reg) +{ + Emit("\tpush"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +/* pop */ +void AsmAssembler::Pop(InsnSize insnSize, Reg reg) +{ + Emit("\tpop"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +/* lea */ +void AsmAssembler::Lea(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tlea"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* movzx */ +void AsmAssembler::MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg 
destReg) +{ + Emit("\tmovz"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) +{ + Emit("\tmovz"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* movsx */ +void AsmAssembler::MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) +{ + Emit("\tmovs"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) +{ + Emit("\tmovs"); + EmitInsnSuffix(sSize); + EmitInsnSuffix(dSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* add */ +void AsmAssembler::Add(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tadd"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* add floating point */ +void AsmAssembler::Add(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Emit("\taddss\t"); + } else { + Emit("\taddsd\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Add(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Emit("\taddss\t"); + } else { + Emit("\taddsd\t"); + } + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* sub */ +void AsmAssembler::Sub(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tsub"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* sub floating point */ +void AsmAssembler::Sub(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Emit("\tsubss\t"); + } else { + Emit("\tsubsd\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Sub(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Emit("\tsubss\t"); + } else { + Emit("\tsubsd\t"); + } + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* and 
*/ +void AsmAssembler::And(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tand"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* or */ +void AsmAssembler::Or(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* xor */ +void AsmAssembler::Xor(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\txor"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* not */ +void AsmAssembler::Not(InsnSize insnSize, Reg reg) +{ + Emit("\tnot"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Not(InsnSize insnSize, const Mem &mem) +{ + Emit("\tnot"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +/* neg */ +void AsmAssembler::Neg(InsnSize insnSize, Reg reg) +{ + Emit("\tneg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Neg(InsnSize insnSize, const Mem &mem) +{ + Emit("\tneg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +/* div & cwd, cdq, 
cqo */ +void AsmAssembler::Idiv(InsnSize insnSize, Reg reg) +{ + Emit("\tidiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Idiv(InsnSize insnSize, const Mem &mem) +{ + Emit("\tidiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Div(InsnSize insnSize, Reg reg) +{ + Emit("\tdiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Div(InsnSize insnSize, const Mem &mem) +{ + Emit("\tdiv"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Cwd() +{ + Emit("\tcwd\n"); +} + +void AsmAssembler::Cdq() +{ + Emit("\tcdq\n"); +} + +void AsmAssembler::Cqo() +{ + Emit("\tcqo\n"); +} + +/* shl */ +void AsmAssembler::Shl(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tshl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* sar */ +void AsmAssembler::Sar(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tsar"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* shr */ +void AsmAssembler::Shr(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tshr"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* jmp */ +void AsmAssembler::Jmp(Reg reg) +{ + Emit("\tjmp\t"); + Emit("*"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Jmp(const Mem &mem) +{ + Emit("\tjmp\t"); + Emit("*"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Jmp(int64 symIdx) +{ + Emit("\tjmp\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* jump condition */ +void 
AsmAssembler::Je(int64 symIdx) +{ + Emit("\tje\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Ja(int64 symIdx) +{ + Emit("\tja\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jae(int64 symIdx) +{ + Emit("\tjae\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jne(int64 symIdx) +{ + Emit("\tjne\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jb(int64 symIdx) +{ + Emit("\tjb\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jbe(int64 symIdx) +{ + Emit("\tjbe\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jg(int64 symIdx) +{ + Emit("\tjg\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jge(int64 symIdx) +{ + Emit("\tjge\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jl(int64 symIdx) +{ + Emit("\tjl\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +void AsmAssembler::Jle(int64 symIdx) +{ + Emit("\tjle\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* cmp */ +void AsmAssembler::Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, Reg reg, const Mem &mem) +{ + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegMem(reg, mem); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolReg(immOpnd.first, immOpnd.second, reg); + Emit("\n"); +} + +void AsmAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + Emit("\tcmp"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitImmOrSymbolMem(immOpnd.first, immOpnd.second, mem); + Emit("\n"); +} + +/* test */ +void AsmAssembler::Test(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\ttest"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* set */ +void AsmAssembler::Setbe(Reg reg) +{ + Emit("\tsetbe\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setbe(const Mem &mem) +{ + Emit("\tsetbe\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setle(Reg reg) +{ + Emit("\tsetle\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setle(const Mem &mem) +{ + Emit("\tsetle\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setae(Reg reg) +{ + Emit("\tsetae\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setae(const Mem &mem) +{ + Emit("\tsetae\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setge(Reg reg) +{ + Emit("\tsetge\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setge(const Mem &mem) +{ + Emit("\tsetge\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setne(Reg reg) +{ + Emit("\tsetne\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setne(const Mem &mem) +{ + Emit("\tsetne\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setb(Reg reg) +{ + Emit("\tsetb\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setb(const Mem &mem) +{ + Emit("\tsetb\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setl(Reg reg) +{ + Emit("\tsetl\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setl(const Mem &mem) +{ + Emit("\tsetl\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Seta(Reg reg) 
+{ + Emit("\tseta\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Seta(const Mem &mem) +{ + Emit("\tseta\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Setg(Reg reg) +{ + Emit("\tsetg\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Setg(const Mem &mem) +{ + Emit("\tsetg\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Sete(Reg reg) +{ + Emit("\tsete\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Sete(const Mem &mem) +{ + Emit("\tsete\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Seto(Reg reg) +{ + Emit("\tseto\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Seto(const Mem &mem) +{ + Emit("\tseto\t"); + EmitMem(mem); + Emit("\n"); +} + +/* cmov */ +void AsmAssembler::Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmova"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmova(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmova"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovae"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovae"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovb"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovb"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovbe(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovbe"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovbe"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} +void AsmAssembler::Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmove"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmove(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmove"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovge"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovge"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovl"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovl"); 
+ EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovle"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovle"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovo(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovo"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) +{ + Emit("\tcmovne"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMemReg(mem, reg); + Emit("\n"); +} + +void AsmAssembler::Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\tcmovne"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* call */ +void AsmAssembler::Call(InsnSize insnSize, Reg reg) +{ + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t*"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Call(InsnSize insnSize, const Mem &mem) +{ + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t*"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Call(InsnSize insnSize, int64 symIdx) +{ + Emit("\tcall"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitLabel(symIdx); + Emit("\n"); +} + +/* ret */ +void AsmAssembler::Ret() +{ + Emit("\tret\n"); +} + +/* leave */ +void AsmAssembler::Leave() +{ + Emit("\tleave\n"); +} + +/* imul */ +void AsmAssembler::Imul(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\timul"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* mul float */ +void AsmAssembler::Mul(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Emit("\tmulss\t"); + } else { + Emit("\tmulsd\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::Mul(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Emit("\tmulss\t"); + } else { + Emit("\tmulsd\t"); + } + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* nop */ +void AsmAssembler::Nop(InsnSize insnSize, const Mem &mem) +{ + Emit("\tnop"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitMem(mem); + Emit("\n"); +} + +void AsmAssembler::Nop() +{ + Emit("\tnop\n"); +} + +/* byte swap */ +void AsmAssembler::Bswap(InsnSize insnSize, Reg reg) +{ + Emit("\tbswap"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitReg(reg); + Emit("\n"); +} + +void AsmAssembler::Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + Emit("\txchg"); + EmitInsnSuffix(insnSize); + Emit("\t"); + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* pseudo insn */ +void AsmAssembler::DealWithPseudoInst(const std::string &insn) +{ + Emit("\t"); + Emit(insn); + Emit("\n"); +} + +/* floating point */ +void AsmAssembler::MovF(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Emit("\tmovss\t"); + } else { + Emit("\tmovsd\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +/* floating point and */ +void AsmAssembler::And(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Emit("\tandps\t"); + } else { + Emit("\tandpd\t"); + } + EmitRegReg(srcReg, destReg); + Emit("\n"); +} + +void AsmAssembler::And(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Emit("\tandps\t"); + } else { + Emit("\tandpd\t"); + } + EmitMemReg(mem, reg); + Emit("\n"); +} + +/* floating div */ +void 
AsmAssembler::Divsd(Reg srcReg, Reg destReg) {
+    Emit("\tdivsd\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+void AsmAssembler::Divsd(const Mem &mem, Reg reg) {
+    Emit("\tdivsd\t");
+    EmitMemReg(mem, reg);
+    Emit("\n");
+}
+
+/* convert int2float */
+void AsmAssembler::Cvtsi2ss(InsnSize insnSize, Reg srcReg, Reg destReg) {
+    Emit("\tcvtsi2ss");
+    EmitInsnSuffix(insnSize);
+    Emit("\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+void AsmAssembler::Cvtsi2sd(InsnSize insnSize, Reg srcReg, Reg destReg) {
+    Emit("\tcvtsi2sd");
+    EmitInsnSuffix(insnSize);
+    Emit("\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+/* convert float2int */
+void AsmAssembler::Cvttsd2si(InsnSize insnSize, Reg srcReg, Reg destReg) {
+    Emit("\tcvttsd2si");
+    EmitInsnSuffix(insnSize);
+    Emit("\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+void AsmAssembler::Cvttss2si(InsnSize insnSize, Reg srcReg, Reg destReg) {
+    Emit("\tcvttss2si");
+    EmitInsnSuffix(insnSize);
+    Emit("\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+/* convert float2float */
+void AsmAssembler::Cvtss2sd(Reg srcReg, Reg destReg) {
+    Emit("\tcvtss2sd\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+void AsmAssembler::Cvtsd2ss(Reg srcReg, Reg destReg) {
+    Emit("\tcvtsd2ss\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+/* unordered compare */
+void AsmAssembler::Ucomisd(Reg srcReg, Reg destReg) {
+    Emit("\tucomisd\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+
+void AsmAssembler::Ucomiss(Reg srcReg, Reg destReg) {
+    Emit("\tucomiss\t");
+    EmitRegReg(srcReg, destReg);
+    Emit("\n");
+}
+/* end of X64 instructions */
+} /* namespace assembler */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/elf_assembler.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/elf_assembler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b79ba808a802a3797de00bee7c12e3e8c22c018b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/elf_assembler.cpp @@ -0,0 +1,2270 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include "assembler/elf_assembler.h"
+
+namespace assembler {
+/* These are the ModRM reg-field opcode extensions used when an instruction is encoded. */
+const uint8 kSubModReg = 5;
+const uint8 kAndModReg = 4;
+const uint8 kOrModReg = 1;
+const uint8 kXorModReg = 6;
+const uint8 kNotModReg = 2;
+const uint8 kNegModReg = 3;
+const uint8 kIdivModReg = 7;
+const uint8 kDivModReg = 6;
+const uint8 kShlModReg = 4;
+const uint8 kSarModReg = 7;
+const uint8 kShrModReg = 5;
+const uint8 kJmpModReg = 4;
+const uint8 kCmpModReg = 7;
+const uint8 kCallModReg = 2;
+
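The `k*ModReg` constants above are the reg-field opcode extensions (the "/digit" notation in the Intel manuals) placed into the ModRM byte, whose layout is mod(2 bits) | reg(3 bits) | rm(3 bits). A small worked example with an assumed helper, not the ElfAssembler API:

```cpp
#include <cstdint>
#include <cstdio>

// ModRM byte layout: [mod:2][reg:3][rm:3]. For one-operand opcodes the reg
// field is an opcode extension ("/digit"), which is what kNegModReg etc. hold.
constexpr uint8_t ModRM(uint8_t mod, uint8_t reg, uint8_t rm)
{
    return static_cast<uint8_t>((mod << 6) | ((reg & 7) << 3) | (rm & 7));
}

int main()
{
    // "neg %rax" is F7 /3: mod=0b11 (register direct), reg=3, rm=0 (rax)
    std::printf("%02x\n", ModRM(0b11, 3, 0));  // prints d8
    return 0;
}
```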
*/ + DataSection *nullDataSection = new DataSection(" ", SHT_NULL, 0, 0); + RegisterSection(*nullDataSection); + strTabSection = new StringSection(".strtab", SHT_STRTAB, 0, 1); + RegisterSection(*strTabSection); + textSection = new DataSection(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, k8Bits); + RegisterSection(*textSection); + symbolTabSection = new SymbolSection(".symtab", SHT_SYMTAB, 0, k8Bits, *strTabSection); + RegisterSection(*symbolTabSection); +} + +void ElfAssembler::EmitVariable(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr, + SectionKind sectionKind) +{ + switch (sectionKind) { + case kSBss: + case kSComm: + case kSTbss: + EmitBssSectionVar(symIdx, sizeInByte, alignInByte, symAttr); + break; + case kSTdata: + case kSData: + EmitDataSectionVar(symIdx); + break; + case kSRodata: + if (rodataSection == nullptr) { + rodataSection = new DataSection(".rodata", SHT_PROGBITS, SHF_ALLOC, k8Bits); + RegisterSection(*rodataSection); + } + UpdateLabel(symIdx, LabelType::kConst, rodataSection->GetDataSize()); + break; + case kSText: + UpdateLabel(symIdx); + break; + case kSDebugInfo: + case kSDebugAbbrev: + case kSDebugStr: + default: + assert(false && "unprocessed Section in EmitVariable"); + break; + } +} + +void ElfAssembler::EmitFloatValue(int64 symIdx, int64 value, size_t valueSize) +{ + auto reloffset = codeBuff.size(); + Encodeb(value, valueSize); + + if (valueSize == 4) { + UpdateLabel(symIdx, LabelType::kFloatLabel, reloffset); + } else if (valueSize == 8) { + UpdateLabel(symIdx, LabelType::kDoubleLabel, reloffset); + } else { + CHECK_FATAL(false, "--Err: EmitFloatValue only handle float and double value"); + } +} + +void ElfAssembler::EmitBssSectionVar(int64 symIdx, uint64 sizeInByte, uint8 alignInByte, SymbolAttr symAttr) +{ + if (bssSection == nullptr) { + bssSection = new DataSection(".bss", SHT_NOBITS, SHF_WRITE | SHF_ALLOC, k8Bits); + RegisterSection(*bssSection); + } + uint64 bssCurSize = bssSection->GetSectionSize(); + bssSection->SetSectionSize(static_cast(bssCurSize + sizeInByte)); + if (symAttr == kSALocal) { + const std::string &symbolName = GetNameFromSymMap(symIdx, true); + auto nameIndex = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(nameIndex), static_cast((STB_LOCAL << 4) + (STT_OBJECT & 0xf)), 0, + bssSection->GetIndex(), bssCurSize, sizeInByte}, + symIdx); + UpdateLabel(symIdx, LabelType::kLocalUninitialized, static_cast(bssCurSize)); + } else { + const std::string &symbolName = GetNameFromSymMap(symIdx); + auto nameIndex = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(nameIndex), static_cast((STB_GLOBAL << 4) + (STT_OBJECT & 0xf)), 0, + SHN_COMMON, static_cast
(alignInByte), sizeInByte},
+                       symIdx);
+        UpdateLabel(symIdx, LabelType::kGlobalUninitialized, static_cast(bssCurSize));
+    }
+}
+
+void ElfAssembler::EmitDataSectionVar(int64 symIdx)
+{
+    if (dataSection == nullptr) {
+        dataSection = new DataSection(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, k8Bits);
+        RegisterSection(*dataSection);
+    }
+    uint32 pos = dataSection->GetDataSize();
+    UpdateLabel(symIdx, LabelType::kStatic, pos);
+}
+
+void ElfAssembler::EmitFunctionHeader(int64 symIdx, SymbolAttr funcAttr, const std::string *secName)
+{
+    const auto &emitMemoryManager = maplebe::CGOptions::GetInstance().GetEmitMemoryManager();
+    if (emitMemoryManager.funcAddressSaver != nullptr) {
+        const std::string &funcName = GetNameFromSymMap(symIdx);
+        emitMemoryManager.funcAddressSaver(emitMemoryManager.codeSpace, funcName, static_cast(codeBuff.size()));
+    }
+    UpdateLabel(symIdx, LabelType::kFunc, static_cast(codeBuff.size()));
+}
+
+void ElfAssembler::EmitBBLabel(int64 labelSymIdx, bool genVerboseInfo, uint32 freq, const std::string *mirName)
+{
+    UpdateLabel(labelSymIdx, LabelType::kBBLabel, static_cast(codeBuff.size()));
+}
+
+void ElfAssembler::EmitJmpTableElem(int64 jmpLabelIdx, const std::vector &labelSymIdxs)
+{
+    UpdateLabel(jmpLabelIdx, LabelType::kJmpLabel, static_cast(codeBuff.size()));
+    const size_t kLabelSize = 8;
+    for (auto labelSymIdx : labelSymIdxs) {
+        AppendFixup(labelSymIdx, kAbsolute64, {static_cast(codeBuff.size()), kLabelSize}, fixups);
+        uint8 imm = 0;
+        Encodeb(imm, kLabelSize);
+    }
+}
+
+void ElfAssembler::EmitFunctionFoot(int64 symIdx, SymbolAttr funcAttr)
+{
+    uint64 funcSymValue = static_cast(GetLabelRelOffset(symIdx));
+    uint64 funcSymSize = static_cast(GetLabelSize(symIdx));
+    uint8 funcSymType = STB_GLOBAL;
+    switch (funcAttr) {
+        case kSALocal:
+            funcSymType = STB_LOCAL;
+            break;
+        case kSAGlobal:
+            funcSymType = STB_GLOBAL;
+            break;
+        case kSAWeak:
+            funcSymType = STB_WEAK;
+            break;
+        case kSAStatic:
+        case kSAHidden:
+        default:
+            assert(false && "unknown/unsupported SymbolAttr in EmitFunctionFoot");
+            break;
+    }
+    const std::string &symbolName = GetNameFromSymMap(symIdx);
+    auto nameIndex = strTabSection->AddString(symbolName);
+    AddSymToSymTab({static_cast(nameIndex),
+                    static_cast((funcSymType << kLeftShift4Bits) + (STT_FUNC & 0xf)), 0, textSection->GetIndex(),
+                    funcSymValue, funcSymSize},
+                   symIdx);
+}
+
+void ElfAssembler::EmitDirectString(const std::string &str, bool belongsToDataSec, int64 strSymIdx, bool emitAscii)
+{
+    /* Add a terminator to the string. */
+    std::string ustr = str;
+    ustr += '\0';
+    if (strSymIdx != 0) {
+        if (dataSection == nullptr) {
+            dataSection = new DataSection(".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, k8Bits);
+            RegisterSection(*dataSection);
+        }
+        uint32 pos = dataSection->GetDataSize();
+        UpdateLabel(strSymIdx, LabelType::kStrLabel, pos);
+        dataSection->AppendData(ustr.data(), ustr.size());
+        const size_t kStrAlignSize = 8;
+        /* Pad with zeros out to the next 8-byte alignment boundary.
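+           Worked example: "abcd" plus its terminator occupies 5 bytes, so
+           appendSize = 8 - 5 % 8 = 3 zero bytes are appended and the next
+           datum starts 8-byte aligned. Note that when ustr.size() is already
+           a multiple of 8, this appends a full extra 8-byte block of zeros.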
*/ + size_t appendSize = kStrAlignSize - ustr.size() % kStrAlignSize; + int64 appendData = 0; + dataSection->AppendData(appendData, appendSize); + } else { + if (belongsToDataSec) { + dataSection->AppendData(ustr.data(), ustr.size()); + } else { + rodataSection->AppendData(ustr.data(), ustr.size()); + } + } +} + +void ElfAssembler::EmitIndirectString(int64 strSymIdx, bool belongsToDataSec) +{ + const size_t kStrAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kStrAddrSize); + AppendFixup(strSymIdx, kAbsolute64, {pos, kStrAddrSize}, dataFixups); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kStrAddrSize); + AppendFixup(strSymIdx, kAbsolute64, {pos, kStrAddrSize}, rodataFixups); + } +} + +void ElfAssembler::EmitIntValue(int64 value, size_t valueSize, bool belongsToDataSec) +{ + if (belongsToDataSec) { + dataSection->AppendData(value, valueSize); + } else { + rodataSection->AppendData(value, valueSize); + } +} + +void ElfAssembler::EmitAddrValue(int64 symIdx, int32 symAddrOfs, int32 structFieldOfs, bool belongsToDataSec) +{ + const size_t kAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, dataFixups, symAddrOfs); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, rodataFixups, symAddrOfs); + } +} + +void ElfAssembler::EmitAddrOfFuncValue(int64 symIdx, bool belongsToDataSec) +{ + EmitLabelValue(symIdx, belongsToDataSec); +} + +void ElfAssembler::EmitLabelValue(int64 symIdx, bool belongsToDataSec) +{ + const size_t kAddrSize = 8; + uint32 pos = 0; + int64 addr = 0; + if (belongsToDataSec) { + pos = dataSection->GetDataSize(); + dataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, dataFixups); + } else { + pos = rodataSection->GetDataSize(); + rodataSection->AppendData(addr, kAddrSize); + AppendFixup(symIdx, kAbsolute64, {pos, kAddrSize}, rodataFixups); + } +} + +void ElfAssembler::EmitBitFieldValue(uint64 combineBitFieldValue, bool belongsToDataSec) +{ + if (belongsToDataSec) { + dataSection->AppendData(static_cast(combineBitFieldValue), 1); + } else { + rodataSection->AppendData(static_cast(combineBitFieldValue), 1); + } +} + +void ElfAssembler::EmitNull(uint64 sizeInByte) +{ + int64 data = 0; + dataSection->AppendData(data, static_cast(sizeInByte)); +} + +void ElfAssembler::PostEmitVariable(int64 symIdx, SymbolAttr symAttr, uint64 sizeInByte, bool belongsToTextSec) +{ + Label *label = labelManager.at(symIdx); + uint64 pos = static_cast(label->GetRelOffset()); + auto secIdx = belongsToTextSec ? textSection->GetIndex() : dataSection->GetIndex(); + if (symAttr == kSALocal) { + const std::string &symbolName = GetNameFromSymMap(symIdx, true); + auto index = strTabSection->AddString(symbolName); + AddSymToSymTab({static_cast(index), static_cast((STB_LOCAL << 4) + (STT_OBJECT & 0xf)), 0, + secIdx, pos, sizeInByte}, + symIdx); + } else { + const std::string &symbolName = GetNameFromSymMap(symIdx); + auto index = strTabSection->AddString(symbolName); + uint8 symInfo = symAttr == kSAGlobal ? 
STB_GLOBAL : STB_WEAK; + AddSymToSymTab({static_cast(index), static_cast((symInfo << 4) + (STT_OBJECT & 0xf)), 0, + secIdx, pos, sizeInByte}, + symIdx); + } +} + +void ElfAssembler::FinalizeFileInfo() +{ + AppendSymsToSymTabSec(); + HandleTextSectionFixup(); + HandleDataSectionFixup(); + HandleRodataSectionFixup(); + HandleDebugInfoSectionFixup(); + WriteElfFile(); +} + +/* encode function */ +void ElfAssembler::OpReg(Reg reg, uint8 opCode1, uint8 opCode2, uint8 modReg) +{ + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + uint8 rex = GetRex(reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = GetRegCodeId(reg); + SetModRM(GetMod(reg), modReg, modrm); +} + +void ElfAssembler::OpMem(const Mem &mem, uint8 opCode1, uint8 opCode2, uint8 modReg) +{ + if (HasOpndSizePrefix(mem)) { + Encodeb(0x66); + } + + if (HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + + uint8 rex = GetRex(mem); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (mem.size == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = 0; + if (!HasSIB(mem)) { + modrm = GetRegCodeId(mem.base); + } else { + modrm = 0b100; /* r/m=b100, use SIB */ + } + SetModRM(GetMod(mem), modReg, modrm); + if (HasSIB(mem)) { + Encodeb(GetSIB(mem)); + } + OpDisp(mem); +} + +void ElfAssembler::OpDisp(const Mem &mem) +{ + int64 symIdx = mem.disp.first; + uint64 offset = static_cast(mem.disp.second); + if (symIdx != 0) { + if (!CanEncodeLabel(symIdx)) { + size_t offsetSize = 4; + UpdateLabel(symIdx); + AppendFixup(symIdx, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups, mem.disp.second); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } + } else if (offset == 0) { + if (mem.memType == kOnlyBase && (mem.base == RBP || mem.base == R13)) { + Encodeb(offset); + } else if (mem.base == RIP) { + Encoded(offset); + } else { + return; + } + } else { + if (mem.base != RIP && Is8Bits(offset)) { + Encodeb(offset); /* 8-bit displacement */ + } else { + Encoded(offset); /* 32-bit displacement */ + } + } +} + +void ElfAssembler::OpRR(Reg reg1, Reg reg2, uint8 opCode1, uint8 opCode2, bool extInsn) +{ + if (!extInsn && (HasOpndSizePrefix(reg1) || HasOpndSizePrefix(reg2))) { + Encodeb(0x66); + } + uint8 rex = extInsn ? GetRex(reg2, reg1) : GetRex(reg1, reg2); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg1) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = extInsn ? GetModRM(reg2, reg1) : GetModRM(reg1, reg2); + if (modrm != 0) { + Encodeb(modrm); + } +} + +void ElfAssembler::OpRM(Reg reg, const Mem &mem, uint8 opCode1, uint8 opCode2, bool extInsn) +{ + if (!extInsn && HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (!extInsn && HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + uint8 rex = GetRex(mem, reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1 | (GetRegSize(reg) == k8Bits ? 0 : 1)); + if (opCode2 != 0) { + Encodeb(opCode2); + } + uint8 modrm = GetModRM(reg, mem); + Encodeb(modrm); + if (HasSIB(mem)) { + Encodeb(GetSIB(mem)); + } + OpDisp(mem); +} + +void ElfAssembler::OpImmAndReg(const ImmOpnd &immOpnd, Reg reg, uint8 opCode, uint8 modReg) +{ + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? 
k16Bits : k32Bits); + uint8 regSize = GetRegSize(reg); + if (regSize == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (regSize == k64Bits || regSize == k32Bits)) { + immBit = k32Bits; /* if 32/64bit mode, imm val can not use 16-bit. */ + } + immBit = isSymbol ? k32Bits : immBit; + if (GetRegId(reg) == 0 && (regSize == immBit || (regSize == k64Bits && immBit == k32Bits))) { + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (GetRex(reg)) { + Encodeb(GetRex(reg)); + } + Encodeb(opCode | 0x4 | (immBit == k8Bits ? 0 : 1)); + } else { + uint8 tmp = immBit < std::min(static_cast(regSize), 32U) ? 2 : 0; + OpReg(reg, 0x80 | tmp, 0, modReg); + } + if (isSymbol) { + if (!CanEncodeLabel(immOpnd.first)) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kRelative, {static_cast(codeBuff.size()), immBit / k8Bits}, fixups); + imm = 0; + Encodeb(imm, immBit / k8Bits); + } + } else { + Encodeb(imm, immBit / k8Bits); + } +} + +void ElfAssembler::OpImmAndMem(const ImmOpnd &immOpnd, const Mem &mem, uint8 modReg) +{ + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + if (isSymbol) { + if (!CanEncodeLabel(immOpnd.first)) { + size_t offsetSize = 4; + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + OpMem(mem, 0x80, 0, modReg); + Encodeb(imm, offsetSize); + } + } else { + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? k16Bits : k32Bits); + if (mem.size == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (mem.size == k64Bits || mem.size == k32Bits)) { + immBit = k32Bits; /* if 32/64bit mode, imm val can not use 16-bit. */ + } + uint8 tmp = immBit < std::min(static_cast(mem.size), 32U) ? 2 : 0; + OpMem(mem, 0x80 | tmp, 0, modReg); + Encodeb(imm, immBit / k8Bits); + } +} + +void ElfAssembler::MovRegAndDisp(Reg reg, const Mem &mem, uint8 opCode) +{ + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (HasAddrSizePrefix(mem)) { + Encodeb(0x67); + } + uint8 rex = GetRex(mem, reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode | (GetRegSize(reg) == k8Bits ? 
0 : 1)); + int64 symIdx = mem.disp.first; + uint64 offset = static_cast(mem.disp.second); + if (symIdx != 0) { + size_t offsetSize = k8Bits; + Encodeb(static_cast(0), offsetSize); + UpdateLabel(symIdx); + AppendFixup(symIdx, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + } + if (Is64Bits(offset)) { + Encodeq(offset); + } else { + Encoded(offset); + } +} + +void ElfAssembler::OpPushPop(Reg reg, uint8 code) +{ + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (IsRegExt(reg)) { + Encodeb(0x41); /* Rex prefix */ + } + Encodeb(code | GetRegCodeId(reg)); +} + +void ElfAssembler::JmpToLabel(int64 labelIdx, uint8 opCode1, uint8 opCode2, size_t offsetSize) +{ + Encodeb(opCode1); + if (opCode2 != 0) { + Encodeb(opCode2); + } + if (!CanEncodeLabel(labelIdx)) { + UpdateLabel(labelIdx); + AppendFixup(labelIdx, kRelative, {static_cast(codeBuff.size()), offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } +} + +void ElfAssembler::OpCmovcc(Reg srcReg, Reg dstReg, uint8 opCode1, uint8 opCode2) +{ + if (HasOpndSizePrefix(srcReg) || HasOpndSizePrefix(dstReg)) { + Encodeb(0x66); + } + uint8 rex = GetRex(dstReg, srcReg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(opCode1); + Encodeb(opCode2); + uint8 modrm = GetModRM(dstReg, srcReg); + if (modrm != 0) { + Encodeb(modrm); + } +} + +void ElfAssembler::UpdateLabel(int64 labelIdx, LabelType type, uint32 relOffset) +{ + if (labelManager.count(labelIdx) == 0) { + Label *label = new Label(labelIdx, relOffset, type); + (void)labelManager.emplace(labelIdx, label); + } else { + Label *label = labelManager.at(labelIdx); + if (type != LabelType::kLNone) { + label->SetLabelType(type); + } + if (relOffset != 0xFFFFFFFFU) { + label->SetRelOffset(relOffset); + } + } +} + +bool ElfAssembler::CanEncodeLabel(int64 labelIdx) +{ + if (labelManager.count(labelIdx) != 0) { + Label *label = labelManager.at(labelIdx); + uint32 relOffset = label->GetRelOffset(); + LabelType labelType = label->GetLabelType(); + bool canEncode = (labelType == LabelType::kBBLabel || labelType == LabelType::kFunc || + labelType == LabelType::kDoubleLabel || labelType == LabelType::kFloatLabel); + if (canEncode && relOffset != 0xFFFFFFFFU) { + size_t offsetSize = 4; + uint64 offset = static_cast((relOffset - codeBuff.size()) - offsetSize); + Encodeb(offset, offsetSize); + return true; + } + } + return false; +} + +uint32 ElfAssembler::GetLabelSize(int64 labelIdx) const +{ + return static_cast(codeBuff.size()) - GetLabelRelOffset(labelIdx); +} + +uint32 ElfAssembler::GetLabelRelOffset(int64 labelIdx) const +{ + if (labelManager.count(labelIdx) != 0) { + Label *label = labelManager.at(labelIdx); + assert(label->GetRelOffset() != 0xFFFFFFFFU && "label's relOffset doesn't exist"); + return label->GetRelOffset(); + } + return 0; +} + +void ElfAssembler::AppendFixup(int64 labelIdx, FixupKind kind, const std::pair &offsetPair, + std::vector &tmpFixups, int64 disp) +{ + tmpFixups.push_back(new Fixup(labelIdx, kind, offsetPair, disp)); +} + +/* elf file */ +void ElfAssembler::InitElfHeader() +{ + header.e_ident[EI_MAG0] = ELFMAG0; + header.e_ident[EI_MAG1] = ELFMAG1; + header.e_ident[EI_MAG2] = ELFMAG2; + header.e_ident[EI_MAG3] = ELFMAG3; + header.e_ident[EI_CLASS] = ELFCLASS64; + header.e_ident[EI_DATA] = ELFDATA2LSB; + header.e_ident[EI_VERSION] = EV_CURRENT; + header.e_ident[EI_OSABI] = ELFOSABI_NONE; /* ELFOSABI_NONE represents UNIX System V */ + header.e_ident[EI_ABIVERSION] = 0; + (void)std::fill_n(&header.e_ident[EI_PAD], EI_NIDENT - EI_PAD, 0); + 
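+    /*
+     * Editorial sketch: with the e_ident values above, the 16 identification
+     * bytes of this relocatable x86-64 object are
+     *     7f 45 4c 46 02 01 01 00 00 00 00 00 00 00 00 00
+     *     magic "\x7fELF", ELFCLASS64, ELFDATA2LSB (little endian),
+     *     EV_CURRENT, ELFOSABI_NONE, then padding.
+     * Together with the e_type/e_machine fields set just below, `readelf -h`
+     * on the output should report ELF64, 2's complement, little endian,
+     * REL (relocatable file), Advanced Micro Devices X86-64.
+     */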
header.e_type = ET_REL;
+    header.e_machine = EM_X86_64;
+    header.e_version = EV_CURRENT;
+    header.e_entry = 0;
+    header.e_phoff = 0;
+    header.e_shoff = 0; /* filled in later by LayoutSections */
+    header.e_flags = 0; /* The Intel architecture defines no flags, so this member is zero. */
+    header.e_ehsize = sizeof(FileHeader);
+    header.e_phentsize = 0;
+    header.e_phnum = 0;
+    header.e_shentsize = sizeof(SectionHeader);
+    header.e_shnum = static_cast(sections.size());
+    header.e_shstrndx = strTabSection->GetIndex();
+}
+
+void ElfAssembler::RegisterSection(Section &section)
+{
+    sections.push_back(&section);
+    section.SetIndex(static_cast(sections.size() - 1));
+}
+
+void ElfAssembler::LayoutSections()
+{
+    globalOffset = sizeof(FileHeader);
+    globalOffset = Alignment::Align(globalOffset, k8Bits);
+
+    for (auto *section : sections) {
+        section->SetSectionHeaderNameIndex(static_cast(strTabSection->AddString(section->GetName())));
+    }
+
+    for (auto *section : sections) {
+        globalOffset = Alignment::Align(globalOffset, section->GetAlign());
+        /* lay out section */
+        UpdateSectionOffset(*section);
+        if (section->GetType() != SHT_NOBITS) {
+            section->GenerateData();
+        }
+        UpdateGlobalOffset(*section);
+    }
+
+    globalOffset = Alignment::Align(globalOffset, 16U);
+    header.e_shoff = globalOffset;
+    header.e_shnum = static_cast(sections.size());
+}
+
+void ElfAssembler::UpdateSectionOffset(Section &section)
+{
+    if (section.GetType() != SHT_NOBITS) {
+        section.SetOffset(globalOffset);
+    } else {
+        section.SetOffset(0);
+    }
+}
+
+void ElfAssembler::UpdateGlobalOffset(Section &section)
+{
+    if (section.GetType() != SHT_NOBITS) {
+        globalOffset += section.GetSectionSize();
+    }
+}
+
+void ElfAssembler::SetFileOffset(uint64 offset)
+{
+    (void)outStream.seekp(offset);
+}
+
+/* symIdx is the key used to get the symbol's index in .symtab */
+void ElfAssembler::AddSymToSymTab(const Symbol &symbol, int64 symIdx)
+{
+    const int kGetHigh4Bits = 4;
+    if ((symbol.st_info >> kGetHigh4Bits) == STB_LOCAL) {
+        localSymTab.push_back(std::make_pair(symbol, symIdx));
+    } else {
+        symTab.push_back(std::make_pair(symbol, symIdx));
+    }
+}
+
+void ElfAssembler::AppendRela(const Label &label, const std::pair &offsetPair, uint64 type,
+                              Sxword addend)
+{
+    LabelType labelType = label.GetLabelType();
+    int64 relOffset = static_cast(label.GetRelOffset());
+    uint64 offset = static_cast(offsetPair.first);
+    int64 offsetSize = static_cast(offsetPair.second);
+    if (labelType == LabelType::kConst) {
+        int64 rodataSecSymIdx = ~rodataSection->GetIndex() + 1;
+        relaSection->AppendRela(
+            {offset,
+             static_cast((symbolTabSection->GetIdxInSymbols(rodataSecSymIdx) << kLeftShift32Bits) +
+                         (type & 0xffffffff)),
+             relOffset});
+    } else if (labelType == LabelType::kGlobal) {
+        addend -= offsetSize;
+        relaSection->AppendRela(
+            {offset,
+             static_cast((symbolTabSection->GetIdxInSymbols(label.GetlabelIdx()) << kLeftShift32Bits) +
+                         (type & 0xffffffff)),
+             addend});
+    } else if (labelType == LabelType::kStatic) {
+        addend += relOffset - offsetSize;
+        int64 dataSecSymIdx = ~dataSection->GetIndex() + 1;
+        relaSection->AppendRela(
+            {offset,
+             static_cast((symbolTabSection->GetIdxInSymbols(dataSecSymIdx) << kLeftShift32Bits) +
+                         (type & 0xffffffff)),
+             addend});
+    } else if (labelType == LabelType::kLocalUninitialized) {
+        addend = addend + relOffset - offsetSize;
+        int64 bssSecSymIdx = ~bssSection->GetIndex() + 1;
+        relaSection->AppendRela(
+            {offset,
+             static_cast((symbolTabSection->GetIdxInSymbols(bssSecSymIdx) << kLeftShift32Bits) +
+                         (type & 0xffffffff)),
+             addend});
+    } else if
(labelType == LabelType::kGlobalUninitialized) { + addend = addend - offsetSize; + relaSection->AppendRela( + {offset, + static_cast((symbolTabSection->GetIdxInSymbols(label.GetlabelIdx()) << kLeftShift32Bits) + + (type & 0xffffffff)), + addend}); + } else if (labelType == LabelType::kJmpLabel) { + type = R_X86_64_32; + addend = relOffset; + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + relaSection->AppendRela( + {offset, + static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << kLeftShift32Bits) + + (type & 0xffffffff)), + addend}); + } else if (labelType == LabelType::kBBLabel) { + addend = relOffset; + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + relaSection->AppendRela( + {offset, + static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << kLeftShift32Bits) + + (type & 0xffffffff)), + addend}); + } else if (labelType == LabelType::kFunc || + (label.GetRelOffset() == 0xFFFFFFFFU && labelType == LabelType::kLNone)) { + int64 labelIdx = label.GetlabelIdx(); + if (!symbolTabSection->ExistSymInSymbols(labelIdx)) { + symbolTabSection->AppendSymbol({static_cast(strTabSection->AddString(GetNameFromSymMap(labelIdx))), + static_cast((STB_GLOBAL << kLeftShift4Bits) + (STT_NOTYPE & 0xf)), 0, + 0, 0, 0}); + symbolTabSection->AppendIdxInSymbols(labelIdx); + } + relaSection->AppendRela({offsetPair.first, + static_cast((symbolTabSection->GetIdxInSymbols(labelIdx) << kLeftShift32Bits) + + (type & 0xffffffff)), + addend}); + } else { + assert(false && "unsupported label type in func AddRela"); + } +} + +uint64 ElfAssembler::GetRelaType(FixupKind kind) const +{ + switch (kind) { + case kRelative: + return R_X86_64_PC32; + case kRelative64: + return R_X86_64_PC64; + case kAbsolute: + return R_X86_64_32; + case kAbsolute64: + return R_X86_64_64; + case kPLT: + return R_X86_64_PLT32; + case kFNone: + return R_X86_64_NONE; + } +} + +void ElfAssembler::HandleTextSectionFixup() +{ + if (!fixups.empty()) { + relaSection = + new RelaSection(".rela.text", SHT_RELA, SHF_INFO_LINK, textSection->GetIndex(), k8Bits, *symbolTabSection); + RegisterSection(*relaSection); + } + + for (auto fixup : fixups) { + int64 labelIdx = fixup->GetlabelIdx(); + if (labelManager.count(labelIdx) == 0) { + continue; + } + + const std::pair &offsetPair = fixup->GetOffset(); + Label *label = labelManager.at(labelIdx); + uint32 relOffset = label->GetRelOffset(); + LabelType labelType = label->GetLabelType(); + + FixupKind fixupKind = fixup->GetFixupKind(); + if ((fixupKind == kRelative || fixupKind == kRelative64) && + (labelType == LabelType::kBBLabel || labelType == LabelType::kFunc)) { + FixupEncode(offsetPair.first, relOffset, offsetPair.second); + fixup->SetFixupKind(kFNone); + } + + if (relOffset != 0xFFFFFFFFU && fixupKind == kPLT) { + FixupEncode(offsetPair.first, relOffset, offsetPair.second); + fixup->SetFixupKind(kFNone); + } + + fixupKind = fixup->GetFixupKind(); + uint64 type = GetRelaType(fixupKind); + int64 addend = (fixupKind == kAbsolute || fixupKind == kAbsolute64) ? 0 : -0x4; + if (fixupKind != kFNone) { + addend = labelType == LabelType::kGlobalUninitialized || labelType == LabelType::kLocalUninitialized || + labelType == LabelType::kGlobal || labelType == LabelType::kStatic + ? 
fixup->GetDisp() + : addend; + AppendRela(*label, offsetPair, type, addend); + } + } + textSection->AppendData(codeBuff.data(), codeBuff.size()); +} + +void ElfAssembler::HandleDataSectionFixup() +{ + if (!dataFixups.empty()) { + relaDataSection = + new RelaSection(".rela.data", SHT_RELA, SHF_INFO_LINK, dataSection->GetIndex(), k8Bits, *symbolTabSection); + RegisterSection(*relaDataSection); + } + for (auto fixup : dataFixups) { + int64 labelIdx = fixup->GetlabelIdx(); + std::pair offset = fixup->GetOffset(); + const uint32 relocType = R_X86_64_64; + if (labelManager.count(labelIdx) == 0) { + continue; + } + Label *label = labelManager.at(labelIdx); + LabelType labelType = label->GetLabelType(); + int64 addend = 0; + int64 relOffset = static_cast(label->GetRelOffset()); + if (labelType == LabelType::kGlobalUninitialized) { + addend = fixup->GetDisp(); + uint64 pos = symbolTabSection->GetIdxInSymbols(labelIdx); + relaDataSection->AppendRela( + {offset.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else if (labelType == LabelType::kLocalUninitialized) { + addend = fixup->GetDisp(); + int64 bssSecSymIdx = ~bssSection->GetIndex() + 1; + relaDataSection->AppendRela( + {offset.first, + static_cast((symbolTabSection->GetIdxInSymbols(bssSecSymIdx) << kLeftShift32Bits) + + (relocType & 0xffffffff)), + addend}); + } else if (labelType == LabelType::kFunc) { + uint64 pos = symbolTabSection->GetIdxInSymbols(labelIdx); + relaDataSection->AppendRela( + {offset.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } else if (labelType == LabelType::kStrLabel || labelType == LabelType::kGlobal || + labelType == LabelType::kStatic) { + uint64 pos = symbolTabSection->GetIdxInSymbols(~dataSection->GetIndex() + 1); + addend = (labelType == LabelType::kGlobal || labelType == LabelType::kStatic) ? 
fixup->GetDisp() + relOffset
+                         : relOffset;
+            relaDataSection->AppendRela(
+                {offset.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend});
+        } else {
+            addend = relOffset;
+            int64 textSecSymIdx = ~textSection->GetIndex() + 1;
+            relaDataSection->AppendRela(
+                {offset.first,
+                 static_cast((symbolTabSection->GetIdxInSymbols(textSecSymIdx) << kLeftShift32Bits) +
+                             (relocType & 0xffffffff)),
+                 addend});
+        }
+    }
+}
+
+void ElfAssembler::HandleRodataSectionFixup()
+{
+    if (!rodataFixups.empty()) {
+        relaRodataSection = new RelaSection(".rela.rodata", SHT_RELA, SHF_INFO_LINK, rodataSection->GetIndex(), k8Bits,
+                                            *symbolTabSection);
+        RegisterSection(*relaRodataSection);
+    }
+    for (auto fixup : rodataFixups) {
+        int64 labelIdx = fixup->GetlabelIdx();
+        std::pair offset = fixup->GetOffset();
+        const uint32 relocType = R_X86_64_64;
+        if (labelManager.count(labelIdx) == 0) {
+            continue;
+        }
+        Label *label = labelManager.at(labelIdx);
+        LabelType labelType = label->GetLabelType();
+        int64 addend = 0;
+        int64 relOffset = static_cast(label->GetRelOffset());
+        if (labelType == LabelType::kGlobalUninitialized || labelType == LabelType::kLocalUninitialized) {
+            addend = relOffset;
+            uint64 pos = symbolTabSection->GetIdxInSymbols(~textSection->GetIndex() + 1);
+            relaRodataSection->AppendRela(
+                {offset.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend});
+        }
+    }
+}
+
+void ElfAssembler::WriteElfFile()
+{
+    /* Init elf file header */
+    InitElfHeader();
+
+    LayoutSections();
+
+    /* write header */
+    Emit(&header, sizeof(header));
+
+    /* write sections */
+    for (auto *section : sections) {
+        if (section->GetType() == SHT_NOBITS) {
+            continue;
+        }
+        SetFileOffset(section->GetOffset());
+        section->WriteSection(outStream);
+        if (section == textSection) {
+            const auto &emitMemoryManager = maplebe::CGOptions::GetInstance().GetEmitMemoryManager();
+            uint8 *memSpace = emitMemoryManager.allocateDataSection(
+                emitMemoryManager.codeSpace, section->GetSectionSize(), section->GetAlign(), section->GetName());
+            memcpy_s(memSpace, section->GetSectionSize(), textSection->GetData().data(), textSection->GetDataSize());
+        }
+    }
+
+    /* write the section header table */
+    SetFileOffset(header.e_shoff);
+    for (auto *section : sections) {
+        Emit(&section->GetSectionHeader(), sizeof(section->GetSectionHeader()));
+    }
+}
+
+/* Append the symbols of non-empty, required sections to the symbol table section. */
+void ElfAssembler::AppendSecSymsToSymTabSec()
+{
+    for (Section *section : sections) {
+        if (section->GetType() != SHT_PROGBITS && section->GetType() != SHT_NOBITS) {
+            continue;
+        }
+        DataSection *dataSec = static_cast(section);
+        if (section->GetFlags() == (SHF_ALLOC | SHF_EXECINSTR) || section->GetSectionSize() != 0 ||
+            (dataSec != nullptr && dataSec->GetDataSize() != 0)) {
+            auto nameIndex = strTabSection->AddString(section->GetName());
+            symbolTabSection->AppendSymbol({static_cast(nameIndex),
+                                            static_cast((STB_LOCAL << kLeftShift4Bits) + (STT_SECTION & 0xf)), 0,
+                                            section->GetIndex(), 0, 0});
+            /* Indexed by the inverse of the section index.
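+               ~index + 1 is two's-complement negation, so section k is keyed
+               as -k (e.g. .text, registered at index 2, is stored under -2);
+               the negative keys keep section symbols disjoint from the
+               positive symIdx keys used for ordinary labels.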
*/ + int64 secSymIdx = ~section->GetIndex() + 1; + symbolTabSection->AppendIdxInSymbols(secSymIdx); + } + } +} + +void ElfAssembler::AppendSymsToSymTabSec() +{ + /* emit local symbol */ + for (auto elem : localSymTab) { + Symbol symbol = elem.first; + int64 symIdx = elem.second; + symbolTabSection->AppendSymbol(symbol); + symbolTabSection->AppendIdxInSymbols(symIdx); + } + + /* Append section symbol that may be used in relocation item, section is local. */ + AppendSecSymsToSymTabSec(); + + /* set .symtab's info : index of the first non-local symbol */ + symbolTabSection->SetInfo(symbolTabSection->GetSymbolsSize()); + + /* emit global and other symbol */ + for (auto elem : symTab) { + const Symbol &symbol = elem.first; + int64 symIdx = elem.second; + symbolTabSection->AppendSymbol(symbol); + symbolTabSection->AppendIdxInSymbols(symIdx); + } +} + +/* emit debug info */ +void ElfAssembler::EmitDIDebugInfoSectionHeader(uint64 debugInfoLength) +{ + debugInfoSection = new DataSection(".debug_info", SHT_PROGBITS, 0, 1); + RegisterSection(*debugInfoSection); + /* length of .debug_info section, 4 bytes */ + size_t debugInfoLenSize = 4; + debugInfoSection->AppendData(static_cast(debugInfoLength), debugInfoLenSize); + size_t dwarfVersionSize = 2; + /* DWARF version, 2 bytes */ + debugInfoSection->AppendData(static_cast(kDwarfVersion), dwarfVersionSize); + /* debug_abbrev_offset. 4 bytes for dwarf32, 8 bytes for dwarf64 */ + int64 debugAbbrevOffset = 0; + size_t debugAbbrevOffsetSize = 4; + /* If labelSymIdx equals LLONG_MAX, there is not a real label bound to the fixup. */ + AppendFixup(LLONG_MAX, kAbsolute, {debugInfoSection->GetDataSize(), debugAbbrevOffsetSize}, debugInfoFixups); + debugInfoSection->AppendData(debugAbbrevOffset, debugAbbrevOffsetSize); + /* address size. 
1 byte */
+    size_t byteOfkSizeOfPTR = 1;
+    debugInfoSection->AppendData(kSizeOfPTR, byteOfkSizeOfPTR);
+}
+
+void ElfAssembler::EmitDIDebugInfoSectionAbbrevId(bool verbose, uint32 abbrevId, const std::string &dieTagName,
+                                                  uint32 offset, uint32 size)
+{
+    auto abbrevIdUleb128 = EncodeULEB128(abbrevId);
+    debugInfoSection->AppendData(&abbrevIdUleb128, abbrevIdUleb128.size());
+}
+
+/* EmitDIAttrValue */
+void ElfAssembler::EmitDwFormString(const std::string &name)
+{
+    debugInfoSection->AppendData(&name, name.size());
+}
+
+void ElfAssembler::EmitDwFormStrp(uint32 strLabelId, size_t strTableSize)
+{
+    int64 labelSymIdx = CalculateStrLabelSymIdx(static_cast(strLabelId), static_cast(strTableSize));
+    UpdateLabel(labelSymIdx, LabelType::kDebugStrLabel);
+    int64 strLabelOffset = 0;
+    size_t strLabelOffsetSize = 4;
+    AppendFixup(labelSymIdx, kAbsolute, {debugInfoSection->GetDataSize(), strLabelOffsetSize}, debugInfoFixups);
+    debugInfoSection->AppendData(strLabelOffset, strLabelOffsetSize);
+}
+
+void ElfAssembler::EmitDwFormData(int32 attrValue, uint8 sizeInByte)
+{
+    debugInfoSection->AppendData(attrValue, sizeInByte);
+}
+
+void ElfAssembler::EmitDwFormData8()
+{
+    int64 addr = 0;
+    size_t addrSizeInByte = 8;
+    debugInfoSection->AppendData(addr, addrSizeInByte);
+}
+
+void ElfAssembler::EmitDwFormData8(uint32 endLabelFuncPuIdx, uint32 startLabelFuncPuIdx, uint32 endLabelIdx,
+                                   uint32 startLabelIdx)
+{
+    int64 addr = 0;
+    size_t addrSizeInByte = 8;
+    debugInfoSection->AppendData(addr, addrSizeInByte);
+}
+
+void ElfAssembler::EmitLabel(uint32 funcPuIdx, uint32 labIdx)
+{
+    int64 labSymIdx = CalculateLabelSymIdx(funcPuIdx, labIdx);
+    /* register the label under the same key the fixup below is appended with */
+    UpdateLabel(labSymIdx);
+    int64 addr = 0;
+    size_t addrSizeInByte = 8;
+    AppendFixup(labSymIdx, kAbsolute64, {debugInfoSection->GetDataSize(), addrSizeInByte}, debugInfoFixups);
+    debugInfoSection->AppendData(addr, addrSizeInByte);
+}
+
+void ElfAssembler::EmitDwFormSecOffset()
+{
+    int64 lineLabelOffset = 0;
+    size_t lineLabelOffsetSize = 4;
+    /* If labelSymIdx equals LLONG_MAX - 2, there is not a real label bound to the fixup. */
+    AppendFixup(LLONG_MAX - 2, kAbsolute, {debugInfoSection->GetDataSize(), lineLabelOffsetSize}, debugInfoFixups);
+    debugInfoSection->AppendData(lineLabelOffset, lineLabelOffsetSize);
+}
+
+void ElfAssembler::EmitDwFormAddr(bool emitTextBegin)
+{
+    if (emitTextBegin) {
+        int64 addr = 0;
+        size_t addrSizeInByte = 8;
+        /* If labelSymIdx equals LLONG_MAX - 1, there is not a real label bound to the fixup.
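+           The sentinels near LLONG_MAX mark fixups that resolve to section
+           symbols rather than user labels; HandleDebugInfoSectionFixup maps
+           LLONG_MAX to the .debug_line section symbol, LLONG_MAX - 1 to
+           .text, and any other unmanaged index (LLONG_MAX - 2 above) to
+           .debug_abbrev.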
*/ + AppendFixup(LLONG_MAX - 1, kAbsolute64, {debugInfoSection->GetDataSize(), addrSizeInByte}, debugInfoFixups); + debugInfoSection->AppendData(addr, addrSizeInByte); + } +} + +void ElfAssembler::EmitDwFormRef4(uint64 offsetOrValue, bool unknownType, bool emitOffset) +{ + size_t offsetOrValueSize = 4; + debugInfoSection->AppendData(static_cast(offsetOrValue), offsetOrValueSize); +} + +void ElfAssembler::EmitDwFormExprlocCfa(uint32 dwOp) +{ + debugInfoSection->AppendData(1, 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); +} + +void ElfAssembler::EmitDwFormExprlocAddr(uint32 dwOp, const std::string &addrStr) +{ + debugInfoSection->AppendData(static_cast(k9ByteSize), 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); + size_t addStrSize = 8; + debugInfoSection->AppendData(&addrStr, addStrSize); +} + +void ElfAssembler::EmitDwFormExprlocFbreg(uint32 dwOp, int fboffset, size_t sleb128Size) +{ + auto sleb128SizeEncode = EncodeSLEB128(1 + static_cast(sleb128Size)); + debugInfoSection->AppendData(&sleb128SizeEncode, sleb128SizeEncode.size()); + debugInfoSection->AppendData(static_cast(dwOp), 1); + auto fboffsetSleb128 = EncodeSLEB128(fboffset); + debugInfoSection->AppendData(&fboffsetSleb128, fboffsetSleb128.size()); +} + +void ElfAssembler::EmitDwFormExprlocBregn(uint32 dwOp, const std::string &dwOpName) +{ + debugInfoSection->AppendData(static_cast(k2Bytes), 1); + debugInfoSection->AppendData(static_cast(dwOp), 1); + debugInfoSection->AppendData(&dwOpName, dwOpName.size()); + int64 offset = 0; + debugInfoSection->AppendData(offset, 1); +} + +void ElfAssembler::EmitDwFormExprloc(uintptr elp) +{ + auto elpUleb128 = EncodeULEB128(elp); + debugInfoSection->AppendData(&elpUleb128, elpUleb128.size()); +} + +void ElfAssembler::EmitDIDebugAbbrevDiae(bool verbose, uint32 abbrevId, uint32 tag, const std::string &dwTagName, + bool withChildren) +{ + debugAbbrevSection = new DataSection(".debug_abbrev", SHT_PROGBITS, 0, 1); + RegisterSection(*debugAbbrevSection); + /* Abbrev Entry ID */ + auto abbrevIdUleb128 = EncodeULEB128(abbrevId); + debugAbbrevSection->AppendData(&abbrevIdUleb128, abbrevIdUleb128.size()); + /* TAG */ + auto tagUleb128 = EncodeULEB128(tag); + debugAbbrevSection->AppendData(&tagUleb128, tagUleb128.size()); + /* children */ + auto childrenValue = withChildren ? 
1 : 0; + debugAbbrevSection->AppendData(childrenValue, 1); +} + +void ElfAssembler::EmitDIDebugAbbrevDiaePairItem(bool verbose, uint32 aplAt, uint32 aplFrom, + const std::string &dwAtName, const std::string &dwFromName) +{ + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + auto aplAtUleb128 = EncodeULEB128(aplAt); + debugAbbrevSection->AppendData(&aplAtUleb128, aplAtUleb128.size()); + auto aplFromUleb128 = EncodeULEB128(aplFrom); + debugAbbrevSection->AppendData(&aplFromUleb128, aplFromUleb128.size()); +} + +void ElfAssembler::EmitDIDebugSectionEnd(SectionKind secKind) +{ + int64 value = 0; + size_t valueSizeInByte = 1; + switch (secKind) { + case kSDebugInfo: + debugInfoSection->AppendData(value, valueSizeInByte); + break; + case kSDebugAbbrev: + debugAbbrevSection->AppendData(value, valueSizeInByte); + break; + case kSBss: + case kSComm: + case kSData: + case kSRodata: + case kSTbss: + case kSTdata: + case kSText: + case kSDebugStr: + default: + assert(false && "unsupport SectionKind in EmitDIDebugSectionEnd"); + break; + } +} + +void ElfAssembler::EmitDIDebugStrSection(const std::vector &strps, const std::vector &debugStrs, + uint64 size, size_t strTableSize) +{ + debugStrSection = new DataSection(".debug_str", SHT_PROGBITS, SHF_MASKPROC, 1); + RegisterSection(*debugStrSection); + for (int i = 0; i < static_cast(debugStrs.size()); i++) { + int64 strLabSymIdx = CalculateStrLabelSymIdx(size, strps[i], strTableSize); + UpdateLabel(strLabSymIdx, LabelType::kDebugStrLabel, debugStrSection->GetDataSize()); + debugStrSection->AppendData(&debugStrs[i], debugStrs[i].size()); + EmitDIDebugSectionEnd(kSDebugStr); + } +} + +void ElfAssembler::HandleDebugInfoSectionFixup() +{ + if (!debugInfoFixups.empty()) { + relaDebugInfoSection = new RelaSection(".rela.debug_info", SHT_RELA, SHF_INFO_LINK, + debugInfoSection->GetIndex(), k8Bits, *symbolTabSection); + RegisterSection(*relaDebugInfoSection); + } + for (auto fixup : debugInfoFixups) { + int64 labelIdx = fixup->GetlabelIdx(); + const std::pair &offsetPair = fixup->GetOffset(); + FixupKind fixupKind = fixup->GetFixupKind(); + uint64 relocType = GetRelaType(fixupKind); + int64 addend = fixup->GetDisp(); + int64 textSecSymIdx = ~textSection->GetIndex() + 1; + int64 debugLineSecSymIdx = ~debugLineSection->GetIndex() + 1; + int64 abbrevSecSymIdx = ~debugAbbrevSection->GetIndex() + 1; + uint64 pos = labelIdx == LLONG_MAX + ? symbolTabSection->GetIdxInSymbols(debugLineSecSymIdx) + : (labelIdx == LLONG_MAX - 1 ? 
symbolTabSection->GetIdxInSymbols(textSecSymIdx) + : symbolTabSection->GetIdxInSymbols(abbrevSecSymIdx)); + if (!labelManager.count(labelIdx)) { + relaDebugInfoSection->AppendRela( + {offsetPair.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + continue; + } + Label *label = labelManager.at(labelIdx); + LabelType labelType = label->GetLabelType(); + addend = label->GetRelOffset(); + if (labelType == LabelType::kBBLabel) { + pos = symbolTabSection->GetIdxInSymbols(textSecSymIdx); + } else if (labelType == LabelType::kDebugStrLabel) { + pos = symbolTabSection->GetIdxInSymbols(~debugStrSection->GetIndex() + 1); + } else { + assert(false && "unsupport label type in HandleDebugInfoSectionFixup!"); + } + relaDebugInfoSection->AppendRela( + {offsetPair.first, static_cast((pos << kLeftShift32Bits) + (relocType & 0xffffffff)), addend}); + } +} + +/* start of X64 instructions */ +/* mov */ +void ElfAssembler::Mov(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x88, 0); +} + +void ElfAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + bool isSymbol = immOpnd.second; + uint64 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 regSize = GetRegSize(reg); + uint8 regId = GetRegCodeId(reg); + uint8 code = 0xB0 | ((regSize == k8Bits ? 0 : 1) << kLeftShift3Bits); + if (HasOpndSizePrefix(reg)) { + Encodeb(0x66); + } + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + if (regSize == k64Bits && (isSymbol || Is32Bits(imm))) { + Encodeb(0xC7); + code = 0xC0; + regSize = k32Bits; + } + size_t offsetSize = isSymbol ? k64Bits : regSize / k8Bits; + Encodeb(code | regId); + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + } + Encodeb(imm, offsetSize); +} + +void ElfAssembler::Mov(InsnSize insnSize, const Mem &mem, Reg reg) +{ + if (GetRegId(reg) == 0 && mem.memType == kOnlyDisp) { + MovRegAndDisp(reg, mem, 0xA0); + } else { + OpRM(reg, mem, 0x8A, 0); + } +} + +void ElfAssembler::Mov(InsnSize insnSize, Reg reg, const Mem &mem) +{ + if (GetRegId(reg) == 0 && mem.memType == kOnlyDisp) { + MovRegAndDisp(reg, mem, 0xA2); + } else { + OpRM(reg, mem, 0x88, 0); + } +} + +void ElfAssembler::Mov(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + bool isSymbol = immOpnd.second; + uint32 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + uint8 immBit = Is8Bits(imm) ? k8Bits : (Is16Bits(imm) ? k16Bits : k32Bits); + if (mem.size == k8Bits) { + immBit = k8Bits; + } + if (immBit == k16Bits && (mem.size == k64Bits || mem.size == k32Bits)) { + immBit = k32Bits; /* if 32/64bit mode, imm val can not use 16-bit. */ + } + immBit = isSymbol ? 
k64Bits : immBit; + size_t immSize = immBit / k8Bits; + OpMem(mem, 0xC6, 0, 0); + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), immSize}, fixups); + imm = 0; + } + Encodeb(imm, immSize); +} + +/* floating point mov */ +void ElfAssembler::Mov(Reg srcReg, Reg destReg, bool isMovD) { + uint8 srcRegSize = GetRegSize(srcReg); + uint8 destRegSize = GetRegSize(destReg); + if (srcRegSize == k128Bits || destRegSize == k128Bits) { + Encodeb(0x66); + } + if (srcRegSize == k128Bits) { + OpRR(srcReg, destReg, 0x0F, 0x7E); + } else if (destRegSize == k128Bits) { + OpRR(destReg, srcReg, 0x0F, 0x6E); + } +} + +void ElfAssembler::MovF(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else { + Encodeb(0xF2); + } + OpRM(reg, mem, 0x0F, 0x10); +} + +void ElfAssembler::MovF(Reg reg, const Mem &mem, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else { + Encodeb(0xF2); + } + OpRM(reg, mem, 0x0F, 0x11); +} + +/* movabs */ +void ElfAssembler::Movabs(const ImmOpnd &immOpnd, Reg reg) +{ + bool isSymbol = immOpnd.second; + uint64 imm = static_cast(immOpnd.first); /* When isSymbol is true, this is index. */ + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + Encodeb(0xB8 | GetRegCodeId(reg)); + size_t offsetSize = 8; + if (isSymbol) { + UpdateLabel(immOpnd.first); + AppendFixup(immOpnd.first, kAbsolute64, {static_cast(codeBuff.size()), offsetSize}, fixups); + imm = 0; + } + Encodeb(imm, offsetSize); +} + +void ElfAssembler::Movabs(int64 symIdx, Reg reg) +{ + if (GetRex(reg) != 0) { + Encodeb(GetRex(reg)); + } + Encodeb(0xB8 | GetRegCodeId(reg)); + size_t offsetSize = 8; + size_t offset = codeBuff.size() - offsetSize; + UpdateLabel(symIdx); + AppendFixup(symIdx, kAbsolute64, {offset, offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); +} + +/* push */ +void ElfAssembler::Push(InsnSize insnSize, Reg reg) +{ + OpPushPop(reg, 0x50); +} + +/* pop */ +void ElfAssembler::Pop(InsnSize insnSize, Reg reg) +{ + OpPushPop(reg, 0x58); +} + +/* lea */ +void ElfAssembler::Lea(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x8C); +} + +/* movzx */ +void ElfAssembler::MovZx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x0F, 0xB6 | (GetRegSize(srcReg) == k8Bits ? 0 : 1), true); +} + +void ElfAssembler::MovZx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0F, 0xB6 | (mem.size == k8Bits ? 0 : 1), true); +} + +/* movsx */ +void ElfAssembler::MovSx(InsnSize sSize, InsnSize dSize, Reg srcReg, Reg destReg) +{ + uint8 code1 = 0x0F; + uint8 code2 = 0xBE | (GetRegSize(srcReg) == k8Bits ? 0 : 1); + if (GetRegSize(srcReg) == k32Bits && GetRegSize(destReg) == k64Bits) { + code1 = 0x63; + code2 = 0; + } + OpRR(srcReg, destReg, code1, code2, true); +} + +void ElfAssembler::MovSx(InsnSize sSize, InsnSize dSize, const Mem &mem, Reg reg) +{ + uint8 code1 = 0x0F; + uint8 code2 = 0xBE | (mem.size == k8Bits ? 
0 : 1); + if (mem.size == k32Bits && GetRegSize(reg) == k64Bits) { + code1 = 0x63; + code2 = 0; + } + OpRM(reg, mem, code1, code2, true); +} + +/* add */ +void ElfAssembler::Add(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x00); +} + +void ElfAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x00, 0); +} + +void ElfAssembler::Add(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x02); +} + +void ElfAssembler::Add(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x00); +} + +void ElfAssembler::Add(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, 0); +} + +/* add floating point */ +void ElfAssembler::Add(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRR(destReg, srcReg, 0x0F, 0x58); +} + +void ElfAssembler::Add(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRM(reg, mem, 0x0F, 0x58); +} + +/* sub */ +void ElfAssembler::Sub(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x28); +} + +void ElfAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x28, kSubModReg); +} + +void ElfAssembler::Sub(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x2A); +} + +void ElfAssembler::Sub(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x28); +} + +void ElfAssembler::Sub(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, kSubModReg); +} + +/* sub floating point */ +void ElfAssembler::Sub(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRR(destReg, srcReg, 0x0F, 0x5c); +} + +void ElfAssembler::Sub(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRM(reg, mem, 0x0F, 0x5c); +} + + +/* and */ +void ElfAssembler::And(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x20); +} + +void ElfAssembler::And(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x21); +} + +void ElfAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x20, kAndModReg); +} + +void ElfAssembler::And(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x20); +} + +void ElfAssembler::And(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, kAndModReg); +} + +/* or */ +void ElfAssembler::Or(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x08); +} + +void ElfAssembler::Or(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0A); +} + +void ElfAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x08, kOrModReg); +} + +void ElfAssembler::Or(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x08); +} + +void ElfAssembler::Or(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, kOrModReg); +} + +/* xor */ +void ElfAssembler::Xor(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x30); +} + +void ElfAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x30, kXorModReg); +} + +void ElfAssembler::Xor(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x32); +} + +void 
ElfAssembler::Xor(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x30); +} + +void ElfAssembler::Xor(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, kXorModReg); +} + +/* not */ +void ElfAssembler::Not(InsnSize insnSize, Reg reg) +{ + OpReg(reg, 0xF6, 0, kNotModReg); +} + +void ElfAssembler::Not(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0xF6, 0, kNotModReg); +} + +/* neg */ +void ElfAssembler::Neg(InsnSize insnSize, Reg reg) +{ + OpReg(reg, 0xF6, 0, kNegModReg); +} + +void ElfAssembler::Neg(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0xF6, 0, kNegModReg); +} + +/* div & cwd, cdq, cqo */ +void ElfAssembler::Idiv(InsnSize insnSize, Reg reg) +{ + OpReg(reg, 0xF6, 0, kIdivModReg); +} + +void ElfAssembler::Idiv(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0xF6, 0, kIdivModReg); +} + +void ElfAssembler::Div(InsnSize insnSize, Reg reg) +{ + OpReg(reg, 0xF6, 0, kDivModReg); +} + +void ElfAssembler::Div(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0xF6, 0, kDivModReg); +} + +void ElfAssembler::Cwd() +{ + Encodeb(0x66); + Encodeb(0x99); +} + +void ElfAssembler::Cdq() +{ + Encodeb(0x99); +} + +void ElfAssembler::Cqo() +{ + Encodeb(0x48); + Encodeb(0x99); +} + +/* shl */ +void ElfAssembler::Shl(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpReg(destReg, 0xD2, 0, kShlModReg); +} + +void ElfAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpReg(reg, 0xC0, 0, kShlModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Shl(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpMem(mem, 0xD2, 0, kShlModReg); +} + +void ElfAssembler::Shl(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpMem(mem, 0xC0, 0, kShlModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* sar */ +void ElfAssembler::Sar(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpReg(destReg, 0xD2, 0, kSarModReg); +} + +void ElfAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpReg(reg, 0xC0, 0, kSarModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Sar(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpMem(mem, 0xD2, 0, kSarModReg); +} + +void ElfAssembler::Sar(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpMem(mem, 0xC0, 0, kSarModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* shr */ +void ElfAssembler::Shr(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpReg(destReg, 0xD2, 0, kShrModReg); +} + +void ElfAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpReg(reg, 0xC0, 0, kShrModReg); + Encodeb(static_cast(immOpnd.first)); +} + +void ElfAssembler::Shr(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpMem(mem, 0xD2, 0, kShrModReg); +} + +void ElfAssembler::Shr(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpMem(mem, 0xC0, 0, kShrModReg); + Encodeb(static_cast(immOpnd.first)); +} + +/* jmp */ +void ElfAssembler::Jmp(Reg reg) +{ + OpReg(reg, 0xFF, 0, kJmpModReg); +} + +void ElfAssembler::Jmp(const Mem &mem) +{ + OpMem(mem, 0xFF, 0, kJmpModReg); +} + +void ElfAssembler::Jmp(int64 symIdx) +{ + JmpToLabel(symIdx, 0xE9); +} + +/* jump condition */ +void ElfAssembler::Je(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x84); +} + +void ElfAssembler::Ja(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x87); +} + +void ElfAssembler::Jae(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x83); +} + +void ElfAssembler::Jne(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x85); +} + +void ElfAssembler::Jb(int64 symIdx) 
+{ + JmpToLabel(symIdx, 0x0F, 0x82); +} + +void ElfAssembler::Jbe(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x86); +} + +void ElfAssembler::Jg(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x8F); +} + +void ElfAssembler::Jge(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x8D); +} + +void ElfAssembler::Jl(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x8C); +} + +void ElfAssembler::Jle(int64 symIdx) +{ + JmpToLabel(symIdx, 0x0F, 0x8E); +} + +/* cmp */ +void ElfAssembler::Cmp(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x38); +} + +void ElfAssembler::Cmp(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x3A); +} + +void ElfAssembler::Cmp(InsnSize insnSize, Reg reg, const Mem &mem) +{ + OpRM(reg, mem, 0x38); +} + +void ElfAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, Reg reg) +{ + OpImmAndReg(immOpnd, reg, 0x38, kCmpModReg); +} + +void ElfAssembler::Cmp(InsnSize insnSize, const ImmOpnd &immOpnd, const Mem &mem) +{ + OpImmAndMem(immOpnd, mem, kCmpModReg); +} + +/* test */ +void ElfAssembler::Test(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(srcReg, destReg, 0x84); +} + +/* setcc */ +void ElfAssembler::Setbe(Reg reg) +{ + OpReg(reg, 0x0F, 0x96, 0); +} + +void ElfAssembler::Setbe(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x96, 0); +} + +void ElfAssembler::Setle(Reg reg) +{ + OpReg(reg, 0x0F, 0x9E, 0); +} + +void ElfAssembler::Setle(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x9E, 0); +} + +void ElfAssembler::Setae(Reg reg) +{ + OpReg(reg, 0x0F, 0x93, 0); +} + +void ElfAssembler::Setae(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x93, 0); +} + +void ElfAssembler::Setge(Reg reg) +{ + OpReg(reg, 0x0F, 0x9D, 0); +} +void ElfAssembler::Setge(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x9D, 0); +} + +void ElfAssembler::Setne(Reg reg) +{ + OpReg(reg, 0x0F, 0x95, 0); +} + +void ElfAssembler::Setne(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x95, 0); +} + +void ElfAssembler::Setb(Reg reg) +{ + OpReg(reg, 0x0F, 0x92, 0); +} + +void ElfAssembler::Setb(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x92, 0); +} + +void ElfAssembler::Setl(Reg reg) +{ + OpReg(reg, 0x0F, 0x9C, 0); +} + +void ElfAssembler::Setl(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x9C, 0); +} + +void ElfAssembler::Seta(Reg reg) +{ + OpReg(reg, 0x0F, 0x97, 0); +} + +void ElfAssembler::Seta(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x97, 0); +} + +void ElfAssembler::Setg(Reg reg) +{ + OpReg(reg, 0x0F, 0x9F, 0); +} + +void ElfAssembler::Setg(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x9F, 0); +} + +void ElfAssembler::Sete(Reg reg) +{ + OpReg(reg, 0x0F, 0x94, 0); +} + +void ElfAssembler::Sete(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x94, 0); +} + +void ElfAssembler::Seto(Reg reg) +{ + OpReg(reg, 0x0F, 0x90, 0); +} + +void ElfAssembler::Seto(const Mem &mem) +{ + OpMem(mem, 0x0F, 0x90, 0); +} + +/* cmov */ +void ElfAssembler::Cmova(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x47); +} + +void ElfAssembler::Cmova(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x47); +} +void ElfAssembler::Cmovae(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x43); +} + +void ElfAssembler::Cmovae(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x43); +} + +void ElfAssembler::Cmovb(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x42); +} + +void ElfAssembler::Cmovb(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x42); +} + +void ElfAssembler::Cmovbe(InsnSize insnSize, Reg srcReg, 
Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x46); +} + +void ElfAssembler::Cmovbe(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x46); +} + +void ElfAssembler::Cmove(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x44); +} + +void ElfAssembler::Cmove(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x44); +} + +void ElfAssembler::Cmovg(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x4F); +} + +void ElfAssembler::Cmovg(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x4F); +} + +void ElfAssembler::Cmovge(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x4D); +} + +void ElfAssembler::Cmovge(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x4D); +} + +void ElfAssembler::Cmovl(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x4C); +} + +void ElfAssembler::Cmovl(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x4C); +} + +void ElfAssembler::Cmovle(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x4E); +} + +void ElfAssembler::Cmovle(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x4E); +} + +void ElfAssembler::Cmovo(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x40); +} + +void ElfAssembler::Cmovne(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpCmovcc(srcReg, destReg, 0x0F, 0x45); +} + +void ElfAssembler::Cmovne(InsnSize insnSize, const Mem &mem, Reg reg) +{ + OpRM(reg, mem, 0x0E, 0x45); +} + +/* call */ +void ElfAssembler::Call(InsnSize insnSize, Reg reg) +{ + // Save to disignate memory + OpReg(reg, 0xFF, 0, kCallModReg); +} + +void ElfAssembler::Call(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0xFF, 0, kCallModReg); +} + +void ElfAssembler::Call(InsnSize insnSize, int64 symIdx) +{ + Encodeb(0xE8); + if (!CanEncodeLabel(symIdx)) { + size_t offsetSize = 4; + UpdateLabel(symIdx, LabelType::kFunc); + AppendFixup(symIdx, kPLT, {static_cast(codeBuff.size()), offsetSize}, fixups); + uint8 imm = 0; + Encodeb(imm, offsetSize); + } +} + +/* ret */ +void ElfAssembler::Ret() +{ + Encodeb(0xC3); +} + +/* leave */ +void ElfAssembler::Leave() +{ + Encodeb(0xC9); +} + +/* imul */ +void ElfAssembler::Imul(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + OpRR(destReg, srcReg, 0x0F, 0xAF); +} + +/* mul float */ +void ElfAssembler::Mul(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRR(destReg, srcReg, 0x0F, 0x59); +} + +void ElfAssembler::Mul(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Encodeb(0xF3); + } else{ + Encodeb(0xF2); + } + OpRM(reg, mem, 0x0F, 0x59); +} + +/* nop */ +void ElfAssembler::Nop(InsnSize insnSize, const Mem &mem) +{ + OpMem(mem, 0x0E, 0x1F, 0); +} + +void ElfAssembler::Nop() +{ + Encodeb(0x90); +} + +/* byte swap */ +void ElfAssembler::Bswap(InsnSize insnSize, Reg reg) +{ + uint8 rex = GetRex(reg); + if (rex != 0) { + Encodeb(rex); + } + Encodeb(0x0F); + Encodeb(0xC8 | GetRegCodeId(reg)); +} + +void ElfAssembler::Xchg(InsnSize insnSize, Reg srcReg, Reg destReg) +{ + /* if the reg is ax, eax or rax */ + if ((GetRegId(srcReg) == 0 || GetRegId(destReg) == 0) && GetRegSize(srcReg) != k8Bits) { + uint8 rex = GetRex(srcReg, destReg); + if (rex != 0) { + Encodeb(rex); + } else if (GetRegSize(srcReg) == k16Bits) { + Encodeb(0x66); + } + uint8 regCodeId = 
GetRegId(srcReg) == 0 ? GetRegCodeId(destReg) : GetRegCodeId(srcReg); + Encodeb(0x90 | regCodeId); + } else { + OpRR(srcReg, destReg, 0x86); + } +} + +/* floating point */ +void ElfAssembler::MovF(Reg srcReg, Reg destReg, bool isSingle) { + bool isXMM = GetRegSize(srcReg) == k128Bits || GetRegSize(destReg) == k128Bits; + if (isSingle) { + if (isXMM) { + Encodeb(0xF3); + } + OpRR(destReg, srcReg, 0x0F, 0x10); + } else { + if (isXMM) { + Encodeb(0xF2); + } + OpRR(destReg, srcReg, 0x0F, 0x10); + } +} + + /* floating point and */ +void ElfAssembler::And(Reg srcReg, Reg destReg, bool isSingle) { + if (isSingle) { + Encodeb(0x100); + } else{ + Encodeb(0x66); + } + OpRR(destReg, srcReg, 0x0F, 0x54); +} + +void ElfAssembler::And(const Mem &mem, Reg reg, bool isSingle) { + if (isSingle) { + Encodeb(0x100); + } else{ + Encodeb(0x66); + } + OpRM(reg, mem, 0x0F, 0x54); +} + +/* floating div */ +void ElfAssembler::Divsd(Reg srcReg, Reg destReg) { + Encodeb(0xF2); + OpRR(destReg, srcReg, 0x0F, 0x5E); +} + +void ElfAssembler::Divsd(const Mem &mem, Reg reg) { + Encodeb(0xF2); + OpRM(reg, mem, 0x0F, 0x5E); +} + +/* convert int2float */ +void ElfAssembler::Cvtsi2ss(InsnSize insnSize, Reg srcReg, Reg destReg) { + Encodeb(0xF3); + OpRR(destReg, srcReg, 0x0F, 0x2A); +} + +void ElfAssembler::Cvtsi2sd(InsnSize insnSize, Reg srcReg, Reg destReg) { + Encodeb(0xF2); + OpRR(destReg, srcReg, 0x0F, 0x2A); +} + +/*convert float2int */ +void ElfAssembler::Cvttsd2si(InsnSize insnSize, Reg srcReg, Reg destReg) { + Encodeb(0xF2); + OpRR(destReg, srcReg, 0x0F, 0x2C); +} + +void ElfAssembler::Cvttss2si(InsnSize insnSize, Reg srcReg, Reg destReg) { + Encodeb(0xF3); + OpRR(destReg, srcReg, 0x0F, 0x2C); +} + +/* convert float2float */ +void ElfAssembler::Cvtss2sd(Reg srcReg, Reg destReg) { + Encodeb(0xF3); + OpRR(destReg, srcReg, 0x0F, 0x5A); +} + +void ElfAssembler::Cvtsd2ss(Reg srcReg, Reg destReg) { + Encodeb(0xF2); + OpRR(destReg, srcReg, 0x0F, 0x5A); +} + +/* unordered compare */ +void ElfAssembler::Ucomisd(Reg srcReg, Reg destReg) { + Encodeb(0x66); + OpRR(destReg, srcReg, 0x0F, 0x2E); +} + +void ElfAssembler::Ucomiss(Reg srcReg, Reg destReg) { + Encodeb(0x100); + OpRR(destReg, srcReg, 0x0F, 0x2E); +} +/* end of X64 instructions */ + +/* process stackmap */ +void ElfAssembler::RecordStackmap(const std::vector &referenceMap, + const std::vector &deoptVreg2LocationInfo) +{ + const auto &emitMemoryManager = maplebe::CGOptions::GetInstance().GetEmitMemoryManager(); + if (emitMemoryManager.codeSpace == nullptr) { + return; + } + emitMemoryManager.pc2CallSiteInfoSaver(emitMemoryManager.codeSpace, codeBuff.size(), referenceMap); + emitMemoryManager.pc2DeoptInfoSaver(emitMemoryManager.codeSpace, codeBuff.size(), deoptVreg2LocationInfo); +} +} /* namespace assembler */ \ No newline at end of file diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_MPIsel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bb94edb9989a2271b71a3887b3416b9140b31ff2 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -0,0 +1,1539 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_MPISel.h" +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "x64_isa_tbl.h" +#include "x64_cg.h" +#include "isel.h" + +namespace maplebe { +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) */ +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const +{ + PrimType symType; + int32 fieldOffset = 0; + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + DEBUG_ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); +} +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const +{ + MIRStorageClass storageClass = symbol.GetStorageClass(); + MemOperand *result = nullptr; + RegOperand *stackBaseReg = nullptr; + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + auto *symloc = static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + DEBUG_ASSERT(symloc != nullptr, "sym loc should have been defined"); + stackBaseReg = static_cast(cgFunc)->GetBaseReg(*symloc); + int stOfst = cgFunc->GetBaseOffset(*symloc); + /* Create field symbols in aggregate structure */ + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm(k64BitSize, stOfst + offset)); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + if ((storageClass == kScGlobal) || (storageClass == kScExtern) || (storageClass == kScPstatic) || + (storageClass == kScFstatic)) { + stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt); + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(stOfstOpnd); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + CHECK_FATAL(false, "NIY"); + return *result; +} + +void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) +{ + MIRType *retType = cgFunc->GetFunction().GetReturnType(); + X64CallConvImpl retLocator(cgFunc->GetBecommon()); + CCLocInfo retMech; + retLocator.LocateRetVal(*retType, retMech); + if (retMech.GetRegCount() == 0) { + return; + } + std::vector retRegs; + if (!cgFunc->GetFunction().StructReturnedInRegs() || retNode.Opnd(0)->GetOpCode() == OP_constval) { + PrimType oriPrimType = retMech.GetPrimTypeOfReg0(); + regno_t retReg = retMech.GetReg0(); + DEBUG_ASSERT(retReg != kRinvalid, "NIY"); + RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, 
GetPrimTypeBitSize(oriPrimType), + cgFunc->GetRegTyFromPrimTy(oriPrimType)); + retRegs.push_back(&retOpnd); + SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType()); + } else { + CHECK_FATAL(opnd.IsMemoryAccessOperand(), "NIY"); + MemOperand &memOpnd = static_cast(opnd); + ImmOperand *offsetOpnd = memOpnd.GetOffsetOperand(); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + + PrimType oriPrimType0 = retMech.GetPrimTypeOfReg0(); + regno_t retReg0 = retMech.GetReg0(); + DEBUG_ASSERT(retReg0 != kRinvalid, "NIY"); + RegOperand &retOpnd0 = cgFunc->GetOpndBuilder()->CreatePReg(retReg0, GetPrimTypeBitSize(oriPrimType0), + cgFunc->GetRegTyFromPrimTy(oriPrimType0)); + MemOperand &rhsMemOpnd0 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType0)); + rhsMemOpnd0.SetBaseRegister(*baseOpnd); + rhsMemOpnd0.SetOffsetOperand(*offsetOpnd); + retRegs.push_back(&retOpnd0); + SelectCopy(retOpnd0, rhsMemOpnd0, oriPrimType0); + + regno_t retReg1 = retMech.GetReg1(); + if (retReg1 != kRinvalid) { + PrimType oriPrimType1 = retMech.GetPrimTypeOfReg1(); + RegOperand &retOpnd1 = cgFunc->GetOpndBuilder()->CreatePReg(retReg1, GetPrimTypeBitSize(oriPrimType1), + cgFunc->GetRegTyFromPrimTy(oriPrimType1)); + MemOperand &rhsMemOpnd1 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType1)); + ImmOperand &newOffsetOpnd = static_cast(*offsetOpnd->Clone(*cgFunc->GetMemoryPool())); + newOffsetOpnd.SetValue(newOffsetOpnd.GetValue() + GetPrimTypeSize(oriPrimType0)); + rhsMemOpnd1.SetBaseRegister(*baseOpnd); + rhsMemOpnd1.SetOffsetOperand(newOffsetOpnd); + retRegs.push_back(&retOpnd1); + SelectCopy(retOpnd1, rhsMemOpnd1, oriPrimType1); + } + } + /* for optimization ,insert pseudo ret ,in case rax,rdx is removed*/ + SelectPseduoForReturn(retRegs); +} + +void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) +{ + for (auto retReg : retRegs) { + MOperator mop = x64::MOP_pseudo_ret_int; + Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]); + cgFunc->GetCurBB()->AppendInsn(pInsn); + pInsn.AddOpndChain(*retReg); + } +} + +void X64MPIsel::SelectReturn() +{ + /* jump to epilogue */ + MOperator mOp = x64::MOP_jmpq_l; + LabelNode *endLabel = cgFunc->GetEndLabel(); + auto endLabelName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); +} + +void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) +{ + int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = + static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = + cgFunc->GetOpndBuilder()->CreateMem(spOpnd, (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) +{ + CHECK_FATAL(parmNum < kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + +std::tuple X64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) +{ + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL(false, "unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused) +{ + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + DEBUG_ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, 
ploc.reg0, 0);
+        if (ploc.reg1 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1);
+        }
+        if (ploc.reg2 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2);
+        }
+        if (ploc.reg3 != kRinvalid) {
+            CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3);
+        }
+    }
+}
+
+/*
+ * SelectParmList generates an instruction for each parameter
+ * to load the parameter value into the corresponding register.
+ * We return a list of registers to the call instruction because
+ * they may be needed in the register allocation phase.
+ * fpNum is an output parameter that returns the number of vector
+ * registers used.
+ */
+void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum)
+{
+    paramPassByReg.clear();
+    fpNum = 0;
+    /* for IcallNode, the 0th operand is the function pointer */
+    size_t argBegin = 0;
+    if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) {
+        ++argBegin;
+    }
+
+    MIRFunction *callee = nullptr;
+    if (naryNode.GetOpCode() == OP_call) {
+        PUIdx calleePuIdx = static_cast<CallNode &>(naryNode).GetPUIdx();
+        callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx);
+    }
+    X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(naryNode));
+    CCLocInfo ploc;
+    for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) {
+        BaseNode *argExpr = naryNode.Opnd(i);
+        DEBUG_ASSERT(argExpr != nullptr, "not null check");
+        PrimType primType = argExpr->GetPrimType();
+        DEBUG_ASSERT(primType != PTY_void, "primType should not be void");
+        bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i));
+        if (primType == PTY_agg) {
+            SelectParmListForAggregate(*argExpr, parmLocator, isArgUnused);
+            continue;
+        }
+
+        Operand *argOpnd = HandleExpr(naryNode, *argExpr);
+        DEBUG_ASSERT(argOpnd != nullptr, "not null check");
+        MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast<uint32>(primType)];
+        parmLocator.LocateNextParm(*mirType, ploc);
+
+        /* skip unused args */
+        if (isArgUnused) {
+            continue;
+        }
+
+        if (ploc.reg0 != x64::kRinvalid) {
+            /* load to the register.
*/ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType}); + if (x64::IsFPSIMDRegister(static_cast(ploc.reg0))) { + fpNum++; + } + } else { + /* load to stack memory */ + RegOperand &baseOpnd = + cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &actMemOpnd = + cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, GetPrimTypeBitSize(primType)); + SelectCopy(actMemOpnd, *argOpnd, primType); + } + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NIY"); + } + + /* param pass by reg */ + for (auto [regOpnd, argOpnd, primType] : paramPassByReg) { + DEBUG_ASSERT(regOpnd != nullptr, "not null check"); + DEBUG_ASSERT(argOpnd != nullptr, "not null check"); + SelectCopy(*regOpnd, *argOpnd, primType); + srcOpnds.PushOpnd(*regOpnd); + } +} + +RegOperand &X64MPIsel::SelectSpecialRegread(PregIdx pregIdx, PrimType primType) +{ + switch (-pregIdx) { + case kSregFp: { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RFP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType)); + } + case kSregSp: { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, cgFunc->GetRegTyFromPrimTy(primType)); + } + default: { + CHECK_FATAL(false, "ERROR: Not supported special register!"); + } + } +} + +bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) +{ + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +void X64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) +{ + CHECK_FATAL((aggSize > 0) && (aggSize <= k16ByteSize), "out of range."); + RegOperand *baseOpnd = symbolMem.GetBaseRegister(); + int32 stOffset = symbolMem.GetOffsetOperand()->GetValue(); + bool isCopyOneReg = (aggSize <= k8ByteSize); + int32 extraSize = (aggSize % k8ByteSize) * kBitsPerByte; + if (extraSize == 0) { + extraSize = k64BitSize; + } else if (extraSize <= k8BitSize) { + extraSize = k8BitSize; + } else if (extraSize <= k16BitSize) { + extraSize = k16BitSize; + } else if (extraSize <= k32BitSize) { + extraSize = k32BitSize; + } else { + extraSize = k64BitSize; + } + /* generate move from return registers(rax, rdx) to mem of symbol */ + PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraSize); + /* mov %rax mem */ + RegOperand ®Rhs0 = + cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, (isCopyOneReg ? extraSize : k64BitSize), kRegTyInt); + MemOperand &memSymbo0 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast(stOffset), + isCopyOneReg ? extraSize : k64BitSize); + SelectCopy(memSymbo0, regRhs0, isCopyOneReg ? 
extraTy : PTY_u64); + /* mov %rdx mem */ + if (!isCopyOneReg) { + RegOperand ®Rhs1 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, extraSize, kRegTyInt); + MemOperand &memSymbo1 = + cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, static_cast(stOffset + k8ByteSize), extraSize); + SelectCopy(memSymbo1, regRhs1, extraTy); + } + return; +} + +void X64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) +{ + /* in x86-64, 8 bytes data is copied at a time */ + uint32 copyTimes = copySize / k8ByteSize; + uint32 extraCopySize = copySize % k8ByteSize; + ImmOperand *stOfstLhs = lhs.GetOffsetOperand(); + ImmOperand *stOfstRhs = rhs.GetOffsetOperand(); + RegOperand *baseLhs = lhs.GetBaseRegister(); + RegOperand *baseRhs = rhs.GetBaseRegister(); + if (copySize < 40U) { + for (int32 i = 0; i < copyTimes; ++i) { + /* prepare dest addr */ + MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndLhs.SetBaseRegister(*baseLhs); + ImmOperand &newStOfstLhs = static_cast(*stOfstLhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstLhs.SetValue(newStOfstLhs.GetValue() + i * k8ByteSize); + memOpndLhs.SetOffsetOperand(newStOfstLhs); + /* prepare src addr */ + MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndRhs.SetBaseRegister(*baseRhs); + ImmOperand &newStOfstRhs = static_cast(*stOfstRhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstRhs.SetValue(newStOfstRhs.GetValue() + i * k8ByteSize); + memOpndRhs.SetOffsetOperand(newStOfstRhs); + /* copy data */ + SelectCopy(memOpndLhs, memOpndRhs, PTY_a64); + } + } else { + /* adopt memcpy */ + std::vector opndVec; + opndVec.push_back(PrepareMemcpyParm(lhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(rhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(copySize)); + SelectLibCall("memcpy", opndVec, PTY_a64, nullptr, PTY_void); + return; + } + /* take care of extra content at the end less than the unit */ + if (extraCopySize == 0) { + return; + } + extraCopySize = ((extraCopySize <= k4ByteSize) ? 
k4ByteSize : k8ByteSize) * kBitsPerByte; + PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraCopySize); + MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize); + memOpndLhs.SetBaseRegister(*baseLhs); + ImmOperand &newStOfstLhs = static_cast(*stOfstLhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstLhs.SetValue(newStOfstLhs.GetValue() + copyTimes * k8ByteSize); + memOpndLhs.SetOffsetOperand(newStOfstLhs); + MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize); + memOpndRhs.SetBaseRegister(*baseRhs); + ImmOperand &newStOfstRhs = static_cast(*stOfstRhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstRhs.SetValue(newStOfstRhs.GetValue() + copyTimes * k8ByteSize); + memOpndRhs.SetOffsetOperand(newStOfstRhs); + SelectCopy(memOpndLhs, memOpndRhs, extraTy); +} + +void X64MPIsel::SelectLibCall(const std::string &funcName, std::vector &opndVec, PrimType primType, + Operand* retOpnd, PrimType retType) +{ + /* generate libcall */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt, retOpnd, retType); + return; +} + +void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt, Operand* retOpnd, PrimType retPrimType) +{ + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 0; i < opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *mirRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(mirRetType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + DEBUG_ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty; + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + DEBUG_ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, expRegOpnd->GetSize(), + cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + DEBUG_ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds); + + bool isFloat = IsPrimitiveFloat(retPrimType); + Insn::RetType insnRetType = isFloat ? 
Insn::kRegFloat : Insn::kRegInt; + callInsn.SetRetType(insnRetType); + /* no ret function */ + if (retOpnd == nullptr) { + return; + } + + CCLocInfo retMech; + parmLocator.LocateRetVal(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech); + if (retMech.GetRegCount() <= 0 || retMech.GetRegCount() > 1) { + CHECK_FATAL(false, "just support one register return"); + } + if (mirRetType != nullptr) { + callInsn.SetRetSize(static_cast(mirRetType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(mirRetType->GetPrimType())); + } + CHECK_FATAL(retOpnd->IsRegister(), "niy"); + RegOperand *regOpnd = static_cast(retOpnd); + regno_t retRegNo = retMech.GetReg0(); + if (regOpnd->GetRegisterNumber() != retRegNo) { + RegOperand &phyRetOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retRegNo, regOpnd->GetSize(), + cgFunc->GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*retOpnd, phyRetOpnd, retPrimType); + } + return; +} + +Operand *X64MPIsel::SelectFloatingConst(MIRConst &floatingConst, PrimType primType) const +{ + CHECK_FATAL(primType == PTY_f64 || primType == PTY_f32, "wrong const"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + Operand *result = nullptr; + if (primType == PTY_f64) { + result = SelectLiteral(static_cast(floatingConst), cgFunc->GetFunction(), labelIdxTmp++); + } else { + result = SelectLiteral(static_cast(floatingConst), cgFunc->GetFunction(), labelIdxTmp++); + } + cgFunc->SetLabelIdx(labelIdxTmp); + return result; +} + +RegOperand *X64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) +{ + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) +{ + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize); + SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) +{ + /* rhs is Func Return, it must be from Regread */ + if (opndRhs.IsRegister()) { + SelectIntAggCopyReturn(symbolMem, lhsInfo.size); + return; + } + /* In generally, rhs is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + SelectAggCopy(symbolMem, memRhs, lhsInfo.size); +} + +void X64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) +{ + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + MIRType *stmtMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); + + /* In generally, RHS is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + ImmOperand *stOfstSrc = memRhs.GetOffsetOperand(); + RegOperand *baseSrc = memRhs.GetBaseRegister(); + + if (stmtMirType->GetPrimType() == PTY_agg) { + /* generate move to regs for agg return */ + RegOperand *result[kFourRegister] = {nullptr}; /* up to 2 int or 4 fp */ + uint32 numRegs = (symbolInfo.size <= k8ByteSize) ? kOneRegister : kTwoRegister; + PrimType retPrimType = (symbolInfo.size <= k4ByteSize) ? 
PTY_u32 : PTY_u64; + for (int i = 0; i < numRegs; i++) { + MemOperand &rhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(retPrimType)); + rhsMemOpnd.SetBaseRegister(*baseSrc); + ImmOperand &newStOfstSrc = static_cast(*stOfstSrc->Clone(*cgFunc->GetMemoryPool())); + newStOfstSrc.SetValue(newStOfstSrc.GetValue() + i * k8ByteSize); + rhsMemOpnd.SetOffsetOperand(newStOfstSrc); + regno_t regNo = (i == 0) ? x64::RAX : x64::RDX; + result[i] = &cgFunc->GetOpndBuilder()->CreatePReg(regNo, GetPrimTypeBitSize(retPrimType), + cgFunc->GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*(result[i]), rhsMemOpnd, retPrimType); + } + } else { + RegOperand *lhsAddrOpnd = &SelectCopy2Reg(AddrOpnd, stmt.Opnd(0)->GetPrimType()); + MemOperand &symbolMem = + cgFunc->GetOpndBuilder()->CreateMem(*lhsAddrOpnd, symbolInfo.offset, GetPrimTypeBitSize(PTY_u64)); + SelectAggCopy(symbolMem, memRhs, symbolInfo.size); + } +} + +Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, ListOperand ¶mOpnds, ListOperand &retOpnds) +{ + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) +{ + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt)); + } + } +} + +void X64MPIsel::SelectCall(CallNode &callNode) +{ + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym); + + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + uint32 fpNum = 0; + SelectParmList(callNode, paramOpnds, fpNum); + /* x64abi: rax = with variable arguments passes information about the number of vector registers used */ + if (fn->IsVarargs()) { + ImmOperand &fpNumImm = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, fpNum); + RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt); + SelectCopy(raxOpnd, fpNumImm, PTY_i64); + } + + MIRType *retType = fn->GetReturnType(); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + const auto &deoptBundleInfo = callNode.GetDeoptBundleInfo(); + for (const auto &elem : deoptBundleInfo) { + auto valueKind = elem.second.GetMapleValueKind(); + if (valueKind == MapleValue::kPregKind) { + auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx()); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else if (valueKind == MapleValue::kConstKind) { + auto *opnd 
= SelectIntConst(static_cast(elem.second.GetConstValue()), PTY_i32); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else { + CHECK_FATAL(false, "not supported currently"); + } + } + cgFunc->AppendStackMapInsn(callInsn); +} + +void X64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) +{ + RegOperand &targetOpnd = SelectCopy2Reg(opnd0, iCallNode.Opnd(0)->GetPrimType()); + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + uint32 fpNum = 0; + SelectParmList(iCallNode, paramOpnds, fpNum); + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx()); + if (iCallNode.GetOpCode() == OP_icallproto) { + CHECK_FATAL((retType->GetKind() == kTypeFunction), "NIY, must be func"); + auto calleeType = static_cast(retType); + retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx()); + } + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + const auto &deoptBundleInfo = iCallNode.GetDeoptBundleInfo(); + for (const auto &elem : deoptBundleInfo) { + auto valueKind = elem.second.GetMapleValueKind(); + if (valueKind == MapleValue::kPregKind) { + auto *opnd = cgFunc->GetOpndFromPregIdx(elem.second.GetPregIdx()); + CHECK_FATAL(opnd != nullptr, "pregIdx has not been assigned Operand"); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else if (valueKind == MapleValue::kConstKind) { + auto *opnd = SelectIntConst(static_cast(elem.second.GetConstValue()), PTY_i32); + callInsn.AddDeoptBundleInfo(elem.first, *opnd); + } else { + CHECK_FATAL(false, "not supported currently"); + } + } + cgFunc->AppendStackMapInsn(callInsn); +} + +Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) +{ + return GetTargetRetOperand(primType, sReg); +} + +void X64MPIsel::SelectGoto(GotoNode &stmt) +{ + MOperator mOp = x64::MOP_jmpq_l; + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset());
+    LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset());
+    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
+    jmpInsn.AddOpndChain(targetOpnd);
+    cgFunc->GetCurBB()->SetKind(BB::kBBGoto);
+    return;
+}
+
+void X64MPIsel::SelectIgoto(Operand &opnd0)
+{
+    CHECK_FATAL(opnd0.IsRegister(), "only register implemented!");
+    MOperator mOp = x64::MOP_jmpq_r;
+    Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    jmpInsn.AddOpndChain(opnd0);
+    cgFunc->GetCurBB()->AppendInsn(jmpInsn);
+    return;
+}
+
+/* This function generates inline code to build the va_list data structure. */
+/* type $__va_list <[struct {
+     @__stack <* void> align(8),
+     @__gr_top <* void> align(8),
+     @__vr_top <* void> align(8),
+     @__gr_offs i32 align(4),
+     @__vr_offs i32 align(4)}]> align(8)
+*/
+void X64MPIsel::GenCVaStartIntrin(RegOperand &opnd, uint32 stkOffset)
+{
+    /* FP/LR are only pushed in regalloc(), after this intrinsic function */
+    RegOperand &fpOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RFP, k64BitSize, kRegTyInt);
+
+    uint32 fpLrLength = k16BitSize;
+    /* __stack */
+    if (stkOffset != 0) {
+        stkOffset += fpLrLength;
+    }
+
+    /* isvary resets StackFrameSize */
+    ImmOperand &vaListOnPassArgStackOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
+    RegOperand &vReg = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt);
+    SelectAdd(vReg, fpOpnd, vaListOnPassArgStackOffset, GetLoweredPtrType());
+
+    // The 8-byte fields of the va_list structure need this mop.
+    MOperator mOp = x64::MOP_movq_r_m;
+
+    /* mem operand in va_list struct (lhs) */
+    MemOperand &vaList = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, 0, k64BitSize);
+    Insn &fillInStkOffsetInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    fillInStkOffsetInsn.AddOpndChain(vReg).AddOpndChain(vaList);
+    cgFunc->GetCurBB()->AppendInsn(fillInStkOffsetInsn);
+
+    /* __gr_top ; it's the same as __stack before the 1st va_arg */
+    stkOffset = 0;
+    ImmOperand &grTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
+    SelectSub(vReg, fpOpnd, grTopOffset, PTY_a64);
+
+    /* mem operand in va_list struct (lhs) */
+    MemOperand &vaListGRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k8BitSize, k64BitSize);
+    Insn &fillInGRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    fillInGRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListGRTop);
+    cgFunc->GetCurBB()->AppendInsn(fillInGRTopInsn);
+
+    /* __vr_top */
+    int32 grAreaSize = static_cast<int32>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfGRSaveArea());
+    stkOffset += grAreaSize;
+    stkOffset += k8BitSize;
+    ImmOperand &vaListVRTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset);
+    SelectSub(vReg, fpOpnd, vaListVRTopOffset, PTY_a64);
+
+    MemOperand &vaListVRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k16BitSize, k64BitSize);
+    Insn &fillInVRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    fillInVRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListVRTop);
+    cgFunc->GetCurBB()->AppendInsn(fillInVRTopInsn);
+
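For readers following the offsets: the CreateMem calls in this function populate a 32-byte structure matching the __va_list layout in the comment above GenCVaStartIntrin. A minimal C++ mirror of that layout, as a sketch only; the struct and field names are hypothetical and inferred from the byte offsets 0/8/16/24/28 used here:

#include <cstdint>

struct VaListLayout {   // hypothetical mirror, not a type introduced by this patch
    void *stack;        // +0:  next overflow (stack-passed) argument
    void *grTop;        // +8:  top of the general-purpose register save area
    void *vrTop;        // +16: top of the FP/SIMD register save area
    int32_t grOffs;     // +24: negative offset from grTop to the next saved GP reg
    int32_t vrOffs;     // +28: negative offset from vrTop to the next saved FP reg
};
static_assert(sizeof(VaListLayout) == 32, "matches the 32-byte __va_list");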
+    // The 4-byte fields of the va_list structure need this mop.
+    mOp = x64::MOP_movl_r_m;
+
+    /* __gr_offs */
+    int32 grOffs = 0 - grAreaSize;
+    ImmOperand &vaListGROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, grOffs);
+    RegOperand &grOffsRegOpnd = SelectCopy2Reg(vaListGROffsOffset, PTY_a32);
+
+    MemOperand &vaListGROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize, k64BitSize);
+    Insn &fillInGROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    fillInGROffsInsn.AddOpndChain(grOffsRegOpnd).AddOpndChain(vaListGROffs);
+    cgFunc->GetCurBB()->AppendInsn(fillInGROffsInsn);
+
+    /* __vr_offs */
+    int32 vrOffs = static_cast<int32>(
+        0UL - static_cast<uint64>(static_cast<X64MemLayout *>(cgFunc->GetMemlayout())->GetSizeOfVRSaveArea()));
+    ImmOperand &vaListVROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, vrOffs);
+    RegOperand &vrOffsRegOpnd = SelectCopy2Reg(vaListVROffsOffset, PTY_a32);
+
+    MemOperand &vaListVROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize + 4, k64BitSize);
+    Insn &fillInVROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    fillInVROffsInsn.AddOpndChain(vrOffsRegOpnd).AddOpndChain(vaListVROffs);
+    cgFunc->GetCurBB()->AppendInsn(fillInVROffsInsn);
+}
+
+void X64MPIsel::SelectOverFlowCall(const IntrinsiccallNode &intrnNode)
+{
+    DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands");
+    MIRIntrinsicID intrinsic = intrnNode.GetIntrinsic();
+    // compute the arithmetic result
+    PrimType type = intrnNode.Opnd(0)->GetPrimType();
+    CHECK_FATAL(intrnNode.Opnd(0)->GetPrimType() == intrnNode.Opnd(1)->GetPrimType(), "should be same");
+    RegOperand &opnd0 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(0)),
+                                       intrnNode.Opnd(0)->GetPrimType()); /* first argument of intrinsic */
+    RegOperand &opnd1 = SelectCopy2Reg(*HandleExpr(intrnNode, *intrnNode.Opnd(1)),
+                                       intrnNode.Opnd(1)->GetPrimType()); /* second argument of intrinsic */
+    RegOperand &resReg =
+        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), cgFunc->GetRegTyFromPrimTy(type));
+    if (intrinsic == INTRN_ADD_WITH_OVERFLOW) {
+        SelectAdd(resReg, opnd0, opnd1, type);
+    } else if (intrinsic == INTRN_SUB_WITH_OVERFLOW) {
+        SelectSub(resReg, opnd0, opnd1, type);
+    } else if (intrinsic == INTRN_MUL_WITH_OVERFLOW) {
+        SelectMpy(resReg, opnd0, opnd1, type);
+    } else {
+        CHECK_FATAL(false, "niy");
+    }
+
+    // store the result value and the overflow flag
+    auto *p2nrets = &intrnNode.GetReturnVec();
+    if (p2nrets->size() == k1ByteSize) {
+        StIdx stIdx = (*p2nrets)[0].first;
+        MIRSymbol *sym =
+            cgFunc->GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
+        MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*sym, 1);
+        MemOperand &memOperand2 = GetOrCreateMemOpndFromSymbol(*sym, 2);
+        SelectCopy(memOperand, resReg, type);
+        Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_seto_m, X64CG::kMd[MOP_seto_m]);
+        insn.AddOpndChain(memOperand2);
+        cgFunc->GetCurBB()->AppendInsn(insn);
+    } else {
+        CHECK_FATAL(false, "should not happen");
+    }
+    return;
+}
+
+/* The second parameter of va_start does not need to be handled here;
+ * it is mainly used in the proepilog phase. */
+void X64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode)
+{
+    DEBUG_ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands");
+    /* 2 operands, but only 1 needed.
Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = SelectCopy2Reg(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) + * where struct S has size 32, address of r and s are on stack but they are named. + */ + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo pLoc; + uint32 stkSize = 0; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); i++) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(cgFunc->GetFunction().GetNthParamTyIdx(i)); + parmLocator.LocateNextParm(*ty, pLoc); + if (pLoc.reg0 == kRinvalid) { /* on stack */ + stkSize = static_cast(pLoc.memOffset + pLoc.memSize); + } + } + + stkSize = static_cast(RoundUp(stkSize, GetPointerSize())); + + GenCVaStartIntrin(opnd0, stkSize); + + return; +} + +void X64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) +{ + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + // JS + if (intrinsic == INTRN_ADD_WITH_OVERFLOW || intrinsic == INTRN_SUB_WITH_OVERFLOW || + intrinsic == INTRN_MUL_WITH_OVERFLOW) { + SelectOverFlowCall(intrinsiccallNode); + return; + } + + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) +{ + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = + cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(indexOpnd, opnd0, opnd1, srcType); + + /* 
load the displacement into a register by accessing memory at base + index * 8 */ + /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */ + MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64)); + RegOperand &baseReg = cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt); + dstMemOpnd.SetBaseRegister(baseReg); + dstMemOpnd.SetIndexRegister(indexOpnd); + dstMemOpnd.SetOffsetOperand(stOpnd); + dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize)); + + /* jumping to the absolute address which is stored in dstRegOpnd */ + MOperator mOp = x64::MOP_jmpq_m; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(dstMemOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) +{ + /* get mirSymbol info*/ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + /* of AddrofNode must be either ptr, a32 or a64 */ + PrimType ptype = expr.GetPrimType(); + RegOperand &resReg = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(ptype), cgFunc->GetRegTyFromPrimTy(ptype)); + MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + uint32 pSize = GetPrimTypeSize(ptype); + MOperator mOp; + if (pSize <= k4ByteSize) { + mOp = x64::MOP_leal_m_r; + } else if (pSize <= k8ByteSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) +{ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + /* must be either a32 or a64. */ + PrimType primType = (instrSize == k8ByteSize) ? PTY_a64 : (instrSize == k4ByteSize) ? PTY_a32 : PTY_begin; + CHECK_FATAL(primType != PTY_begin, "prim-type of Func Addr must be either a32 or a64!"); + MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx()); + MIRSymbol *symbol = mirFunction->GetFuncSymbol(); + MIRStorageClass storageClass = symbol->GetStorageClass(); + RegOperand &resReg = + cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + if (storageClass == maple::kScText && symbol->GetSKind() == maple::kStFunc) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*symbol, 0, 0); + X64MOP_t mOp = x64::MOP_movabs_s_r; + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(stOpnd).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + } else { + CHECK_FATAL(false, "NIY"); + } + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) +{ + PrimType primType = expr.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &baseOpnd = + cgFunc->GetOpndBuilder()->CreatePReg(x64::RIP, bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + + auto labelStr = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(expr.GetOffset()); + MIRSymbol *labelSym = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + DEBUG_ASSERT(labelSym != nullptr, "null ptr check"); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + labelSym->SetNameStrIdx(labelStr); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + DEBUG_ASSERT(etype != nullptr, "null ptr check"); + auto *labelConst = + cgFunc->GetMemoryPool()->New(expr.GetOffset(), cgFunc->GetFunction().GetPuidx(), *etype); + DEBUG_ASSERT(labelConst != nullptr, "null ptr check"); + labelSym->SetKonst(labelConst); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(bitSize); + memOpnd.SetBaseRegister(baseOpnd); + memOpnd.SetOffsetOperand(stOpnd); + + X64MOP_t mOp = x64::MOP_begin; + if (bitSize <= k32BitSize) { + mOp = x64::MOP_leal_m_r; + } else if (bitSize <= k64BitSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOpnd).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resOpnd; +} + +static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) +{ + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jl_l : MOP_jb_l) : (isSigned ? MOP_jge_l : MOP_jae_l); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jle_l : MOP_jbe_l) : (isSigned ? MOP_jg_l : MOP_ja_l); + case OP_gt: + return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jg_l : MOP_ja_l)) : (isSigned ? MOP_jle_l : MOP_jbe_l); + case OP_ge: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) : (isSigned ? MOP_jl_l : MOP_jb_l); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) +{ + Opcode opcode = stmt.GetOpCode(); + X64MOP_t jmpOperator = x64::MOP_begin; + if (opnd0.IsImmediate()) { + DEBUG_ASSERT(opnd0.IsIntImmediate(), "only support int immediate"); + DEBUG_ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode"); + ImmOperand &immOpnd0 = static_cast(opnd0); + if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) || + (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) { + return; + } + jmpOperator = x64::MOP_jmpq_l; + cgFunc->SetCurBBKind(BB::kBBGoto); + } else { + PrimType primType; + Opcode condOpcode = condNode.GetOpCode(); + if (!kOpcodeInfo.IsCompare(condOpcode)) { + primType = condNode.GetPrimType(); + ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0); + SelectCmp(opnd0, imm0, primType); + condOpcode = OP_ne; + } else { + primType = static_cast(condNode).GetOpndType(); + } + bool isFloat = IsPrimitiveFloat(primType); + jmpOperator = PickJmpInsn(opcode, condOpcode, isFloat, IsSignedInteger(primType)); + cgFunc->SetCurBBKind(BB::kBBIf); + } + /* gen targetOpnd, .L.xxx__xx */ + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + /* select jump Insn */ + Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator])); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectStrLiteral(ConststrNode &constStr) +{ + std::string labelStr; + labelStr.append(".LUstr_"); + labelStr.append(std::to_string(constStr.GetStrIdx())); + MIRSymbol *labelSym = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + auto *c = cgFunc->GetMemoryPool()->New(constStr.GetStrIdx(), *etype); + if (labelSym == nullptr) { + labelSym = cgFunc->GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c->GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc->NewMirConst(*c)); + } + if (c->GetPrimType() == PTY_ptr) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, cgFunc->GetRegTyFromPrimTy(PTY_a64)); + Insn &addrOfInsn = (cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_s_r, X64CG::kMd[x64::MOP_movabs_s_r])); + addrOfInsn.AddOpndChain(stOpnd).AddOpndChain(addrOpnd); + cgFunc->GetCurBB()->AppendInsn(addrOfInsn); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) +{ + uint32 bitSize = GetPrimTypeBitSize(primType); + regno_t retReg = 0; + switch (sReg) { + case kSregRetval0: + retReg = IsPrimitiveFloat(primType) ? x64::V0 : x64::RAX; + break; + case kSregRetval1: + retReg = x64::RDX; + break; + default: + CHECK_FATAL(false, "GetTargetRetOperand: NIY"); + break; + } + RegOperand &parmRegOpnd = + cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + + return resOpnd; +} + +void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + uint32 bitSize = GetPrimTypeBitSize(primType); + SelectCopy(resOpnd, opnd0, primType); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType); + if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + X64MOP_t mOp = (bitSize == k64BitSize) + ? x64::MOP_imulq_r_r + : (bitSize == k32BitSize) ? x64::MOP_imull_r_r + : (bitSize == k16BitSize) ? 
x64::MOP_imulw_r_r : x64::MOP_begin; + CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + } else if (IsPrimitiveFloat(primType)) { + X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_mulfd_r_r : + (bitSize == k32BitSize) ? x64::MOP_mulfs_r_r : x64::MOP_begin; + CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + } +} + +/* + * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX) Remainder(EDX) + * IDIV instruction perform signed division of EDX:EAX by the contents of 32-bit register or memory location and + * store the quotient in EAX and the remainder in EDX. + * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the sign + * of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor. + * An overflow generates a #DE (divide error) exception, rather than setting the OF flag. + * To avoid overflow problems, precede this instruction with a CDQ instruction to sign-extend the dividend Divisor. + * CDQ Sign-extend EAX into EDX:EAX. This action helps avoid overflow problems in signed number arithmetic. + */ +Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + Operand *resOpnd = nullptr; + if (!IsPrimitiveVector(primType)) { + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode()); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType primType = node.GetPrimType(); + Operand *resOpnd = nullptr; + if (!IsPrimitiveVector(primType)) { + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode()); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) +{ + DEBUG_ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode"); + if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + uint32 bitSize = GetPrimTypeBitSize(primType); + /* copy dividend to eax */ + RegOperand &raxOpnd = + cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(raxOpnd, opnd0, primType); + + RegOperand &rdxOpnd = + cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + bool isSigned = IsSignedInteger(primType); + if (isSigned) { + /* cdq edx:eax = sign-extend of eax*/ + X64MOP_t cvtMOp = + (bitSize == k64BitSize) + ? x64::MOP_cqo + : (bitSize == k32BitSize) ? x64::MOP_cdq : (bitSize == k16BitSize) ? 
x64::MOP_cwd : x64::MOP_begin; + CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping"); + Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd); + cgFunc->GetCurBB()->AppendInsn(cvtInsn); + } else { + /* set edx = 0 */ + SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType); + } + /* div */ + X64MOP_t divMOp = + (bitSize == k64BitSize) + ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r) + : (bitSize == k32BitSize) + ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r) + : (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r) : x64::MOP_begin; + CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + /* return */ + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType); + return &resOpnd; + } else if (IsPrimitiveFloat(primType)) { + X64MOP_t divMOp = x64::MOP_divsd_r; + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + return &opnd0; + } else { + CHECK_FATAL(false, "NIY"); + } +} + +Operand *X64MPIsel::SelectLnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(dtype), 0); + SelectCmp(regOpnd0, immOpnd, dtype); + SelectCmpResult(*resOpnd, OP_eq, dtype, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + PrimType dtype = node.GetPrimType(); + PrimType primOpndType = node.GetOpndType(); + RegOperand *resOpnd = nullptr; + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType()); + if (!IsPrimitiveVector(node.GetPrimType())) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCmp(regOpnd0, regOpnd1, primOpndType); + Opcode parentOp = parent.GetOpCode(); + if (parentOp == OP_brfalse || parentOp == OP_brtrue || parentOp == OP_select) { + return resOpnd; + } + SelectCmpResult(*resOpnd, node.GetOpCode(), dtype, primOpndType); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) +{ + x64::X64MOP_t cmpMOp = x64::MOP_begin; + if (IsPrimitiveInteger(primType)) { + cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType); + } else if (IsPrimitiveFloat(primType)) { + cmpMOp = x64::MOP_ucomisd_r_r; + } else { + CHECK_FATAL(false, "NIY"); + } + DEBUG_ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp"); + Insn &cmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp])); + cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(cmpInsn); +} + +void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType 
+
+Operand *X64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd,
+                                 const BaseNode &parent)
+{
+    PrimType dtype = expr.GetPrimType();
+    RegOperand &resOpnd =
+        cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), cgFunc->GetRegTyFromPrimTy(dtype));
+    RegOperand &trueRegOpnd = SelectCopy2Reg(trueOpnd, dtype, expr.Opnd(1)->GetPrimType());
+    RegOperand &falseRegOpnd = SelectCopy2Reg(falseOpnd, dtype, expr.Opnd(2)->GetPrimType());
+    Opcode cmpOpcode;
+    PrimType cmpPrimType;
+    if (kOpcodeInfo.IsCompare(expr.Opnd(0)->GetOpCode())) {
+        CompareNode *cmpNode = static_cast<CompareNode *>(expr.Opnd(0));
+        DEBUG_ASSERT(cmpNode != nullptr, "null ptr check");
+        cmpOpcode = cmpNode->GetOpCode();
+        cmpPrimType = cmpNode->GetOpndType();
+    } else {
+        cmpPrimType = expr.Opnd(0)->GetPrimType();
+        cmpOpcode = OP_ne;
+        ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(cmpPrimType), 0);
+        SelectCmp(cond, immOpnd, cmpPrimType);
+    }
+    SelectSelect(resOpnd, trueRegOpnd, falseRegOpnd, dtype, cmpOpcode, cmpPrimType);
+    return &resOpnd;
+}
+
+void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
+                             Opcode cmpOpcode, PrimType cmpPrimType)
+{
+    CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY");
+    bool isSigned = !IsPrimitiveUnsigned(primType);
+    uint32 bitSize = GetPrimTypeBitSize(primType);
+    if (bitSize == k8BitSize) {
+        /* cmov does not support 8-bit operands, cvt to 32-bit */
+        PrimType cvtType = isSigned ? PTY_i32 : PTY_u32;
+        RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt);
+        Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType);
+        Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType);
+        SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType);
+        SelectCopy(resOpnd, tmpResOpnd, primType, cvtType);
+        return;
+    }
+    RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType);
+    SelectCopy(resOpnd, falseOpnd, primType);
+    x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType));
+    DEBUG_ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp");
+    Insn &cmovInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]);
+    cmovInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd);
+    cgFunc->GetCurBB()->AppendInsn(cmovInsn);
+}
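+/*
+ * The branch-free shape SelectSelect emits: the result starts as the false
+ * value and is conditionally overwritten, e.g. for "res = (a < b) ? t : f":
+ *     cmpl  %b, %a
+ *     movl  %f, %res
+ *     cmovl %t, %res
+ * Because cmovcc has no 8-bit encoding, i8/u8 selects are first widened to
+ * 32 bits (the k8BitSize branch above) and the result is narrowed back.
+ */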
+
+void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType)
+{
+    if (IsPrimitiveInteger(primType)) {
+        SelectCmp(opnd0, opnd1, primType);
+        Opcode cmpOpcode = isMin ? OP_lt : OP_gt;
+        SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType);
+    } else {
+        CHECK_FATAL(false, "NIY type max or min");
+    }
+}
+
+Operand *X64MPIsel::SelectCexp(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    PrimType primType = node.GetPrimType();
+    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, primType);
+    Operand &retReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType),
+                                                           cgFunc->GetRegTyFromPrimTy(primType));
+    std::vector<Operand *> opndVec = {&regOpnd0};
+    SelectLibCall("exp", opndVec, primType, &retReg, primType);
+    return &retReg;
+}
+
+Operand *X64MPIsel::SelectCctz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
+    PrimType origPrimType = node.Opnd(0)->GetPrimType();
+    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
+
+    bool is64BitCtz = node.GetIntrinsic() == INTRN_C_ctz64;
+    MOperator mopBsf = is64BitCtz ? x64::MOP_bsfq_r_r : x64::MOP_bsfl_r_r;
+    Insn &bsfInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsf, X64CG::kMd[mopBsf]);
+    bsfInsn.AddOpndChain(opnd).AddOpndChain(opnd);
+    cgFunc->GetCurBB()->AppendInsn(bsfInsn);
+
+    PrimType retType = node.GetPrimType();
+    RegOperand &destReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType),
+                                                               cgFunc->GetRegTyFromPrimTy(retType));
+    // ctz i32 (u32) => cvt u32 -> i32
+    // ctz i32 (u64) => cvt u64 -> i32
+    SelectIntCvt(destReg, opnd, retType, origPrimType);
+    return &destReg;
+}
+
+Operand *X64MPIsel::SelectCclz(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    CHECK_FATAL(opnd0.IsImmediate() || opnd0.IsRegister(), "unhandled operand type here!");
+    PrimType origPrimType = node.Opnd(0)->GetPrimType();
+    RegOperand &opnd = SelectCopy2Reg(opnd0, origPrimType);
+
+    bool is64BitClz = node.GetIntrinsic() == INTRN_C_clz64;
+    MOperator mopBsr = is64BitClz ? x64::MOP_bsrq_r_r : x64::MOP_bsrl_r_r;
+    Insn &bsrInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopBsr, X64CG::kMd[mopBsr]);
+    bsrInsn.AddOpndChain(opnd).AddOpndChain(opnd);
+    cgFunc->GetCurBB()->AppendInsn(bsrInsn);
+
+    MOperator mopXor = is64BitClz ? x64::MOP_xorq_i_r : x64::MOP_xorl_i_r;
+    ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(origPrimType),
+                                                          GetPrimTypeBitSize(origPrimType) - 1);
+    Insn &xorInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopXor, X64CG::kMd[mopXor]);
+    xorInsn.AddOpndChain(imm).AddOpndChain(opnd);
+    cgFunc->GetCurBB()->AppendInsn(xorInsn);
+
+    PrimType retType = node.GetPrimType();
+    RegOperand &destReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(retType),
+                                                               cgFunc->GetRegTyFromPrimTy(retType));
+    SelectIntCvt(destReg, opnd, retType, origPrimType);
+    return &destReg;
+}
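+/*
+ * Why the xor works: bsr yields the bit index of the most significant set
+ * bit, so for a 32-bit value the leading-zero count is 31 - index, and
+ * "31 - index" equals "index ^ 31" for any index in [0, 31]. A standalone
+ * C++ rendering of the same computation (defined only for x != 0, since
+ * bsr/bsf leave their destination undefined on zero input):
+ *     uint32_t ClzViaBsr(uint32_t x) {  // mirrors the bsr + xor pair above
+ *         uint32_t idx = 31;
+ *         while (((x >> idx) & 1u) == 0) { --idx; }  // stand-in for bsr
+ *         return idx ^ 31u;
+ *     }
+ *     // ClzViaBsr(1) == 31, ClzViaBsr(0x80000000u) == 0
+ */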
+
+Operand *X64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent)
+{
+    PrimType dtype = node.GetPrimType();
+    auto bitWidth = GetPrimTypeBitSize(dtype);
+    // bswap only supports 32/64-bit; xchg covers the 16-bit case -- xchg al, ah
+    CHECK_FATAL(bitWidth == k16BitSize || bitWidth == k32BitSize || bitWidth == k64BitSize,
+                "NIY, unsupported bitWidth.");
+
+    RegOperand *resOpnd = nullptr;
+
+    if (bitWidth == k16BitSize) {
+        /*
+         * For 16-bit, use xchg, such as: xchg ah, al. So the register must support the high 8 bits.
+         * For x64, we can use RAX(AH:AL), RBX(BH:BL), RCX(CH:CL), RDX(DH:DL).
+         * The RA does not perform special processing for the high 8-bit case.
+         * So, we use the RAX register here.
+         */
+        resOpnd = &cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
+        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
+        RegOperand &lowerOpnd =
+            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
+        RegOperand &highOpnd =
+            cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, cgFunc->GetRegTyFromPrimTy(dtype));
+        highOpnd.SetHigh8Bit();
+        x64::X64MOP_t xchgMop = MOP_xchgb_r_r;
+        Insn &xchgInsn = cgFunc->GetInsnBuilder()->BuildInsn(xchgMop, X64CG::kMd[xchgMop]);
+        xchgInsn.AddOpndChain(highOpnd).AddOpndChain(lowerOpnd);
+        cgFunc->GetCurBB()->AppendInsn(xchgInsn);
+    } else {
+        resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(bitWidth, cgFunc->GetRegTyFromPrimTy(dtype));
+        SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType());
+        x64::X64MOP_t bswapMop = (bitWidth == k64BitSize) ? MOP_bswapq_r : MOP_bswapl_r;
+        Insn &bswapInsn = cgFunc->GetInsnBuilder()->BuildInsn(bswapMop, X64CG::kMd[bswapMop]);
+        bswapInsn.AddOperand(*resOpnd);
+        cgFunc->GetCurBB()->AppendInsn(bswapInsn);
+    }
+    return resOpnd;
+}
+
+RegOperand &X64MPIsel::GetTargetStackPointer(PrimType primType)
+{
+    return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, GetPrimTypeBitSize(primType),
+                                                cgFunc->GetRegTyFromPrimTy(primType));
+}
+
+RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType)
+{
+    return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType),
+                                                cgFunc->GetRegTyFromPrimTy(primType));
+}
+
+void X64MPIsel::SelectRetypeFloat(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType)
+{
+    uint32 fromSize = GetPrimTypeBitSize(fromType);
+    [[maybe_unused]] uint32 toSize = GetPrimTypeBitSize(toType);
+    DEBUG_ASSERT(fromSize == toSize, "retype bit width doesn't match");
+    RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
+    MOperator mOp = x64::MOP_begin;
+    if (fromSize == k32BitSize) {
+        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movd_fr_r : x64::MOP_begin;
+    } else if (fromSize == k64BitSize) {
+        mOp = IsPrimitiveFloat(fromType) ? x64::MOP_movq_fr_r : x64::MOP_movq_r_fr;
+    } else {
+        CHECK_FATAL(false, "niy");
+    }
+    CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
+    Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    (void)insn.AddOpndChain(regOpnd0).AddOpndChain(resOpnd);
+    cgFunc->GetCurBB()->AppendInsn(insn);
+    return;
+}
+
+void X64MPIsel::SelectAsm(AsmNode &node)
+{
+    cgFunc->SetHasAsm();
+    CHECK_FATAL(false, "NIY");
+}
+} // namespace maplebe
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_abi.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_abi.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..27ed3ad53b784bd5cf3bf1e34950221b481a4f61
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_abi.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "x64_cgfunc.h"
+#include "becommon.h"
+#include "x64_isa.h"
+
+namespace maplebe {
+using namespace maple;
+namespace x64 {
+bool IsAvailableReg(X64reg reg)
+{
+    switch (reg) {
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, \
+                isExtraSpill)                                                                               \
+    case R##ID:                                                                                             \
+        return canBeAssigned;
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
+    case V##ID:                                                                                               \
+        return canBeAssigned;
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+        default:
+            return false;
+    }
+}
+
+bool IsCallerSaveReg(X64reg regNO)
+{
+    return (regNO == R0) || (regNO == R1) || (R2 <= regNO && regNO <= R3) || (R6 <= regNO && regNO <= R7) ||
+           (R8 <= regNO && regNO <= R11) || (V2 <= regNO && regNO <= V7) || (V16 <= regNO && regNO <= V23);
+}
+
+bool IsCalleeSavedReg(X64reg reg)
+{
+    switch (reg) {
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, \
+                isExtraSpill)                                                                               \
+    case R##ID:                                                                                             \
+        return isCalleeSave;
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
+    case V##ID:                                                                                               \
+        return isCalleeSave;
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+        default:
+            return false;
+    }
+}
+
+bool IsParamReg(X64reg reg)
+{
+    switch (reg) {
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, \
+                isExtraSpill)                                                                               \
+    case R##ID:                                                                                             \
+        return isParam;
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
+    case V##ID:                                                                                               \
+        return isParam;
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+        default:
+            return false;
+    }
+}
+
+bool IsSpillReg(X64reg reg)
+{
+    switch (reg) {
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, \
+                isExtraSpill)                                                                               \
+    case R##ID:                                                                                             \
+        return isSpill;
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
+    case V##ID:                                                                                               \
+        return isSpill;
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+        default:
+            return false;
+    }
+}
+
+bool IsExtraSpillReg(X64reg reg)
+{
+    switch (reg) {
+/* integer registers */
+#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, \
+                isExtraSpill)                                                                               \
+    case R##ID:                                                                                             \
+        return isExtraSpill;
+#define INT_REG_ALIAS(ALIAS, ID)
+#include "x64_int_regs.def"
+#undef INT_REG
+#undef INT_REG_ALIAS
+/* fp-simd registers */
+#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
+    case V##ID:                                                                                               \
+        return isExtraSpill;
+#include "x64_fp_simd_regs.def"
+#undef FP_SIMD_REG
+        default:
+            return false;
+    }
+}
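+// Each predicate in this file is a switch generated from the register tables
+// via X-macros: x64_int_regs.def and x64_fp_simd_regs.def supply one
+// INT_REG(...)/FP_SIMD_REG(...) row per register, and each function
+// re-defines the macro to pluck out a different boolean column. A
+// self-contained miniature of the technique (hypothetical two-row table,
+// not the real register file):
+//     #define REG_TABLE(X) X(AX, false) X(BX, true)   // (id, isCalleeSave)
+//     enum Reg {
+//     #define DEF_ENUM(ID, SAVE) R##ID,
+//         REG_TABLE(DEF_ENUM)
+//     #undef DEF_ENUM
+//     };
+//     bool IsCalleeSaved(Reg r) {
+//         switch (r) {
+//     #define DEF_CASE(ID, SAVE) case R##ID: return SAVE;
+//             REG_TABLE(DEF_CASE)
+//     #undef DEF_CASE
+//         }
+//         return false;
+//     }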
+
+bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd)
+{
+    /* if the insn has 3 RegOpnds, the previous reg is used to spill. */
+    if (has3RegOpnd) {
+        return IsSpillReg(regNO) || IsExtraSpillReg(regNO);
+    }
+    return IsSpillReg(regNO);
+}
+} /* namespace x64 */
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_args.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_args.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..feecd95167db534389512a09d0ad73e958e3eb4a
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_args.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "x64_cg.h"
+#include "x64_isa.h"
+#include "x64_MPISel.h"
+
+namespace maplebe {
+using namespace maple;
+
+void X64MoveRegArgs::Run()
+{
+    MoveVRegisterArgs();
+    MoveRegisterArgs();
+}
+
+void X64MoveRegArgs::CollectRegisterArgs(std::map<uint32, X64reg> &argsList, std::vector<uint32> &indexList,
+                                         std::map<uint32, X64reg> &pairReg, std::vector<uint32> &numFpRegs,
+                                         std::vector<uint32> &fpSize) const
+{
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    uint32 numFormal = static_cast<uint32>(x64CGFunc->GetFunction().GetFormalCount());
+    numFpRegs.resize(numFormal);
+    fpSize.resize(numFormal);
+    X64CallConvImpl parmlocator(x64CGFunc->GetBecommon());
+    CCLocInfo ploc;
+    uint32 start = 0;
+    if (numFormal) {
+        MIRFunction *func = const_cast<MIRFunction *>(x64CGFunc->GetBecommon().GetMIRModule().CurFunction());
+        if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) {
+            TyIdx tyIdx = x64CGFunc->GetBecommon().GetFuncReturnType(*func);
+            if (x64CGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) {
+                start = 1;
+            }
+        }
+    }
+    for (uint32 i = start; i < numFormal; ++i) {
+        MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i);
+        parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction());
+        if (ploc.reg0 == kRinvalid) {
+            continue;
+        }
+        X64reg reg0 = static_cast<X64reg>(ploc.reg0);
+        MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i);
+        if (sym->IsPreg()) {
+            continue;
+        }
+        argsList[i] = reg0;
+        indexList.emplace_back(i);
+        if (ploc.reg1 == kRinvalid) {
+            continue;
+        }
+        if (ploc.numFpPureRegs) {
+            uint32 index = i;
+            numFpRegs[index] = ploc.numFpPureRegs;
+            fpSize[index] = ploc.fpSize;
+            continue;
+        }
+        pairReg[i] = static_cast<X64reg>(ploc.reg1);
+    }
+}
+
+ArgInfo X64MoveRegArgs::GetArgInfo(std::map<uint32, X64reg> &argsList, uint32 argIndex,
+                                   std::vector<uint32> &numFpRegs, std::vector<uint32> &fpSize) const
+{
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    ArgInfo argInfo;
+    argInfo.reg = argsList[argIndex];
+    argInfo.mirTy = x64CGFunc->GetFunction().GetNthParamType(argIndex);
+    argInfo.symSize = x64CGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex());
+    argInfo.memPairSecondRegSize = 0;
+    argInfo.doMemPairOpt = false;
+    argInfo.createTwoStores = false;
+    argInfo.isTwoRegParm = false;
+    if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) {
+        argInfo.isTwoRegParm = true;
+        if (numFpRegs[argIndex] > kOneRegister) {
+            argInfo.symSize = fpSize[argIndex];
+        } else {
+            if (argInfo.symSize > k12ByteSize) {
+                argInfo.memPairSecondRegSize = k8ByteSize;
+            } else {
+                /* round the stack space required for storing the struct up to 4 bytes */
+                argInfo.memPairSecondRegSize = k4ByteSize;
+            }
+            argInfo.doMemPairOpt = true;
+            argInfo.symSize = GetPointerSize();
+        }
+    } else if (argInfo.symSize > k16ByteSize) {
+        /* For large struct passing, a pointer to the copy is used. */
+        argInfo.symSize = GetPointerSize();
+    } else {
+        if (argInfo.symSize > k4ByteSize) {
+            argInfo.symSize = k8ByteSize;
+        } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize <= k4ByteSize)) {
+            argInfo.symSize = k4ByteSize;
+        }
+    }
+
+    if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) {
+        /* vector type */
+        CHECK_FATAL(false, "NIY");
+    }
+
+    argInfo.regType = (argInfo.reg < V0) ? kRegTyInt : kRegTyFloat;
+    argInfo.sym = x64CGFunc->GetFunction().GetFormal(argIndex);
+    CHECK_NULL_FATAL(argInfo.sym);
+    argInfo.symLoc =
+        static_cast<X64SymbolAlloc *>(x64CGFunc->GetMemlayout()->GetSymAllocInfo(argInfo.sym->GetStIndex()));
+    CHECK_NULL_FATAL(argInfo.symLoc);
+    return argInfo;
+}
+
+void X64MoveRegArgs::GenerateMovInsn(ArgInfo &argInfo, X64reg reg2)
+{
+    /* reg2 is required when the struct size is between 8 and 16 bytes */
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    int32 stOffset = x64CGFunc->GetBaseOffset(*argInfo.symLoc);
+    RegOperand *baseOpnd = static_cast<RegOperand *>(x64CGFunc->GetBaseReg(*argInfo.symLoc));
+    uint32 opndSize = argInfo.symSize * kBitsPerByte;
+    RegOperand &regOpnd = x64CGFunc->GetOpndBuilder()->CreatePReg(argInfo.reg, opndSize, argInfo.regType);
+    MemOperand *memOpnd = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize);
+
+    MOperator mOp = x64::MOP_begin;
+    if (opndSize == k64BitSize) {
+        mOp = argInfo.regType == kRegTyInt ? x64::MOP_movq_r_m : x64::MOP_movfd_r_m;
+    } else if (opndSize == k32BitSize) {
+        mOp = argInfo.regType == kRegTyInt ? x64::MOP_movl_r_m : x64::MOP_movfs_r_m;
+    } else if (opndSize == k16BitSize) {
+        mOp = argInfo.regType == kRegTyInt ? x64::MOP_movw_r_m : x64::MOP_begin;
+    } else if (opndSize == k8BitSize) {
+        mOp = argInfo.regType == kRegTyInt ? x64::MOP_movb_r_m : x64::MOP_begin;
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+    CHECK_FATAL(mOp != x64::MOP_begin, "NIY");
+    Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    insn.AddOpndChain(regOpnd).AddOpndChain(*memOpnd);
+    x64CGFunc->GetCurBB()->AppendInsn(insn);
+    if (reg2 != kRinvalid) {
+        RegOperand &regOpnd2 = x64CGFunc->GetOpndBuilder()->CreatePReg(reg2, opndSize, argInfo.regType);
+        MemOperand *memOpnd2 = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset + 8, opndSize);
+        Insn &insn2 = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+        insn2.AddOpndChain(regOpnd2).AddOpndChain(*memOpnd2);
+        x64CGFunc->GetCurBB()->AppendInsn(insn2);
+    }
+}
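+/*
+ * Shape of the prologue stores GenerateMovInsn emits for
+ * "void f(long a, double d)" (illustrative only; actual offsets come from
+ * GetBaseOffset on the formal's symbol location, and the register order from
+ * the SysV-style parameter allocation in x64_call_conv.cpp):
+ *     movq  %rdi, -8(%rbp)      # integer formal homed to its stack slot
+ *     movsd %xmm0, -16(%rbp)    # floating formal
+ * For a two-register aggregate (8 < size <= 16 bytes) the reg2 path adds a
+ * second store at stOffset + 8 for the upper eightbyte.
+ */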
+
+void X64MoveRegArgs::MoveRegisterArgs()
+{
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    BB *formerCurBB = x64CGFunc->GetCurBB();
+    x64CGFunc->GetDummyBB()->ClearInsns();
+    x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB());
+
+    /* <[0], maplebe::R0>; <[1], maplebe::V0> */
+    std::map<uint32, X64reg> movePara;
+    /* [0], [1] */
+    std::vector<uint32> moveParaIndex;
+    std::map<uint32, X64reg> pairReg;
+    std::vector<uint32> numFpRegs;
+    std::vector<uint32> fpSize;
+    CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize);
+
+    for (auto indexItem = moveParaIndex.begin(); indexItem != moveParaIndex.end(); ++indexItem) {
+        uint32 index = *indexItem;
+        ArgInfo argInfo = GetArgInfo(movePara, index, numFpRegs, fpSize);
+        GenerateMovInsn(argInfo, pairReg[index]);
+    }
+
+    x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB());
+    x64CGFunc->SetCurBB(*formerCurBB);
+}
+
+void X64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym)
+{
+    DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal type.");
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    PrimType stype = mirSym.GetType()->GetPrimType();
+    uint32 opndSize = GetPrimTypeBitSize(stype);
+    RegType regType = cgFunc->GetRegTyFromPrimTy(stype);
+    auto symLoc = static_cast<X64SymbolAlloc *>(x64CGFunc->GetMemlayout()->GetSymAllocInfo(mirSym.GetStIndex()));
+    int32 stOffset = x64CGFunc->GetBaseOffset(*symLoc);
+    RegOperand *baseOpnd = static_cast<RegOperand *>(x64CGFunc->GetBaseReg(*symLoc));
+    MemOperand &memOpnd = x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize);
+    PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo());
+    RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg(
+        x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), opndSize, regType);
+
+    MOperator mOp;
+    if (opndSize == k64BitSize) {
+        mOp = regType == kRegTyInt ? x64::MOP_movq_m_r : x64::MOP_movfd_m_r;
+    } else if (opndSize == k32BitSize) {
+        mOp = regType == kRegTyInt ? x64::MOP_movl_m_r : x64::MOP_movfs_m_r;
+    } else if (opndSize == k16BitSize) {
+        mOp = regType == kRegTyInt ? x64::MOP_movw_m_r : x64::MOP_begin;
+    } else if (opndSize == k8BitSize) {
+        mOp = regType == kRegTyInt ? x64::MOP_movb_m_r : x64::MOP_begin;
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+    CHECK_FATAL(mOp != x64::MOP_begin, "should not happen");
+    Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    insn.AddOpndChain(memOpnd).AddOpndChain(dstRegOpnd);
+    if (x64CGFunc->GetCG()->GenerateVerboseCG()) {
+        std::string key = "param: %%";
+        key += std::to_string(mirSym.GetPreg()->GetPregNo());
+        insn.SetComment(key);
+    }
+    x64CGFunc->GetCurBB()->InsertInsnBegin(insn);
+}
+
+void X64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym)
+{
+    DEBUG_ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal type.");
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    RegType regType = (ploc.reg0 < V0) ? kRegTyInt : kRegTyFloat;
+    PrimType stype = mirSym.GetType()->GetPrimType();
+    uint32 byteSize = GetPrimTypeSize(stype);
+    uint32 srcBitSize = ((byteSize < k4ByteSize) ? k4ByteSize : byteSize) * kBitsPerByte;
+    PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo());
+    RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg(
+        x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), srcBitSize, regType);
+    RegOperand &srcRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg(ploc.reg0, srcBitSize, regType);
+
+    MOperator mOp;
+    if (srcBitSize == k64BitSize) {
+        mOp = (regType == kRegTyInt) ? x64::MOP_movq_r_r : x64::MOP_movfd_r_r;
+    } else if (srcBitSize == k32BitSize) {
+        mOp = (regType == kRegTyInt) ? x64::MOP_movl_r_r : x64::MOP_movfs_r_r;
+    } else if (srcBitSize == k16BitSize) {
+        mOp = (regType == kRegTyInt) ? x64::MOP_movw_r_r : x64::MOP_begin;
+    } else if (srcBitSize == k8BitSize) {
+        mOp = (regType == kRegTyInt) ? x64::MOP_movb_r_r : x64::MOP_begin;
+    } else {
+        CHECK_FATAL(false, "NIY");
+    }
+    CHECK_FATAL(mOp != x64::MOP_begin, "should not happen");
+    Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]);
+    insn.AddOpndChain(srcRegOpnd).AddOpndChain(dstRegOpnd);
+    if (x64CGFunc->GetCG()->GenerateVerboseCG()) {
+        std::string key = "param: %%";
+        key += std::to_string(mirSym.GetPreg()->GetPregNo());
+        insn.SetComment(key);
+    }
+    x64CGFunc->GetCurBB()->InsertInsnBegin(insn);
+}
+
+void X64MoveRegArgs::MoveVRegisterArgs()
+{
+    X64CGFunc *x64CGFunc = static_cast<X64CGFunc *>(cgFunc);
+    BB *formerCurBB = x64CGFunc->GetCurBB();
+    x64CGFunc->GetDummyBB()->ClearInsns();
+    x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB());
+    X64CallConvImpl parmlocator(x64CGFunc->GetBecommon());
+    CCLocInfo ploc;
+
+    uint32 formalCount = static_cast<uint32>(x64CGFunc->GetFunction().GetFormalCount());
+    uint32 start = 0;
+    if (formalCount) {
+        MIRFunction *func = const_cast<MIRFunction *>(x64CGFunc->GetBecommon().GetMIRModule().CurFunction());
+        if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) {
+            TyIdx idx = x64CGFunc->GetBecommon().GetFuncReturnType(*func);
+            if (x64CGFunc->GetBecommon().GetTypeSize(idx) <= k16ByteSize) {
+                start = 1;
+            }
+        }
+    }
+    for (uint32 i = start; i < formalCount; ++i) {
+        MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i);
+        parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction());
+        MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i);
+
+        /* load localrefvar formals to store in the reflocals. */
+        if (x64CGFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) {
+            CHECK_FATAL(false, "NIY");
+        }
+
+        if (!sym->IsPreg()) {
+            continue;
+        }
+
+        if (ploc.reg0 == kRinvalid) {
+            /* load stack parameters to the vreg. */
+            LoadStackArgsToVReg(*sym);
+        } else {
+            MoveArgsToVReg(ploc, *sym);
+        }
+    }
+
+    x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB());
+    x64CGFunc->SetCurBB(*formerCurBB);
+}
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_call_conv.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_call_conv.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fff356a6b9f33157778ecddd63945afbdbbdb486
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_call_conv.cpp
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "x64_cgfunc.h"
+#include "becommon.h"
+#include "abi.h"
+#include "x64_call_conv.h"
+namespace maplebe {
+using namespace maple;
+using namespace x64;
+
+int32 CCallConventionInfo::ClassifyAggregate(MIRType &mirType, uint64 sizeOfTy,
+                                             std::vector<ArgumentClass> &classes) const
+{
+    /*
+     * 1. If the size of an object is larger than four eightbytes, or it contains unaligned
+     *    fields, it has class MEMORY;
+     * 2. for the processors that do not support the __m256 type, if the size of an object
+     *    is larger than two eightbytes and the first eightbyte is not SSE or any other eightbyte
+     *    is not SSEUP, it still has class MEMORY.
+     *    This in turn ensures that for processors that do support the __m256 type, if the size of
+     *    an object is four eightbytes and the first eightbyte is SSE and all other eightbytes are
+     *    SSEUP, it can be passed in a register.
+     *    (Currently, assume that __m256 is not supported.)
+     */
+    if (sizeOfTy > k2EightBytesSize) {
+        classes.push_back(kMemoryClass);
+    } else if (sizeOfTy > k1EightBytesSize) {
+        classes.push_back(kIntegerClass);
+        classes.push_back(kIntegerClass);
+    } else {
+        classes.push_back(kIntegerClass);
+    }
+    return static_cast<int32>(sizeOfTy);
+}
+
+int32 CCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                          std::vector<ArgumentClass> &classes) const
+{
+    switch (mirType.GetPrimType()) {
+        /*
+         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
+         * long, long long, and pointers are in the INTEGER class.
+         */
+        case PTY_void:
+        case PTY_u1:
+        case PTY_u8:
+        case PTY_i8:
+        case PTY_u16:
+        case PTY_i16:
+        case PTY_a32:
+        case PTY_u32:
+        case PTY_i32:
+        case PTY_a64:
+        case PTY_ptr:
+        case PTY_ref:
+        case PTY_u64:
+        case PTY_i64:
+            classes.push_back(kIntegerClass);
+            return k8ByteSize;
+        /*
+         * Arguments of type __int128 offer the same operations as INTEGERs,
+         * yet they do not fit into one general purpose register but require
+         * two registers.
+         */
+        case PTY_i128:
+        case PTY_u128:
+            classes.push_back(kIntegerClass);
+            classes.push_back(kIntegerClass);
+            return k16ByteSize;
+        case PTY_f32:
+        case PTY_f64:
+            classes.push_back(kFloatClass);
+            return k8ByteSize;
+        case PTY_agg: {
+            /*
+             * The size of each argument gets rounded up to eightbytes,
+             * therefore the stack will always be eightbyte aligned.
+             */
+            uint64 sizeOfTy = RoundUp(be.GetTypeSize(mirType.GetTypeIndex()), k8ByteSize);
+            if (sizeOfTy == 0) {
+                return 0;
+            }
+            /* If the size of an object is larger than four eightbytes, it has class MEMORY */
+            if ((sizeOfTy > k4EightBytesSize)) {
+                classes.push_back(kMemoryClass);
+                return static_cast<int32>(sizeOfTy);
+            }
+            return ClassifyAggregate(mirType, sizeOfTy, classes);
+        }
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+    return 0;
+}
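+/*
+ * Worked example of the rules above (sketch): for "struct S { long a; long b; }",
+ * the rounded size is 16 bytes, so ClassifyAggregate pushes two kIntegerClass
+ * eightbytes and the struct travels in two general purpose registers, while
+ * "struct T { long x[5]; }" (40 bytes, larger than four eightbytes) is
+ * classified kMemoryClass and passed on the stack.
+ */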
+
+int32 WebKitJSCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                                 std::vector<ArgumentClass> &classes) const
+{
+    switch (mirType.GetPrimType()) {
+        /*
+         * Arguments of types void, (signed and unsigned) _Bool, char, short, int,
+         * long, long long, and pointers are in the INTEGER class.
+         */
+        case PTY_void:
+        case PTY_u1:
+        case PTY_u8:
+        case PTY_i8:
+        case PTY_u16:
+        case PTY_i16:
+        case PTY_a32:
+        case PTY_u32:
+        case PTY_i32:
+            classes.push_back(kIntegerClass);
+            return k4ByteSize;
+        case PTY_a64:
+        case PTY_ptr:
+        case PTY_ref:
+        case PTY_u64:
+        case PTY_i64:
+            classes.push_back(kIntegerClass);
+            return k8ByteSize;
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+    return 0;
+}
+
+int32 GHCCallConventionInfo::Classification(const BECommon &be, MIRType &mirType,
+                                            std::vector<ArgumentClass> &classes) const
+{
+    switch (mirType.GetPrimType()) {
+        case PTY_u1:
+        case PTY_u8:
+        case PTY_i8:
+        case PTY_u16:
+        case PTY_i16:
+        case PTY_a32:
+        case PTY_u32:
+        case PTY_i32:
+        case PTY_a64:
+        case PTY_ptr:
+        case PTY_ref:
+        case PTY_u64:
+        case PTY_i64:
+            classes.push_back(kIntegerClass);
+            return k8ByteSize;
+        default:
+            CHECK_FATAL(false, "NYI");
+    }
+    // TODO:
+    return 0;
+}
+
+void X64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const
+{
+    pLoc.reg0 = kRinvalid;
+    pLoc.reg1 = kRinvalid;
+    pLoc.reg2 = kRinvalid;
+    pLoc.reg3 = kRinvalid;
+    pLoc.memOffset = nextStackArgAdress;
+    pLoc.fpSize = 0;
+    pLoc.numFpPureRegs = 0;
+}
+
+int32 X64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc)
+{
+    InitCCLocInfo(pLoc);
+    std::vector<ArgumentClass> classes {};
+    int32 alignedTySize = GetCallConvInfo().Classification(beCommon, mirType, classes);
+    if (alignedTySize == 0) {
+        return 0;
+    }
+    pLoc.memSize = alignedTySize;
+    ++paramNum;
+    if (classes[0] == kIntegerClass) {
+        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
+            pLoc.reg0 = AllocateGPParmRegister();
+            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(),
+                         "RegNo should be paramRegNO");
+        } else if (alignedTySize == k16ByteSize) {
+            AllocateTwoGPParmRegisters(pLoc);
+            DEBUG_ASSERT(nextGeneralParmRegNO <= GetCallConvInfo().GetIntParamRegsNum(),
+                         "RegNo should be paramRegNO");
+        }
+    } else if (classes[0] == kFloatClass) {
+        if (alignedTySize == k8ByteSize) {
+            pLoc.reg0 = AllocateSIMDFPRegister();
+            DEBUG_ASSERT(nextGeneralParmRegNO <= kNumFloatParmRegs, "RegNo should be paramRegNO");
+        } else {
+            CHECK_FATAL(false, "niy");
+        }
+    }
+    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
+        /* being passed in memory */
+        nextStackArgAdress = pLoc.memOffset + alignedTySize;
+    }
+    return 0;
+}
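+/*
+ * Example of LocateNextParm in action for "void f(int a, S s)" with the
+ * 16-byte struct S above (sketch): the i32 formal gets one GP register via
+ * AllocateGPParmRegister (pLoc.reg0), the struct gets a register pair via
+ * AllocateTwoGPParmRegisters, and once the GP registers are exhausted
+ * pLoc.reg0 stays kRinvalid and the formal is assigned a stack slot at
+ * nextStackArgAdress, which is bumped by the rounded size.
+ */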
+
+int32 X64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc)
+{
+    InitCCLocInfo(pLoc);
+    std::vector<ArgumentClass> classes {}; /* max of four regs */
+    int32 alignedTySize = GetCallConvInfo().Classification(beCommon, retType, classes);
+    if (alignedTySize == 0) {
+        return 0; /* size 0 ret val */
+    }
+    if (classes[0] == kIntegerClass) {
+        /*
+         * If the class is INTEGER, the next available register of the
+         * sequence %rax, %rdx is used.
+         */
+        CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs");
+        if ((alignedTySize == k4ByteSize) || (alignedTySize == k8ByteSize)) {
+            pLoc.regCount = kOneRegister;
+            pLoc.reg0 = AllocateGPReturnRegister();
+            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
+                         "RegNo should be paramRegNO");
+        } else if (alignedTySize == k16ByteSize) {
+            pLoc.regCount = kTwoRegister;
+            AllocateTwoGPReturnRegisters(pLoc);
+            DEBUG_ASSERT(nextGeneralReturnRegNO <= GetCallConvInfo().GetIntReturnRegsNum(),
+                         "RegNo should be paramRegNO");
+        }
+        if (nextGeneralReturnRegNO == kOneRegister) {
+            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+        } else if (nextGeneralReturnRegNO == kTwoRegister) {
+            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+            pLoc.primTypeOfReg1 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType();
+        }
+        return 0;
+    } else if (classes[0] == kFloatClass) {
+        /*
+         * If the class is SSE, the next available vector register of the
+         * sequence %xmm0, %xmm1 is used.
+         */
+        CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs");
+        if (alignedTySize == k8ByteSize) {
+            pLoc.regCount = kOneRegister;
+            pLoc.reg0 = AllocateSIMDFPReturnRegister();
+            DEBUG_ASSERT(nextFloatRetRegNO <= kNumFloatReturnRegs, "RegNo should be paramRegNO");
+        } else if (alignedTySize == k16ByteSize) {
+            CHECK_FATAL(false, "niy");
+        }
+        if (nextFloatRetRegNO == kOneRegister) {
+            pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_f64 : retType.GetPrimType();
+        } else if (nextFloatRetRegNO == kTwoRegister) {
+            CHECK_FATAL(false, "niy");
+        }
+        return 0;
+    }
+    if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) {
+        /*
+         * The caller provides space for the return value and passes
+         * the address of this storage in %rdi as if it were the first
+         * argument to the function. In effect, this address becomes a
+         * "hidden" first argument.
+         * On return %rax will contain the address that has been passed
+         * in by the caller in %rdi.
+         * Currently, this scenario is not fully supported.
+         */
+        pLoc.reg0 = AllocateGPReturnRegister();
+        return 0;
+    }
+    CHECK_FATAL(false, "NYI");
+    return 0;
+}
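+/*
+ * Example of the return-value rules above (sketch): an i64 result comes back
+ * in %rax alone (regCount == kOneRegister), the 16-byte struct S is returned
+ * in the %rax/%rdx pair, an f64 comes back in %xmm0, and anything classified
+ * kMemoryClass is written through the hidden pointer the caller passed in
+ * %rdi.
+ */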
+} /* namespace maplebe */
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cfgo.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cfgo.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..80e8326eb801b4e1835efc699c5662779e0531bc
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cfgo.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "x64_cfgo.h"
+#include "x64_isa.h"
+
+namespace maplebe {
+/* Initialize cfg optimization patterns */
+void X64CFGOptimizer::InitOptimizePatterns()
+{
+    /* disable the pass that conflicts with cfi */
+    if (!cgFunc->GenCfi()) {
+        diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+    }
+    diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+    diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+    diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+    diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+    diffPassPatterns.emplace_back(memPool->New(*cgFunc));
+}
+
+uint32 X64FlipBRPattern::GetJumpTargetIdx(const Insn &insn)
+{
+    return x64::GetJumpTargetIdx(insn);
+}
+MOperator X64FlipBRPattern::FlipConditionOp(MOperator flippedOp)
+{
+    return x64::FlipConditionOp(flippedOp);
+}
+} // namespace maplebe
diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cg.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d73eca78e82ea7ceb334e4b5af306bb2b3c7800f
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cg.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "x64_cg.h"
+#include "x64_cgfunc.h"
+#include "x64_isa.h"
+namespace maplebe {
+
+using namespace x64;
+
+#define DEFINE_MOP(...) {__VA_ARGS__},
+const InsnDesc X64CG::kMd[kMopLast] = {
+#include "abstract_mmir.def"
+#include "x64_md.def"
+};
+#undef DEFINE_MOP
+
+void X64CG::EnrollTargetPhases(maple::MaplePhaseManager *pm) const {
+#include "x64_phases.def"
+}
+
+CGFunc *X64CG::CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool,
+                            StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId)
+{
+    return memPool.New<X64CGFunc>(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId);
+}
+
+bool X64CG::IsEffectiveCopy(Insn &insn) const
+{
+    return false;
+}
+bool X64CG::IsTargetInsn(MOperator mOp) const
+{
+    return (mOp >= MOP_movb_r_r && mOp <= MOP_pseudo_ret_int);
+}
+bool X64CG::IsClinitInsn(MOperator mOp) const
+{
+    return false;
+}
+bool X64CG::IsPseudoInsn(MOperator mOp) const
+{
+    return false;
+}
+
+Insn &X64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam)
+{
+    CHECK_FATAL(false, "NIY");
+    Insn *a = nullptr;
+    return *a;
+}
+
+PhiOperand &X64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator)
+{
+    CHECK_FATAL(false, "NIY");
+    PhiOperand *a = nullptr;
+    return *a;
+}
+
+void X64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const
+{
+    X64OpndDumpVisitor visitor(opndDesc);
+    opnd.Accept(visitor);
+}
+
+bool X64CG::IsExclusiveFunc(MIRFunction &mirFunc)
+{
+    return false;
+}
+
+/* NOTE: Consider making be_common a field of CG.
*/ +void X64CG::GenerateObjectMaps(BECommon &beCommon) {} + +/* Used for GCTIB pattern merging */ +std::string X64CG::FindGCTIBPatternName(const std::string &name) const +{ + return ""; +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7d4de6a06d11fd3c3b5eeb3d9cc2b672d001776f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -0,0 +1,1118 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "x64_cgfunc.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "assembler/operand.h" + +namespace maplebe { +/* null implementation yet */ +void X64CGFunc::GenSaveMethodInfoCode(BB &bb) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GenerateCleanupCode(BB &bb) +{ + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::NeedCleanup() +{ + CHECK_FATAL(false, "NIY"); + return false; +} +void X64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) +{ + CHECK_FATAL(false, "NIY"); +} +uint32 X64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) +{ + CHECK_FATAL(false, "NIY"); + return 0; +} +void X64CGFunc::AssignLmbcFormalParams() +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::LmbcGenSaveSpForAlloca() +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::MergeReturn() +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::DetermineReturnTypeofCall() +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAbort() +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAsm(AsmNode &node) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggDassign(DassignNode &stmt) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassign(IassignNode &stmt) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignoff(IassignoffNode &stmt) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggIassign(IassignNode &stmt, Operand 
&lhsAddrOpnd) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturn(Operand *opnd) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIgoto(Operand *opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectGoto(GotoNode &stmt) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCall(CallNode &callNode) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclz(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCctz(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCpopcount(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCparity(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclrsb(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCisaligned(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCalignup(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCaligndown(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicExchangeN(IntrinsicopNode &intrinsicopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMembar(StmtNode &membar) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectComment(CommentNode &comment) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleCatch() +{ 
+ CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDread(const BaseNode &parent, AddrofNode &expr) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectRegread(RegreadNode &expr) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +Operand &X64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, + PrimType finalBitFieldDestType) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIntConst(const MIRIntConst &intConst) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStrConst(MIRStrConst &strConst) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStr16Const(MIRStr16Const &strConst) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand &X64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType 
primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, bool parentIsBr) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + 
CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectMalloc(UnaryNode &call, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand &X64CGFunc::SelectCopy(Operand &src, PrimType srcType, PrimType dstType) +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand *X64CGFunc::SelectAlloca(UnaryNode &call, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectGCMalloc(GCMallocNode &call) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) +{ + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::GenerateYieldpoint(BB &bb) +{ + CHECK_FATAL(false, "NIY"); +} +Operand &X64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetOrCreateRflag() +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +const Operand *X64CGFunc::GetRflag() const +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const Operand *X64CGFunc::GetFloatRflag() const +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const LabelOperand *X64CGFunc::GetLabelOperand(LabelIdx labIdx) const +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) +{ + std::string lableName = ".L." 
+ std::to_string(GetUniqueID()) + "__" + std::to_string(labIdx); + return GetOpndBuilder()->CreateLabel(lableName.c_str(), labIdx); +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(BB &bb) +{ + CHECK_FATAL(false, "NIY"); + LabelOperand *a; + return *a; +} +RegOperand &X64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateFramePointerRegOperand() +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateStackBaseRegOperand() +{ + return GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); +} +RegOperand &X64CGFunc::GetZeroOpnd(uint32 size) +{ + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand &X64CGFunc::CreateCfiRegOperand(uint32 reg, uint32 size) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::CreateImmOperand(PrimType primType, int64 val) +{ + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +void X64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t regno) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::CleanupDeadMov(bool dump) +{ + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) +{ + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::IsFrameReg(const RegOperand &opnd) const +{ + CHECK_FATAL(false, "NIY"); + return false; +} +RegOperand *X64CGFunc::SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + Opcode opc) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) +{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, 
+RegOperand *X64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3,
+                                        PrimType oTyp3)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2,
+                                        bool isLow)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorNot(PrimType rType, Operand *o1)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, int32 lane)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2,
+                                         Opcode opc)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2,
+                                            bool isLow, bool isWide)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+RegOperand *X64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+Operand *X64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType,
+                                                 const std::string &name)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
+void X64CGFunc::ProcessLazyBinding()
+{
+    CHECK_FATAL(false, "NIY");
+}
+void X64CGFunc::DBGFixCallFrameLocationOffsets()
+{
+    CHECK_FATAL(false, "NIY");
+}
+MemOperand *X64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx idx)
+{
+    CHECK_FATAL(false, "NIY");
+    return nullptr;
+}
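+// A worked example for the rbp-relative offsets computed by GetBaseOffset
+// below, assuming hypothetical segment sizes Locals == 16, ArgsReg == 8,
+// ReseverdSlot == 0 and an 8-byte pointer:
+//
+//     // spill slot with baseOffset == 0:
+//     int32 off = -(16 + 8 + 0 + 0 + 8);  // == -32, i.e. 32 bytes below rbp
+//
+// Spill, Locals and ArgsReg all resolve to negative (below-rbp) offsets; only
+// stack-passed arguments (kMsArgsStkPassed) land above rbp, at
+// baseOffset + sizeofFplr.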
+int32 X64CGFunc::GetBaseOffset(const SymbolAlloc &symbolAlloc)
+{
+    const auto *symAlloc = static_cast<const X64SymbolAlloc *>(&symbolAlloc);
+    /* Call frame layout of X64.
+     * Refer to the layout in x64_memlayout.h.
+     * Do not change this unless you know what you are doing.
+     * The memlayout looks like this:
+     *   rbp position
+     *   prologue slots --
+     *   ArgsReg         |
+     *   Locals          | -- FrameSize
+     *   Spill           |
+     *   ArgsStk        --
+     */
+    constexpr const int32 sizeofFplr = 2 * kIntregBytelen;
+    // baseOffset is the offset of this symbol based on the rbp position.
+    int32 baseOffset = symAlloc->GetOffset();
+    MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind();
+    auto *memLayout = static_cast<X64MemLayout *>(this->GetMemlayout());
+    if (sgKind == kMsSpillReg) {
+        /* spill = -(Locals + ArgsReg + baseOffset + ReseverdSlot + kSizeOfPtr) */
+        return -(memLayout->GetSizeOfLocals() + memLayout->SizeOfArgsRegisterPassed() + baseOffset +
+                 GetFunction().GetFrameReseverdSlot() + GetPointerSize());
+    } else if (sgKind == kMsLocals) {
+        /* Locals = baseOffset - (ReseverdSlot + Locals + ArgsReg) */
+        return baseOffset - (GetFunction().GetFrameReseverdSlot() + memLayout->GetSizeOfLocals() +
+                             memLayout->SizeOfArgsRegisterPassed());
+    } else if (sgKind == kMsArgsRegPassed) {
+        /* ArgsReg = baseOffset - ReseverdSlot - ArgsReg */
+        return baseOffset - GetFunction().GetFrameReseverdSlot() - memLayout->SizeOfArgsRegisterPassed();
+    } else if (sgKind == kMsArgsStkPassed) {
+        return baseOffset + sizeofFplr;
+    } else {
+        CHECK_FATAL(false, "sgKind check");
+    }
+    return 0;
+}
+
+RegOperand *X64CGFunc::GetBaseReg(const maplebe::SymbolAlloc &symAlloc)
+{
+    MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind();
+    DEBUG_ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) ||
+                  (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)),
+                 "NIY");
+    if (sgKind == kMsLocals || sgKind == kMsArgsRegPassed || sgKind == kMsArgsStkPassed) {
+        return &GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt);
+    } else {
+        CHECK_FATAL(false, "NIY sgKind");
+    }
+    return nullptr;
+}
+
+void X64CGFunc::FreeSpillRegMem(regno_t vrNum)
+{
+    MemOperand *memOpnd = nullptr;
+
+    auto p = spillRegMemOperands.find(vrNum);
+    if (p != spillRegMemOperands.end()) {
+        memOpnd = p->second;
+    }
+
+    if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
+        auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+        if (pSecond != pRegSpillMemOperands.end()) {
+            memOpnd = pSecond->second;
+        }
+    }
+
+    if (memOpnd == nullptr) {
+        DEBUG_ASSERT(false, "free spillreg have no mem");
+        return;
+    }
+
+    uint32 size = memOpnd->GetSize();
+    MapleUnorderedMap<uint32, SpillMemOperandSet *>::iterator iter;
+    if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
+        iter->second->Add(*memOpnd);
+    } else {
+        reuseSpillLocMem[size] = memPool->New<SpillMemOperandSet>(*GetFuncScopeAllocator());
+        reuseSpillLocMem[size]->Add(*memOpnd);
+    }
+}
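+// FreeSpillRegMem above and GetOrCreatSpillMem below form a small free-list
+// cache keyed by slot size: freed spill slots are parked in reuseSpillLocMem
+// and handed out again before any new stack location is allocated. A minimal
+// sketch with hypothetical virtual register numbers:
+//
+//     MemOperand *slotA = cgFunc.GetOrCreatSpillMem(100, 64);  // fresh slot
+//     cgFunc.FreeSpillRegMem(100);                             // recycled
+//     MemOperand *slotB = cgFunc.GetOrCreatSpillMem(101, 64);  // may get slotA back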
+MemOperand *X64CGFunc::GetOrCreatSpillMem(regno_t vrNum, uint32 memSize)
+{
+    /* NOTES: must be used in RA; not to be used elsewhere. */
+    if (IsVRegNOForPseudoRegister(vrNum)) {
+        auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+        if (p != pRegSpillMemOperands.end()) {
+            return p->second;
+        }
+    }
+
+    auto p = spillRegMemOperands.find(vrNum);
+    if (p == spillRegMemOperands.end()) {
+        uint32 memBitSize = k64BitSize;
+        auto it = reuseSpillLocMem.find(memBitSize);
+        if (it != reuseSpillLocMem.end()) {
+            MemOperand *memOpnd = it->second->GetOne();
+            if (memOpnd != nullptr) {
+                spillRegMemOperands.emplace(std::pair(vrNum, memOpnd));
+                return memOpnd;
+            }
+        }
+
+        RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand();
+        int32 offset = GetOrCreatSpillRegLocation(vrNum, memBitSize / kBitsPerByte);
+        MemOperand *memOpnd = &GetOpndBuilder()->CreateMem(baseOpnd, offset, memBitSize);
+        spillRegMemOperands.emplace(std::pair(vrNum, memOpnd));
+        return memOpnd;
+    } else {
+        return p->second;
+    }
+}
+
+void X64OpndDumpVisitor::Visit(maplebe::RegOperand *v)
+{
+    DumpOpndPrefix();
+    LogInfo::MapleLogger() << "reg ";
+    DumpRegInfo(*v);
+    DumpSize(*v);
+    DumpReferenceInfo(*v);
+    const OpndDesc *regDesc = GetOpndDesc();
+    LogInfo::MapleLogger() << " [";
+    if (regDesc->IsRegDef()) {
+        LogInfo::MapleLogger() << "DEF,";
+    }
+    if (regDesc->IsRegUse()) {
+        LogInfo::MapleLogger() << "USE,";
+    }
+    LogInfo::MapleLogger() << "]";
+    DumpOpndSuffix();
+}
+
+void X64OpndDumpVisitor::Visit(CommentOperand *v)
+{
+    LogInfo::MapleLogger() << ":#" << v->GetComment();
+}
+
+void X64OpndDumpVisitor::Visit(maplebe::ImmOperand *v)
+{
+    DumpOpndPrefix();
+    LogInfo::MapleLogger() << "imm ";
+    LogInfo::MapleLogger() << v->GetValue();
+    DumpSize(*v);
+    DumpReferenceInfo(*v);
+    DumpOpndSuffix();
+}
+
+void X64OpndDumpVisitor::Visit(maplebe::MemOperand *v)
+{
+    DumpOpndPrefix();
+    LogInfo::MapleLogger() << "mem ";
+    if (v->GetBaseRegister() != nullptr) {
+        DumpRegInfo(*v->GetBaseRegister());
+        if (v->GetOffsetOperand() != nullptr) {
+            LogInfo::MapleLogger() << " + " << v->GetOffsetOperand()->GetValue();
+        }
+    }
+    DumpSize(*v);
+    DumpReferenceInfo(*v);
+    DumpOpndSuffix();
+}
+void X64OpndDumpVisitor::DumpRegInfo(maplebe::RegOperand &v)
+{
+    if (v.GetRegisterNumber() > baseVirtualRegNO) {
+        LogInfo::MapleLogger() << "V" << v.GetRegisterNumber();
+    } else {
+        uint8 regType = -1;
+        switch (v.GetSize()) {
+            case k8BitSize:
+                /* use lower 8-bits */
+                regType = X64CG::kR8LowList;
+                break;
+            case k16BitSize:
+                regType = X64CG::kR16List;
+                break;
+            case k32BitSize:
+                regType = X64CG::kR32List;
+                break;
+            case k64BitSize:
+                regType = X64CG::kR64List;
+                break;
+            default:
+                CHECK_FATAL(false, "unknown reg size");
+                break;
+        }
+        assembler::Reg reg = assembler::kRegArray[regType][v.GetRegisterNumber()];
+        LogInfo::MapleLogger() << "%" << assembler::kRegStrMap.at(reg);
+    }
+}
+
+void X64OpndDumpVisitor::Visit(maplebe::FuncNameOperand *v)
+{
+    DumpOpndPrefix();
+    LogInfo::MapleLogger() << "funcname ";
+    LogInfo::MapleLogger() << v->GetName();
+    DumpSize(*v);
+    DumpReferenceInfo(*v);
+    DumpOpndSuffix();
+}
+
+void X64OpndDumpVisitor::Visit(maplebe::ListOperand *v)
+{
+    DumpOpndPrefix();
+    LogInfo::MapleLogger() << "list ";
+
+    MapleList<Operand *> opndList = v->GetOperands();
+    for (auto it = opndList.begin(); it != opndList.end();) {
+        (*it)->Dump();
+        LogInfo::MapleLogger() << (++it == opndList.end() ?
"" : " ,"); + } + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::LabelOperand *v) +{ + DumpOpndPrefix(); + LogInfo::MapleLogger() << "label "; + LogInfo::MapleLogger() << v->GetLabelIndex(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(PhiOperand *v) +{ + CHECK_FATAL(false, "NIY"); +} + +void X64OpndDumpVisitor::Visit(CondOperand *v) +{ + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(StImmOperand *v) +{ + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(BitShiftOperand *v) +{ + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(ExtendShiftOperand *v) +{ + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_emitter.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b9a21a2dca0a319679b2775fc2c3a8501b154ff --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_emitter.cpp @@ -0,0 +1,2540 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_emitter.h" +#include "x64_cgfunc.h" +#include "x64_cg.h" +#include "insn.h" + +#define __ assmbler. 
+
+namespace {
+using namespace maple;
+
+DBGDieAttr *LFindAttribute(MapleVector<DBGDieAttr *> &vec, DwAt key)
+{
+    for (DBGDieAttr *at : vec) {
+        if (at->GetDwAt() == key) {
+            return at;
+        }
+    }
+    return nullptr;
+}
+
+DBGAbbrevEntry *LFindAbbrevEntry(MapleVector<DBGAbbrevEntry *> &abbvec, unsigned int key)
+{
+    for (DBGAbbrevEntry *daie : abbvec) {
+        if (!daie) {
+            continue;
+        }
+        if (daie->GetAbbrevId() == key) {
+            return daie;
+        }
+    }
+    DEBUG_ASSERT(0, "");
+    return nullptr;
+}
+
+bool LShouldEmit(unsigned int dwform)
+{
+    return dwform != static_cast<unsigned int>(DW_FORM_flag_present);
+}
+
+DBGDie *LFindChildDieWithName(DBGDie &die, DwTag tag, const GStrIdx key)
+{
+    for (DBGDie *c : die.GetSubDieVec()) {
+        if (c->GetTag() != tag) {
+            continue;
+        }
+        for (DBGDieAttr *a : c->GetAttrVec()) {
+            if ((a->GetDwAt() == static_cast<DwAt>(DW_AT_name)) &&
+                ((a->GetDwForm() == static_cast<DwForm>(DW_FORM_string) ||
+                  a->GetDwForm() == static_cast<DwForm>(DW_FORM_strp)) &&
+                 a->GetId() == key.GetIdx())) {
+                return c;
+            }
+            if ((a->GetDwAt() == static_cast<DwAt>(DW_AT_name)) &&
+                (!((a->GetDwForm() == static_cast<DwForm>(DW_FORM_string) ||
+                    a->GetDwForm() == static_cast<DwForm>(DW_FORM_strp)) &&
+                   a->GetId() == key.GetIdx()))) {
+                break;
+            }
+        }
+    }
+    return nullptr;
+}
+
+/* GetDwOpName(unsigned n) */
+#define TOSTR(s) #s
+const std::string GetDwOpName(unsigned n)
+{
+    switch (n) {
+#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) \
+    case DW_OP_##NAME:                          \
+        return TOSTR(DW_OP_##NAME)
+        case DW_OP_hi_user:
+            return "DW_OP_hi_user";
+        default:
+            return "";
+    }
+}
+} // namespace
+
+using namespace std;
+using namespace assembler;
+
+namespace maplebe {
+uint8 X64Emitter::GetSymbolAlign(const MIRSymbol &mirSymbol, bool isComm)
+{
+    uint8 alignInByte = mirSymbol.GetAttrs().GetAlignValue();
+    MIRTypeKind kind = mirSymbol.GetType()->GetKind();
+    if (isComm) {
+        MIRStorageClass storage = mirSymbol.GetStorageClass();
+        if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || (kind == kTypeUnion)) &&
+            ((storage == kScGlobal) || (storage == kScPstatic) || (storage == kScFstatic)) &&
+            alignInByte < kSizeOfPTR) {
+            alignInByte = kQ;
+            return alignInByte;
+        }
+    }
+    if (alignInByte == 0) {
+        if (kind == kTypeStruct || kind == kTypeClass || kind == kTypeArray || kind == kTypeUnion) {
+            return alignInByte;
+        } else {
+            alignInByte = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex());
+        }
+    }
+    return alignInByte;
+}
+
+uint64 X64Emitter::GetSymbolSize(const TyIdx typeIndex)
+{
+    uint64 sizeInByte = Globals::GetInstance()->GetBECommon()->GetTypeSize(typeIndex);
+    return sizeInByte;
+}
+
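+// The Transfer* helpers below (TransferReg/TransferImm/TransferMem/
+// TransferLabel/TransferFuncName) bridge maplebe's Operand hierarchy to the
+// standalone assembler's plain value types. A minimal sketch of the flow for
+// one two-operand instruction, assuming both operands are physical registers:
+//
+//     Reg r0 = TransferReg(&insn.GetOperand(0));  // maplebe operand -> assembler Reg
+//     Reg r1 = TransferReg(&insn.GetOperand(1));
+//     __ Mov(kQ, r0, r1);                         // plain-data assembler call
+//
+// Virtual registers must be resolved before this point; TransferReg asserts
+// that the operand is already physical.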
+Reg X64Emitter::TransferReg(Operand *opnd) const
+{
+    RegOperand *v = static_cast<RegOperand *>(opnd);
+    /* check whether this reg is still virtual */
+    CHECK_FATAL(v->IsPhysicalRegister(), "register is still virtual or reg num is 0");
+
+    uint8 regType = -1;
+    switch (v->GetSize()) {
+        case k8BitSize:
+            regType = v->IsHigh8Bit() ? X64CG::kR8HighList : X64CG::kR8LowList;
+            break;
+        case k16BitSize:
+            regType = X64CG::kR16List;
+            break;
+        case k32BitSize:
+            regType = X64CG::kR32List;
+            break;
+        case k64BitSize:
+            regType = X64CG::kR64List;
+            break;
+        case k128BitSize:
+            regType = X64CG::kR128List;
+            break;
+        default:
+            FATAL(kLncFatal, "unknown reg size");
+            break;
+    }
+    Reg reg = kRegArray[regType][v->GetRegisterNumber()];
+    return reg;
+}
+
+pair<int64, bool> X64Emitter::TransferImm(Operand *opnd)
+{
+    ImmOperand *v = static_cast<ImmOperand *>(opnd);
+    if (v->GetKind() == Operand::kOpdStImmediate) {
+        uint32 symIdx = v->GetSymbol()->GetNameStrIdx().get();
+        const string &symName = v->GetName();
+        __ StoreNameIntoSymMap(symIdx, symName);
+        return pair<int64, bool>(symIdx, true);
+    } else {
+        return pair<int64, bool>(v->GetValue(), false);
+    }
+}
+
+Mem X64Emitter::TransferMem(Operand *opnd, uint32 funcUniqueId)
+{
+    MemOperand *v = static_cast<MemOperand *>(opnd);
+    Mem mem;
+    mem.size = v->GetSize();
+    if (v->GetOffsetOperand() != nullptr) {
+        ImmOperand *offset = v->GetOffsetOperand();
+        if (offset->GetKind() == Operand::kOpdStImmediate) {
+            string symbolName = offset->GetName();
+            const MIRSymbol *symbol = offset->GetSymbol();
+
+            MIRStorageClass storageClass = symbol->GetStorageClass();
+            bool isLocalVar = offset->GetSymbol()->IsLocal();
+            if (storageClass == kScPstatic && isLocalVar) {
+                symbolName.append(to_string(funcUniqueId));
+            }
+
+            int64 symIdx;
+            /* 2 : if it is a bb label, the second position in symbolName is '.' */
+            if (symbolName.size() > 2 && symbolName[2] == '.') {
+                string delimiter = "__";
+                size_t pos = symbolName.find(delimiter);
+                uint32 itsFuncUniqueId =
+                    pos > 3 ? stoi(symbolName.substr(3, pos)) : 0; /* 3: index starts after ".L." */
+                uint32 labelIdx = stoi(symbolName.substr(pos + 2, symbolName.length())); /* 2: delimiter.length() */
+                symIdx = CalculateLabelSymIdx(itsFuncUniqueId, labelIdx);
+            } else {
+                symIdx = symbol->GetNameStrIdx().get();
+            }
+            __ StoreNameIntoSymMap(symIdx, symbolName);
+            mem.disp.first = symIdx;
+        }
+        if (offset->GetValue() != 0) {
+            mem.disp.second = offset->GetValue();
+        }
+    }
+    if (v->GetBaseRegister() != nullptr) {
+        if (v->GetIndexRegister() != nullptr && v->GetBaseRegister()->GetRegisterNumber() == x64::RBP) {
+            mem.base = ERR;
+        } else {
+            mem.base = TransferReg(v->GetBaseRegister());
+        }
+    }
+    if (v->GetIndexRegister() != nullptr) {
+        mem.index = TransferReg(v->GetIndexRegister());
+        uint8 s = static_cast<uint8>(v->GetScaleOperand()->GetValue());
+        /* 1, 2, 4, 8: allowed range for s */
+        CHECK_FATAL(s == 1 || s == 2 || s == 4 || s == 8, "mem.s is not 1, 2, 4, or 8");
+        mem.s = s;
+    }
+    mem.SetMemType();
+    return mem;
+}
+
+int64 X64Emitter::TransferLabel(Operand *opnd, uint32 funcUniqueId)
+{
+    LabelOperand *v = static_cast<LabelOperand *>(opnd);
+    int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, v->GetLabelIndex());
+    __ StoreNameIntoSymMap(labelSymIdx, v->GetParentFunc());
+    return labelSymIdx;
+}
+
+uint32 X64Emitter::TransferFuncName(Operand *opnd)
+{
+    FuncNameOperand *v = static_cast<FuncNameOperand *>(opnd);
+    uint32 funcSymIdx = v->GetFunctionSymbol()->GetNameStrIdx().get();
+    __ StoreNameIntoSymMap(funcSymIdx, v->GetName());
+    return funcSymIdx;
+}
+
+void X64Emitter::EmitInsn(Insn &insn, uint32 funcUniqueId)
+{
+#if DEBUG
+    insn.Check();
+#endif
+
+    MOperator mop = insn.GetMachineOpcode();
+    const InsnDesc &curMd = X64CG::kMd[mop];
+    uint32 opndNum = curMd.GetOpndMDLength(); /* Get operands Number */
+
+    /* Get operand(s) */
+    Operand *opnd0 = nullptr;
+    Operand *opnd1 = nullptr;
+    if (opndNum > 0) {
+        opnd0 = &insn.GetOperand(0);
+        if (opndNum > 1) {
opnd1 = &insn.GetOperand(1); + } + } + + switch (mop) { + /* mov */ + case x64::MOP_movb_r_r: + __ Mov(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movw_r_r: + __ Mov(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movl_r_r: + __ Mov(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movq_r_r: + __ Mov(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movb_m_r: + __ Mov(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movw_m_r: + __ Mov(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movl_m_r: + __ Mov(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movq_m_r: + __ Mov(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movb_i_r: + __ Mov(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movw_i_r: + __ Mov(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movl_i_r: + __ Mov(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movq_i_r: + __ Mov(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movb_i_m: + __ Mov(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movw_i_m: + __ Mov(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movl_i_m: + __ Mov(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movb_r_m: + __ Mov(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movw_r_m: + __ Mov(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movl_r_m: + __ Mov(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movq_r_m: + __ Mov(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* floating point mov */ + case x64::MOP_movd_fr_r: + __ Mov(TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movq_fr_r: + case x64::MOP_movq_r_fr: + __ Mov(TransferReg(opnd0), TransferReg(opnd1), false); + break; + case x64::MOP_movfs_r_r: + __ MovF(TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movfd_r_r: + __ MovF(TransferReg(opnd0), TransferReg(opnd1), false); + break; + case x64::MOP_movfs_m_r: + __ MovF(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movfs_r_m: + __ MovF(TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_movfd_m_r: + __ MovF(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1), false); + break; + case x64::MOP_movfd_r_m: + __ MovF(TransferReg(opnd0), TransferMem(opnd1, funcUniqueId), false); + break; + /* movzx */ + case x64::MOP_movzbw_r_r: + __ MovZx(kB, kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbl_r_r: + __ MovZx(kB, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbq_r_r: + __ MovZx(kB, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzwl_r_r: + __ MovZx(kW, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzwq_r_r: + __ MovZx(kW, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movzbw_m_r: + __ MovZx(kB, kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzbl_m_r: + __ MovZx(kB, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzbq_m_r: + __ MovZx(kB, kQ, TransferMem(opnd0, funcUniqueId), 
TransferReg(opnd1)); + break; + case x64::MOP_movzwl_m_r: + __ MovZx(kW, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movzwq_m_r: + __ MovZx(kW, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* movsx */ + case x64::MOP_movsbw_r_r: + __ MovSx(kB, kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbl_r_r: + __ MovSx(kB, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbq_r_r: + __ MovSx(kB, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movswl_r_r: + __ MovSx(kW, kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movswq_r_r: + __ MovSx(kW, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movslq_r_r: + __ MovSx(kL, kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movsbw_m_r: + __ MovSx(kB, kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movsbl_m_r: + __ MovSx(kB, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movsbq_m_r: + __ MovSx(kB, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movswl_m_r: + __ MovSx(kW, kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movswq_m_r: + __ MovSx(kW, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_movslq_m_r: + __ MovSx(kL, kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* add */ + case x64::MOP_addb_r_r: + __ Add(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addw_r_r: + __ Add(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addl_r_r: + __ Add(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addq_r_r: + __ Add(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addb_i_r: + __ Add(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addw_i_r: + __ Add(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addl_i_r: + __ Add(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addq_i_r: + __ Add(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_addb_m_r: + __ Add(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addw_m_r: + __ Add(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addl_m_r: + __ Add(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addq_m_r: + __ Add(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addb_r_m: + __ Add(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addw_r_m: + __ Add(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addl_r_m: + __ Add(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addq_r_m: + __ Add(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addb_i_m: + __ Add(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addw_i_m: + __ Add(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addl_i_m: + __ Add(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_addq_i_m: + __ Add(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* add floating point */ + case x64::MOP_adds_r_r: + __ 
Add(TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_adds_m_r: + __ Add(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_addd_r_r: + __ Add(TransferReg(opnd0), TransferReg(opnd1), false); + break; + case x64::MOP_addd_m_r: + __ Add(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1), false); + break; + /* movabs */ + case x64::MOP_movabs_i_r: + __ Movabs(TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_movabs_l_r: + __ Movabs(TransferLabel(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* push */ + case x64::MOP_pushq_r: + __ Push(kQ, TransferReg(opnd0)); + break; + /* pop */ + case x64::MOP_popq_r: + __ Pop(kQ, TransferReg(opnd0)); + break; + /* lea */ + case x64::MOP_leaw_m_r: + __ Lea(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_leal_m_r: + __ Lea(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_leaq_m_r: + __ Lea(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* sub , sbb */ + case x64::MOP_subb_r_r: + __ Sub(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subw_r_r: + __ Sub(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subl_r_r: + __ Sub(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subq_r_r: + __ Sub(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subb_i_r: + __ Sub(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subw_i_r: + __ Sub(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subl_i_r: + __ Sub(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subq_i_r: + __ Sub(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subb_m_r: + __ Sub(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subw_m_r: + __ Sub(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subl_m_r: + __ Sub(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subq_m_r: + __ Sub(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subb_r_m: + __ Sub(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subw_r_m: + __ Sub(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subl_r_m: + __ Sub(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subq_r_m: + __ Sub(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subb_i_m: + __ Sub(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subw_i_m: + __ Sub(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subl_i_m: + __ Sub(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_subq_i_m: + __ Sub(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* sub floating point */ + case x64::MOP_subs_r_r: + __ Sub(TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_subs_m_r: + __ Sub(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_subd_r_r: + __ Sub(TransferReg(opnd0), TransferReg(opnd1), false); + break; + case x64::MOP_subd_m_r: + __ Sub(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1), false); + break; + /* and */ + case x64::MOP_andb_r_r: + __ And(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andw_r_r: + __ 
And(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andl_r_r: + __ And(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andq_r_r: + __ And(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andb_i_r: + __ And(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andw_i_r: + __ And(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andl_i_r: + __ And(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andq_i_r: + __ And(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_andb_m_r: + __ And(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andw_m_r: + __ And(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andl_m_r: + __ And(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andq_m_r: + __ And(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_andb_r_m: + __ And(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andw_r_m: + __ And(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andl_r_m: + __ And(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andq_r_m: + __ And(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andb_i_m: + __ And(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andw_i_m: + __ And(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andl_i_m: + __ And(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_andq_i_m: + __ And(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* or */ + case x64::MOP_orb_r_r: + __ Or(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orw_r_r: + __ Or(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orl_r_r: + __ Or(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orq_r_r: + __ Or(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orb_m_r: + __ Or(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orw_m_r: + __ Or(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orl_m_r: + __ Or(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orq_m_r: + __ Or(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_orb_i_r: + __ Or(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orw_i_r: + __ Or(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orl_i_r: + __ Or(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orq_i_r: + __ Or(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_orb_r_m: + __ Or(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orw_r_m: + __ Or(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orl_r_m: + __ Or(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orq_r_m: + __ Or(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orb_i_m: + __ Or(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orw_i_m: + __ Or(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case 
x64::MOP_orl_i_m: + __ Or(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_orq_i_m: + __ Or(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* xor */ + case x64::MOP_xorb_r_r: + __ Xor(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorw_r_r: + __ Xor(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorl_r_r: + __ Xor(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorq_r_r: + __ Xor(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorb_i_r: + __ Xor(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorw_i_r: + __ Xor(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorl_i_r: + __ Xor(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorq_i_r: + __ Xor(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_xorb_m_r: + __ Xor(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorw_m_r: + __ Xor(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorl_m_r: + __ Xor(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorq_m_r: + __ Xor(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_xorb_r_m: + __ Xor(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorw_r_m: + __ Xor(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorl_r_m: + __ Xor(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorq_r_m: + __ Xor(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorb_i_m: + __ Xor(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorw_i_m: + __ Xor(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorl_i_m: + __ Xor(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_xorq_i_m: + __ Xor(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* not */ + case x64::MOP_notb_r: + __ Not(kB, TransferReg(opnd0)); + break; + case x64::MOP_notw_r: + __ Not(kW, TransferReg(opnd0)); + break; + case x64::MOP_notl_r: + __ Not(kL, TransferReg(opnd0)); + break; + case x64::MOP_notq_r: + __ Not(kQ, TransferReg(opnd0)); + break; + case x64::MOP_notb_m: + __ Not(kB, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notw_m: + __ Not(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notl_m: + __ Not(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_notq_m: + __ Not(kQ, TransferMem(opnd0, funcUniqueId)); + break; + /* neg */ + case x64::MOP_negb_r: + __ Neg(kB, TransferReg(opnd0)); + break; + case x64::MOP_negw_r: + __ Neg(kW, TransferReg(opnd0)); + break; + case x64::MOP_negl_r: + __ Neg(kL, TransferReg(opnd0)); + break; + case x64::MOP_negq_r: + __ Neg(kQ, TransferReg(opnd0)); + break; + case x64::MOP_negb_m: + __ Neg(kB, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negw_m: + __ Neg(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negl_m: + __ Neg(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_negq_m: + __ Neg(kQ, TransferMem(opnd0, funcUniqueId)); + break; + /* div, cwd, cdq, cqo */ + case x64::MOP_idivw_r: + __ Idiv(kW, TransferReg(opnd0)); + break; + case x64::MOP_idivl_r: + __ Idiv(kL, TransferReg(opnd0)); + break; + case x64::MOP_idivq_r: + 
__ Idiv(kQ, TransferReg(opnd0)); + break; + case x64::MOP_idivw_m: + __ Idiv(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_idivl_m: + __ Idiv(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_idivq_m: + __ Idiv(kQ, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divw_r: + __ Div(kW, TransferReg(opnd0)); + break; + case x64::MOP_divl_r: + __ Div(kL, TransferReg(opnd0)); + break; + case x64::MOP_divq_r: + __ Div(kQ, TransferReg(opnd0)); + break; + case x64::MOP_divw_m: + __ Div(kW, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divl_m: + __ Div(kL, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_divq_m: + __ Div(kQ, TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_cwd: + __ Cwd(); + break; + case x64::MOP_cdq: + __ Cdq(); + break; + case x64::MOP_cqo: + __ Cqo(); + break; + /* shl */ + case x64::MOP_shlb_r_r: + __ Shl(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlw_r_r: + __ Shl(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shll_r_r: + __ Shl(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlq_r_r: + __ Shl(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlb_i_r: + __ Shl(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlw_i_r: + __ Shl(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shll_i_r: + __ Shl(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlq_i_r: + __ Shl(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shlb_r_m: + __ Shl(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlw_r_m: + __ Shl(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shll_r_m: + __ Shl(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlq_r_m: + __ Shl(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlb_i_m: + __ Shl(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlw_i_m: + __ Shl(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shll_i_m: + __ Shl(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shlq_i_m: + __ Shl(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* sar */ + case x64::MOP_sarb_r_r: + __ Sar(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarw_r_r: + __ Sar(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarl_r_r: + __ Sar(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarq_r_r: + __ Sar(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarb_i_r: + __ Sar(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarw_i_r: + __ Sar(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarl_i_r: + __ Sar(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarq_i_r: + __ Sar(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_sarb_r_m: + __ Sar(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarw_r_m: + __ Sar(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarl_r_m: + __ Sar(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarq_r_m: + __ Sar(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case 
x64::MOP_sarb_i_m: + __ Sar(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarw_i_m: + __ Sar(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarl_i_m: + __ Sar(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_sarq_i_m: + __ Sar(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* shr */ + case x64::MOP_shrb_r_r: + __ Shr(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrw_r_r: + __ Shr(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrl_r_r: + __ Shr(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrq_r_r: + __ Shr(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrb_i_r: + __ Shr(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrw_i_r: + __ Shr(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrl_i_r: + __ Shr(kL, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrq_i_r: + __ Shr(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_shrb_r_m: + __ Shr(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrw_r_m: + __ Shr(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrl_r_m: + __ Shr(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrq_r_m: + __ Shr(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrb_i_m: + __ Shr(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrw_i_m: + __ Shr(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrl_i_m: + __ Shr(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_shrq_i_m: + __ Shr(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + /* jmp */ + case x64::MOP_jmpq_r: + __ Jmp(TransferReg(opnd0)); + break; + case x64::MOP_jmpq_m: + __ Jmp(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_jmpq_l: + __ Jmp(TransferLabel(opnd0, funcUniqueId)); + break; + /* je, jne */ + case x64::MOP_je_l: + __ Je(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_ja_l: + __ Ja(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jae_l: + __ Jae(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jne_l: + __ Jne(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jb_l: + __ Jb(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jbe_l: + __ Jbe(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jg_l: + __ Jg(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jge_l: + __ Jge(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jl_l: + __ Jl(TransferLabel(opnd0, funcUniqueId)); + break; + case x64::MOP_jle_l: + __ Jle(TransferLabel(opnd0, funcUniqueId)); + break; + /* cmp */ + case x64::MOP_cmpb_r_r: + __ Cmp(kB, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_r_r: + __ Cmp(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_r_r: + __ Cmp(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_r_r: + __ Cmp(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_i_r: + __ Cmp(kB, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_i_r: + __ Cmp(kW, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_i_r: + __ Cmp(kL, 
TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_i_r: + __ Cmp(kQ, TransferImm(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_m_r: + __ Cmp(kB, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpw_m_r: + __ Cmp(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpl_m_r: + __ Cmp(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpq_m_r: + __ Cmp(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmpb_r_m: + __ Cmp(kB, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpw_r_m: + __ Cmp(kW, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpl_r_m: + __ Cmp(kL, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpq_r_m: + __ Cmp(kQ, TransferReg(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpb_i_m: + __ Cmp(kB, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpw_i_m: + __ Cmp(kW, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpl_i_m: + __ Cmp(kL, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_cmpq_i_m: + __ Cmp(kQ, TransferImm(opnd0), TransferMem(opnd1, funcUniqueId)); + break; + case x64::MOP_testq_r_r: + __ Test(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + /* setcc */ + case x64::MOP_seta_r: + __ Seta(TransferReg(opnd0)); + break; + case x64::MOP_setae_r: + __ Setae(TransferReg(opnd0)); + break; + case x64::MOP_setb_r: + __ Setb(TransferReg(opnd0)); + break; + case x64::MOP_seto_r: + __ Seto(TransferReg(opnd0)); + break; + case x64::MOP_setbe_r: + __ Setbe(TransferReg(opnd0)); + break; + case x64::MOP_sete_r: + __ Sete(TransferReg(opnd0)); + break; + case x64::MOP_setg_r: + __ Setg(TransferReg(opnd0)); + break; + case x64::MOP_setge_r: + __ Setge(TransferReg(opnd0)); + break; + case x64::MOP_setl_r: + __ Setl(TransferReg(opnd0)); + break; + case x64::MOP_setle_r: + __ Setle(TransferReg(opnd0)); + break; + case x64::MOP_setne_r: + __ Setne(TransferReg(opnd0)); + break; + case x64::MOP_seta_m: + __ Seta(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setae_m: + __ Setae(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setb_m: + __ Setb(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_seto_m: + __ Seto(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setbe_m: + __ Setbe(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_sete_m: + __ Sete(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setl_m: + __ Setl(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setle_m: + __ Setle(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setg_m: + __ Setg(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setge_m: + __ Setge(TransferMem(opnd0, funcUniqueId)); + break; + case x64::MOP_setne_m: + __ Setne(TransferMem(opnd0, funcUniqueId)); + break; + /* cmova & cmovae */ + case x64::MOP_cmovaw_r_r: + __ Cmova(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmoval_r_r: + __ Cmova(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaq_r_r: + __ Cmova(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaw_m_r: + __ Cmova(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmoval_m_r: + __ Cmova(kL, TransferMem(opnd0, funcUniqueId), 
TransferReg(opnd1)); + break; + case x64::MOP_cmovaq_m_r: + __ Cmova(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovaew_r_r: + __ Cmovae(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovael_r_r: + __ Cmovae(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaeq_r_r: + __ Cmovae(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovaew_m_r: + __ Cmovae(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovael_m_r: + __ Cmovae(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovaeq_m_r: + __ Cmovae(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovb & cmovbe */ + case x64::MOP_cmovbw_r_r: + __ Cmovb(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbl_r_r: + __ Cmovb(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbq_r_r: + __ Cmovb(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbw_m_r: + __ Cmovb(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbl_m_r: + __ Cmovb(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbq_m_r: + __ Cmovb(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbew_r_r: + __ Cmovbe(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbel_r_r: + __ Cmovbe(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbeq_r_r: + __ Cmovbe(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovbew_m_r: + __ Cmovbe(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbel_m_r: + __ Cmovbe(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovbeq_m_r: + __ Cmovbe(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmove */ + case x64::MOP_cmovew_r_r: + __ Cmove(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovel_r_r: + __ Cmove(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmoveq_r_r: + __ Cmove(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovew_m_r: + __ Cmove(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovel_m_r: + __ Cmove(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmoveq_m_r: + __ Cmove(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovg & cmovge */ + case x64::MOP_cmovgw_r_r: + __ Cmovg(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgl_r_r: + __ Cmovg(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgq_r_r: + __ Cmovg(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgw_m_r: + __ Cmovg(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgl_m_r: + __ Cmovg(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgq_m_r: + __ Cmovg(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgew_r_r: + __ Cmovge(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgel_r_r: + __ Cmovge(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgeq_r_r: + __ Cmovge(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovgew_m_r: + __ Cmovge(kW, 
TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgel_m_r: + __ Cmovge(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovgeq_m_r: + __ Cmovge(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovl & cmovle */ + case x64::MOP_cmovlw_r_r: + __ Cmovl(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovll_r_r: + __ Cmovl(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlq_r_r: + __ Cmovl(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlw_m_r: + __ Cmovl(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovll_m_r: + __ Cmovl(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlq_m_r: + __ Cmovl(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlew_r_r: + __ Cmovle(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlel_r_r: + __ Cmovle(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovleq_r_r: + __ Cmovle(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovlew_m_r: + __ Cmovle(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovlel_m_r: + __ Cmovle(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovleq_m_r: + __ Cmovle(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + /* cmovne */ + case x64::MOP_cmovnew_r_r: + __ Cmovne(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovnel_r_r: + __ Cmovne(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovneq_r_r: + __ Cmovne(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovnew_m_r: + __ Cmovne(kW, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovnel_m_r: + __ Cmovne(kL, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovneq_m_r: + __ Cmovne(kQ, TransferMem(opnd0, funcUniqueId), TransferReg(opnd1)); + break; + case x64::MOP_cmovow_r_r: + __ Cmovo(kW, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovol_r_r: + __ Cmovo(kL, TransferReg(opnd0), TransferReg(opnd1)); + break; + case x64::MOP_cmovoq_r_r: + __ Cmovo(kQ, TransferReg(opnd0), TransferReg(opnd1)); + break; + /* call */ + case x64::MOP_callq_r: { + __ Call(kQ, TransferReg(opnd0)); + if (insn.GetStackMap() != nullptr) { + auto referenceMap = insn.GetStackMap()->GetReferenceMap().SerializeInfo(); + auto deoptInfo = insn.GetStackMap()->GetDeoptInfo().SerializeInfo(); + __ RecordStackmap(referenceMap, deoptInfo); + } + break; + } + case x64::MOP_callq_l: { + __ Call(kQ, TransferFuncName(opnd0)); + if (insn.GetStackMap() != nullptr) { + auto referenceMap = insn.GetStackMap()->GetReferenceMap().SerializeInfo(); + auto deoptInfo = insn.GetStackMap()->GetDeoptInfo().SerializeInfo(); + __ RecordStackmap(referenceMap, deoptInfo); + } + break; + } + + case x64::MOP_callq_m: { + __ Call(kQ, TransferMem(opnd0, funcUniqueId)); + if (insn.GetStackMap() != nullptr) { + auto referenceMap = insn.GetStackMap()->GetReferenceMap().SerializeInfo(); + auto deoptInfo = insn.GetStackMap()->GetDeoptInfo().SerializeInfo(); + __ RecordStackmap(referenceMap, deoptInfo); + } + break; + } + + /* ret */ + case x64::MOP_retq: + __ Ret(); + break; + case x64::MOP_leaveq: + __ Leave(); + break; + /* imul */ + case x64::MOP_imulw_r_r: + __ Imul(kW, 
TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_imull_r_r:
+            __ Imul(kL, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_imulq_r_r:
+            __ Imul(kQ, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* mul float */
+        case x64::MOP_mulfs_r_r:
+            __ Mul(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_mulfd_r_r:
+            __ Mul(TransferReg(opnd0), TransferReg(opnd1), false);
+            break;
+        /* nop */
+        case x64::MOP_nop:
+            __ Nop();
+            break;
+        /* byte swap */
+        case x64::MOP_bswapl_r:
+            __ Bswap(kL, TransferReg(opnd0));
+            break;
+        case x64::MOP_bswapq_r:
+            __ Bswap(kQ, TransferReg(opnd0));
+            break;
+        case x64::MOP_xchgb_r_r:
+            __ Xchg(kB, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* pseudo instruction */
+        case x64::MOP_pseudo_ret_int:
+            __ DealWithPseudoInst(curMd.GetName());
+            break;
+        /* floating point and */
+        case x64::MOP_andd_r_r:
+            __ And(TransferReg(opnd0), TransferReg(opnd1), false);
+            break;
+        case x64::MOP_ands_r_r:
+            __ And(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* floating div */
+        case x64::MOP_divsd_r:
+            __ Divsd(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_divsd_m:
+            __ Divsd(TransferMem(opnd0, funcUniqueId), TransferReg(opnd1));
+            break;
+        /* convert int2float */
+        case x64::MOP_cvtsi2ssq_r:
+            __ Cvtsi2ss(kQ, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvtsi2ssl_r:
+            __ Cvtsi2ss(kL, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvtsi2sdq_r:
+            __ Cvtsi2sd(kQ, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvtsi2sdl_r:
+            __ Cvtsi2sd(kL, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* convert float2int */
+        case x64::MOP_cvttsd2siq_r:
+            __ Cvttsd2si(kQ, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvttsd2sil_r:
+            __ Cvttsd2si(kL, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvttss2siq_r:
+            __ Cvttss2si(kQ, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvttss2sil_r:
+            __ Cvttss2si(kL, TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* convert float2float */
+        case x64::MOP_cvtss2sd_r:
+            __ Cvtss2sd(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        case x64::MOP_cvtsd2ss_r:
+            __ Cvtsd2ss(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        /* unordered compare */
+        case x64::MOP_ucomisd_r_r:
+            __ Ucomisd(TransferReg(opnd0), TransferReg(opnd1));
+            break;
+        default: {
+            insn.Dump();
+            LogInfo::MapleLogger() << "\n";
+            FATAL(kLncFatal, "unsupported instruction");
+            break;
+        }
+    }
+}
+
+void X64Emitter::EmitFunctionHeader(CGFunc &cgFunc)
+{
+    const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol();
+    uint32 symIdx = funcSymbol->GetNameStrIdx().get();
+    const string &symName = funcSymbol->GetName();
+    __ StoreNameIntoSymMap(symIdx, symName);
+
+    SymbolAttr funcAttr = kSAGlobal;
+    if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_weak)) {
+        funcAttr = kSAWeak;
+    } else if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_local)) {
+        funcAttr = kSALocal;
+    } else if (!cgFunc.GetCG()->GetMIRModule()->IsCModule()) {
+        funcAttr = kSAHidden;
+    }
+    if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) {
+        const string &sectionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName();
+        __ EmitFunctionHeader(symIdx, funcAttr, &sectionName);
+    } else {
+        __ EmitFunctionHeader(symIdx, funcAttr, nullptr);
+    }
+}
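+// Symbol names are never handed to the assembler directly: each name is first
+// registered under an integer id via StoreNameIntoSymMap and referenced by id
+// afterwards. BB labels fold the function's unique id into the id so labels
+// stay unique across functions. A minimal sketch with hypothetical values:
+//
+//     int64 symIdx = CalculateLabelSymIdx(funcUniqueId, labIdx);
+//     __ StoreNameIntoSymMap(symIdx, ".L.7__3");  // id -> printable name
+//     __ EmitBBLabel(symIdx);                     // emitted later by id only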
idx */ + string bbLabel = ".L."; + bbLabel.append(to_string(funcUniqueId)); + bbLabel.append("__"); + bbLabel.append(to_string(labIdx)); + int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, static_cast(labIdx)); + __ StoreNameIntoSymMap(labelSymIdx, bbLabel); + + if (cgFunc.GetCG()->GenerateVerboseCG()) { + const string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx); + /* If label name has @ as its first char, it is not from MIR */ + if (!labelName.empty() && labelName.at(0) != '@') { + __ EmitBBLabel(labelSymIdx, true, freq, &labelName); + } else { + __ EmitBBLabel(labelSymIdx, true, freq); + } + } else { + __ EmitBBLabel(labelSymIdx); + } +} + +/* Specially, emit switch table here */ +void X64Emitter::EmitJmpTable(const CGFunc &cgFunc) +{ + for (auto &it : cgFunc.GetEmitStVec()) { + MIRSymbol *st = it.second; + DEBUG_ASSERT(st->IsReadOnly(), "NYI"); + uint32 symIdx = st->GetNameStrIdx().get(); + const string &symName = st->GetName(); + __ StoreNameIntoSymMap(symIdx, symName); + + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + CHECK_NULL_FATAL(arrayConst); + uint32 funcUniqueId = cgFunc.GetUniqueID(); + vector labelSymIdxs; + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + CHECK_NULL_FATAL(lblConst); + uint32 labelIdx = lblConst->GetValue(); + string labelName = ".L." + to_string(funcUniqueId) + "__" + to_string(labelIdx); + int64 labelSymIdx = CalculateLabelSymIdx(funcUniqueId, labelIdx); + __ StoreNameIntoSymMap(labelSymIdx, labelName); + labelSymIdxs.push_back(labelSymIdx); + } + __ EmitJmpTableElem(symIdx, labelSymIdxs); + } +} + +void X64Emitter::EmitFunctionFoot(CGFunc &cgFunc) +{ + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + uint32 symIdx = funcSymbol->GetNameStrIdx().get(); + SymbolAttr funcAttr = kSALocal; + if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_weak)) { + funcAttr = kSAWeak; + } else if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_local)) { + funcAttr = kSALocal; + } else if (!funcSymbol->GetFunction()->GetAttr(FUNCATTR_static)) { + funcAttr = kSAGlobal; + } + __ EmitFunctionFoot(symIdx, funcAttr); +} + +uint64 X64Emitter::EmitStructure(MIRConst &mirConst, CG &cg, bool belongsToDataSec) +{ + uint32 subStructFieldCounts = 0; + uint64 valueSize = EmitStructure(mirConst, cg, subStructFieldCounts, belongsToDataSec); + return valueSize; +} + +uint64 X64Emitter::EmitStructure(MIRConst &mirConst, CG &cg, uint32 &subStructFieldCounts, bool belongsToDataSec) +{ + StructEmitInfo *sEmitInfo = cg.GetMIRModule()->GetMemPool()->New(); + CHECK_NULL_FATAL(sEmitInfo); + MIRType &mirType = mirConst.GetType(); + MIRAggConst &structCt = static_cast(mirConst); + MIRStructType &structType = static_cast(mirType); + uint8 structPack = static_cast(structType.GetTypeAttrs().GetPack()); + uint64 valueSize = 0; + MIRTypeKind structKind = structType.GetKind(); + /* all elements of struct. */ + uint8 num = structKind == kTypeUnion ? 1 : static_cast(structType.GetFieldsSize()); + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + /* total size of emitted elements size. */ + uint64 sizeInByte = GetSymbolSize(structType.GetTypeIndex()); + uint32 fieldIdx = structKind == kTypeUnion ? structCt.GetFieldIdItem(0) : 1; + for (uint32 i = 0; i < num; ++i) { + MIRConst *elemConst = + structKind == kTypeStruct ? structCt.GetAggConstElement(i + 1) : structCt.GetAggConstElement(fieldIdx); + MIRType *elemType = structKind == kTypeUnion ? 
&(elemConst->GetType()) : structType.GetElemType(i); + MIRType *nextElemType = i != static_cast(num - 1) ? structType.GetElemType(i + 1) : nullptr; + uint64 elemSize = GetSymbolSize(elemType->GetTypeIndex()); + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * k8Bits; + MIRTypeKind elemKind = elemType->GetKind(); + if (elemKind == kTypeBitField) { + if (elemConst == nullptr) { + MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); + elemConst = zeroFill; + } + pair fieldOffsetPair = beCommon->GetFieldOffset(structType, fieldIdx); + uint64 fieldOffset = + static_cast(static_cast(fieldOffsetPair.first)) * static_cast(charBitWidth) + + static_cast(static_cast(fieldOffsetPair.second)); + EmitBitField(*sEmitInfo, *elemConst, nextElemType, fieldOffset); + } else { + if (elemConst != nullptr) { + if (IsPrimitiveVector(elemType->GetPrimType())) { + valueSize += EmitVector(*elemConst); + } else if (IsPrimitiveScalar(elemType->GetPrimType())) { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec, true); + } else if (elemKind == kTypeArray) { + if (elemType->GetSize() != 0) { + valueSize += EmitArray(*elemConst, cg, belongsToDataSec); + } + } else if (elemKind == kTypeStruct || elemKind == kTypeClass || elemKind == kTypeUnion) { + valueSize += EmitStructure(*elemConst, cg, subStructFieldCounts, belongsToDataSec); + fieldIdx += subStructFieldCounts; + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } else { + __ EmitNull(elemSize); + } + sEmitInfo->IncreaseTotalSize(elemSize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + + if (nextElemType != nullptr && nextElemType->GetKind() != kTypeBitField) { + DEBUG_ASSERT(i < static_cast(num - 1), "NYI"); + uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + auto fieldAttr = structType.GetFields()[i + 1].second.second; + nextAlign = fieldAttr.IsPacked() ? 1 : min(nextAlign, structPack); + DEBUG_ASSERT(nextAlign != 0, "expect non-zero"); + /* append size, append 0 when align need. */ + uint64 totalSize = sEmitInfo->GetTotalSize(); + uint64 psize = (totalSize % nextAlign == 0) ? 0 : (nextAlign - (totalSize % nextAlign)); + /* element is uninitialized, emit null constant. */ + if (psize != 0) { + __ EmitNull(psize); + sEmitInfo->IncreaseTotalSize(psize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + } + fieldIdx++; + } + if (structType.GetKind() == kTypeStruct) { + /* The reason of subtracting one is that fieldIdx adds one at the end of the cycle. 
*/ + subStructFieldCounts = fieldIdx - 1; + } else if (structType.GetKind() == kTypeUnion) { + subStructFieldCounts = static_cast(beCommon->GetStructFieldCount(structType.GetTypeIndex())); + } + + uint64 opSize = sizeInByte - sEmitInfo->GetTotalSize(); + if (opSize != 0) { + __ EmitNull(opSize); + } + return valueSize; +} + +uint64 X64Emitter::EmitVector(MIRConst &mirConst, bool belongsToDataSec) +{ + MIRType &mirType = mirConst.GetType(); + MIRAggConst &vecCt = static_cast(mirConst); + size_t uNum = vecCt.GetConstVec().size(); + uint64 valueSize = 0; + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = vecCt.GetConstVecItem(i); + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + uint64 elemSize = EmitSingleElement(*elemConst, belongsToDataSec); + valueSize += elemSize; + } else { + DEBUG_ASSERT(false, "EmitVector: should not run here"); + } + } + size_t lanes = GetVecLanes(mirType.GetPrimType()); + if (lanes > uNum) { + MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + for (size_t i = uNum; i < lanes; i++) { + uint64 elemSize = EmitSingleElement(zConst, belongsToDataSec); + valueSize += elemSize; + } + } + return valueSize; +} + +uint64 X64Emitter::EmitArray(MIRConst &mirConst, CG &cg, bool belongsToDataSec) +{ + MIRType &mirType = mirConst.GetType(); + MIRAggConst &arrayCt = static_cast(mirConst); + MIRArrayType &arrayType = static_cast(mirType); + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx elmTyIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(elmTyIdx); + uint64 valueSize = 0; + if (uNum == 0 && dim) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + elmTyIdx = aSubTy->GetElemTyIdx(); + subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(elmTyIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + valueSize += EmitVector(*elemConst, belongsToDataSec); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (cg.GetMIRModule()->IsCModule()) { + bool strLiteral = false; + if (arrayType.GetDim() == 1) { + MIRType *ety = arrayType.GetElemType(); + if (ety->GetPrimType() == PTY_i8 || ety->GetPrimType() == PTY_u8) { + strLiteral = true; + } + } + valueSize += EmitSingleElement(*elemConst, belongsToDataSec, !strLiteral); + } else { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec); + } + } else if (elemConst->GetType().GetKind() == kTypeArray) { + valueSize += EmitArray(*elemConst, cg, belongsToDataSec); + } else if (elemConst->GetType().GetKind() == kTypeStruct || elemConst->GetType().GetKind() == kTypeClass || + elemConst->GetType().GetKind() == kTypeUnion) { + valueSize += EmitStructure(*elemConst, cg); + } else if (elemConst->GetKind() == kConstAddrofFunc) { + valueSize += EmitSingleElement(*elemConst, belongsToDataSec); + } else { + DEBUG_ASSERT(false, "should not run here"); + } + } + int64 iNum = (arrayType.GetSizeArrayItem(0) > 0) ? 
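/* iNum below counts the trailing array elements that have no explicit
 * initializer; they are emitted as zero fill so the object keeps its declared
 * size. A minimal model of the padding computation (assuming a uniform element
 * size; the helper name is illustrative only):
 *
 *   uint64 TailPaddingBytes(uint64 declared, uint64 inited, uint64 elemSize)
 *   {
 *       return declared > inited ? (declared - inited) * elemSize : 0;
 *   }
 *
 *   // e.g. int a[8] = {1, 2, 3}; -> TailPaddingBytes(8, 3, 4) == 20 zero bytes
 */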
(static_cast(arrayType.GetSizeArrayItem(0))) - uNum : 0; + if (iNum > 0) { + if (uNum > 0) { + uint64 unInSizeInByte = + static_cast(iNum) * + static_cast(GetSymbolSize(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + if (unInSizeInByte != 0) { + __ EmitNull(unInSizeInByte); + } + } else { + uint64 sizeInByte = GetSymbolSize(elmTyIdx) * dim; + __ EmitNull(sizeInByte); + } + } + return valueSize; +} + +void X64Emitter::EmitAddrofElement(MIRConst &mirConst, bool belongsToDataSec) +{ + MIRAddrofConst &symAddr = static_cast(mirConst); + StIdx stIdx = symAddr.GetSymbolIndex(); + MIRSymbol *symAddrSym = + stIdx.IsGlobal() + ? GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + string addrName = symAddrSym->GetName(); + if (!stIdx.IsGlobal() && symAddrSym->GetStorageClass() == kScPstatic) { + uint32 funcUniqueId = CG::GetCurCGFunc()->GetUniqueID(); + addrName += to_string(funcUniqueId); + } + uint32 symIdx = symAddrSym->GetNameStrIdx(); + int32 symAddrOfs = 0; + int32 structFieldOfs = 0; + if (symAddr.GetOffset() != 0) { + symAddrOfs = symAddr.GetOffset(); + } + if (symAddr.GetFieldID() > 1) { + MIRStructType *structType = static_cast(symAddrSym->GetType()); + DEBUG_ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); + structFieldOfs = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*structType, symAddr.GetFieldID()).first; + } + __ StoreNameIntoSymMap(symIdx, addrName); + __ EmitAddrValue(symIdx, symAddrOfs, structFieldOfs, belongsToDataSec); +} + +uint32 X64Emitter::EmitSingleElement(MIRConst &mirConst, bool belongsToDataSec, bool isIndirect) +{ + MIRType &elmType = mirConst.GetType(); + uint64 elemSize = elmType.GetSize(); + MIRConstKind kind = mirConst.GetKind(); + switch (kind) { + case kConstAddrof: + EmitAddrofElement(mirConst, belongsToDataSec); + break; + case kConstAddrofFunc: { + MIRAddroffuncConst &funcAddr = static_cast(mirConst); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFuncTable().at(funcAddr.GetValue()); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + + uint32 symIdx = symAddrSym->GetNameStrIdx(); + const string &name = symAddrSym->GetName(); + __ StoreNameIntoSymMap(symIdx, name); + __ EmitAddrOfFuncValue(symIdx, belongsToDataSec); + break; + } + case kConstInt: { + MIRIntConst &intCt = static_cast(mirConst); + uint32 sizeInBits = elemSize << kLeftShift3Bits; + if (intCt.GetActualBitWidth() > sizeInBits) { + DEBUG_ASSERT(false, "actual value is larger than expected"); + } + int64 value = intCt.GetExtValue(); + __ EmitIntValue(value, elemSize, belongsToDataSec); + break; + } + case kConstLblConst: { + MIRLblConst &lbl = static_cast(mirConst); + uint32 labelIdx = lbl.GetValue(); + uint32 funcUniqueId = lbl.GetPUIdx(); + string labelName = ".L." 
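/* Label constants reuse the same naming scheme as basic-block labels:
 * ".L.<funcUniqueId>__<labelIdx>", which keeps labels unique across all
 * functions emitted into one file. A sketch of the builder (it mirrors the
 * string concatenation used throughout this emitter; the name is illustrative):
 *
 *   std::string MakeLocalLabel(uint32 funcUniqueId, uint32 labelIdx)
 *   {
 *       return ".L." + std::to_string(funcUniqueId) + "__" + std::to_string(labelIdx);
 *   }
 *   // MakeLocalLabel(7, 3) -> ".L.7__3"
 */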
+ to_string(funcUniqueId) + "__" + to_string(labelIdx); + int64 symIdx = CalculateLabelSymIdx(funcUniqueId, labelIdx); + __ StoreNameIntoSymMap(symIdx, labelName); + __ EmitLabelValue(symIdx, belongsToDataSec); + break; + } + case kConstStrConst: { + MIRStrConst &strCt = static_cast(mirConst); + if (isIndirect) { + uint32 strIdx = strCt.GetValue().GetIdx(); + string strName = ".LSTR__" + to_string(strIdx); + int64 strSymIdx = CalculateStrLabelSymIdx(GlobalTables::GetGsymTable().GetSymbolTableSize(), strIdx); + stringPtr.push_back(strIdx); + __ StoreNameIntoSymMap(strSymIdx, strName); + __ EmitIndirectString(strSymIdx, belongsToDataSec); + } else { + const string &ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strCt.GetValue()); + __ EmitDirectString(ustr, belongsToDataSec); + } + break; + } + default: + FATAL(kLncFatal, "EmitSingleElement: unsupport variable kind"); + break; + } + return elemSize; +} + +void X64Emitter::EmitBitField(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset, bool belongsToDataSec) +{ + MIRType &mirType = mirConst.GetType(); + if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { + uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); + structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + EmitCombineBfldValue(structEmitInfo); + DEBUG_ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, + "structEmitInfo's nextFieldOffset > fieldOffset"); + structEmitInfo.SetNextFieldOffset(fieldOffset); + } + uint32 fieldSize = static_cast(mirType).GetFieldSize(); + MIRIntConst &fieldValue = static_cast(mirConst); + /* Truncate the size of FieldValue to the bit field size. */ + if (fieldSize < fieldValue.GetActualBitWidth()) { + fieldValue.Trunc(fieldSize); + } + /* Clear higher Bits for signed value */ + if (structEmitInfo.GetCombineBitFieldValue() != 0) { + structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & + structEmitInfo.GetCombineBitFieldValue()); + } + if (CGOptions::IsBigEndian()) { + uint64 beValue = static_cast(fieldValue.GetExtValue()); + if (fieldValue.IsNegative()) { + beValue = beValue - ((beValue >> fieldSize) << fieldSize); + } + structEmitInfo.SetCombineBitFieldValue((structEmitInfo.GetCombineBitFieldValue() << fieldSize) + beValue); + } else { + structEmitInfo.SetCombineBitFieldValue( + (static_cast(fieldValue.GetExtValue()) << structEmitInfo.GetCombineBitFieldWidth()) + + structEmitInfo.GetCombineBitFieldValue()); + } + structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); + structEmitInfo.IncreaseNextFieldOffset(fieldSize); + if ((nextType == nullptr) || (nextType->GetKind() != kTypeBitField)) { + /* emit structEmitInfo->combineBitFieldValue */ + EmitCombineBfldValue(structEmitInfo); + } +} + +void X64Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo, bool belongsToDataSec) +{ + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * k8Bits; + const uint64 kGetLow8Bits = 0x00000000000000ffUL; + auto emitBfldValue = [&structEmitInfo, charBitWidth, belongsToDataSec, this](bool flag) { + while (structEmitInfo.GetCombineBitFieldWidth() > charBitWidth) { + uint8 shift = flag ? (structEmitInfo.GetCombineBitFieldWidth() - charBitWidth) : 0U; + uint64 tmp = (structEmitInfo.GetCombineBitFieldValue() >> shift) & kGetLow8Bits; + __ EmitBitFieldValue(tmp, belongsToDataSec); + structEmitInfo.DecreaseCombineBitFieldWidth(charBitWidth); + uint64 value = + flag ? 
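/* "flag" is true for big-endian targets: bytes are peeled off the high end of
 * the combined bit-field value, while little-endian emission shifts them off
 * the low end. A simplified little-endian step (8-bit char assumed; the helper
 * name is illustrative only):
 *
 *   void EmitLowByte(uint64 &combined, uint32 &width, Assembler &as)
 *   {
 *       as.EmitBitFieldValue(combined & 0xffULL, true);  // low 8 bits go out first
 *       combined >>= 8;                                  // drop the emitted byte
 *       width -= 8;
 *   }
 */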
structEmitInfo.GetCombineBitFieldValue() - (tmp << structEmitInfo.GetCombineBitFieldWidth()) + : structEmitInfo.GetCombineBitFieldValue() >> charBitWidth; + structEmitInfo.SetCombineBitFieldValue(value); + } + }; + if (CGOptions::IsBigEndian()) { + /* + * If the total number of bits in the bit field is not a multiple of 8, + * the bits must be aligned to 8 bits to prevent errors in the emit. + */ + auto width = static_cast(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); + if (structEmitInfo.GetCombineBitFieldWidth() < width) { + structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() + << (width - structEmitInfo.GetCombineBitFieldWidth())); + structEmitInfo.IncreaseCombineBitFieldWidth( + static_cast(width - structEmitInfo.GetCombineBitFieldWidth())); + } + emitBfldValue(true); + } else { + emitBfldValue(false); + } + if (structEmitInfo.GetCombineBitFieldWidth() != 0) { + uint64 value = structEmitInfo.GetCombineBitFieldValue() & kGetLow8Bits; + __ EmitBitFieldValue(value, belongsToDataSec); + } + CHECK_FATAL(charBitWidth != 0, "divide by zero"); + if ((structEmitInfo.GetNextFieldOffset() % charBitWidth) != 0) { + uint8 value = charBitWidth - static_cast((structEmitInfo.GetNextFieldOffset() % charBitWidth)); + structEmitInfo.IncreaseNextFieldOffset(value); + } + structEmitInfo.SetTotalSize(structEmitInfo.GetNextFieldOffset() / charBitWidth); + structEmitInfo.SetCombineBitFieldValue(0); + structEmitInfo.SetCombineBitFieldWidth(0); +} + +void X64Emitter::EmitLocalVariable(CGFunc &cgFunc) +{ + /* function local pstatic initialization */ + MIRSymbolTable *lSymTab = cgFunc.GetFunction().GetSymTab(); + if (lSymTab != nullptr) { + uint32 funcUniqueId = cgFunc.GetUniqueID(); + size_t lsize = lSymTab->GetSymbolTableSize(); + vector emittedLocalSym; + for (uint32 i = 0; i < lsize; i++) { + MIRSymbol *symbol = lSymTab->GetSymbolFromStIdx(i); + if (symbol != nullptr && symbol->GetStorageClass() == kScPstatic) { + const string &symbolName = symbol->GetName() + to_string(funcUniqueId); + /* Local static names can repeat, if repeat, pass */ + bool found = false; + for (auto name : emittedLocalSym) { + if (name == symbolName) { + found = true; + break; + } + } + if (found) { + continue; + } + emittedLocalSym.push_back(symbolName); + + uint32 symIdx = symbol->GetNameStrIdx().get(); + __ StoreNameIntoSymMap(symIdx, symbolName, true); + + MIRConst *ct = symbol->GetKonst(); + MIRType *ty = symbol->GetType(); + uint64 sizeInByte = GetSymbolSize(ty->GetTypeIndex()); + uint8 alignInByte = GetSymbolAlign(*symbol); + if (ct == nullptr) { + alignInByte = GetSymbolAlign(*symbol, true); + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSALocal, kSBss); + } else { + MIRTypeKind kind = ty->GetKind(); + uint64 valueSize = 0; + bool isFloatTy = (ct->GetKind() == maple::kConstDoubleConst || ct->GetKind() == maple::kConstFloatConst); + auto secType = isFloatTy ? 
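/* Local "pstatic" symbols get the owning function's unique id appended to
 * their name, because the same local static name can occur in many functions
 * of one module. A sketch of the mangling (it matches the concatenation used
 * in EmitLocalVariable above; the helper name is illustrative):
 *
 *   std::string MangleLocalStatic(const std::string &name, uint32 funcUniqueId)
 *   {
 *       return name + std::to_string(funcUniqueId);  // "cache" + 42 -> "cache42"
 *   }
 */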
kSText : kSData; + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSALocal, secType); + if (kind == kTypeStruct || kind == kTypeUnion || kind == kTypeClass) { + valueSize = EmitStructure(*ct, *cgFunc.GetCG()); + } else if (IsPrimitiveVector(ty->GetPrimType())) { + valueSize = EmitVector(*ct); + } else if (kind == kTypeArray) { + valueSize = EmitArray(*ct, *cgFunc.GetCG()); + } else if (isFloatTy) { + MIRType &elmType = ct->GetType(); + uint64 elemSize = elmType.GetSize(); + if (ct->GetKind() == maple::kConstDoubleConst) { + MIRDoubleConst &dCt = static_cast(*ct); + int64 value = dCt.GetIntValue(); + __ EmitFloatValue(symIdx, value, elemSize); + } else { + MIRFloatConst &fCt = static_cast(*ct); + int64 value = fCt.GetIntValue(); + __ EmitFloatValue(symIdx, value, elemSize); + } + } else { + valueSize = EmitSingleElement(*ct, true); + } + __ PostEmitVariable(symIdx, kSALocal, valueSize, isFloatTy); + } + } + } + } +} + +void X64Emitter::EmitStringPointers() +{ + for (uint32 strIdx : stringPtr) { + string ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strIdx); + int64 strSymIdx = CalculateStrLabelSymIdx(GlobalTables::GetGsymTable().GetSymbolTableSize(), strIdx); + __ EmitDirectString(ustr, true, strSymIdx); + } +} + +void X64Emitter::EmitGlobalVariable(CG &cg) +{ + uint64 size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + for (uint64 i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) { + continue; + } + + MIRStorageClass storageClass = mirSymbol->GetStorageClass(); + /* symbols we do not emit here. */ + if (storageClass == kScTypeInfo || storageClass == kScTypeInfoName || storageClass == kScTypeCxxAbi) { + continue; + } + + MIRType *mirType = mirSymbol->GetType(); + if (mirType == nullptr) { + continue; + } + int64 symIdx = mirSymbol->GetNameStrIdx().get(); + uint64 sizeInByte = GetSymbolSize(mirType->GetTypeIndex()); + uint8 alignInByte = GetSymbolAlign(*mirSymbol); + + /* Uninitialized global/static variables */ + if ((storageClass == kScGlobal || storageClass == kScFstatic) && !mirSymbol->IsConst()) { + if (mirSymbol->IsGctibSym()) { + continue; + } + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + SectionKind secKind; + if (mirSymbol->IsThreadLocal()) { + secKind = kSTbss; + } else if (maplebe::CGOptions::IsNoCommon()) { + secKind = kSBss; + } else { + secKind = kSComm; + alignInByte = GetSymbolAlign(*mirSymbol, true); + } + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, secKind); + continue; + } + MIRTypeKind kind = mirType->GetKind(); + /* Initialized global/static variables. 
*/ + if (storageClass == kScGlobal || (storageClass == kScFstatic && !mirSymbol->IsReadOnly())) { + MIRConst *mirConst = mirSymbol->GetKonst(); + uint64 valueSize = 0; + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + if (mirSymbol->IsThreadLocal()) { + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSTdata); + } else { + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSData); + } + if (IsPrimitiveVector(mirType->GetPrimType())) { + valueSize = EmitVector(*mirConst); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + valueSize = EmitSingleElement(*mirConst, true, cg.GetMIRModule()->IsCModule()); + } else if (kind == kTypeArray) { + CHECK_FATAL(!mirSymbol->HasAddrOfValues(), "EmitGlobalVariable: need EmitConstantTable"); + valueSize = EmitArray(*mirConst, cg); + } else if (kind == kTypeStruct || kind == kTypeClass || kind == kTypeUnion) { + CHECK_FATAL(!mirSymbol->HasAddrOfValues(), "EmitGlobalVariable: need EmitConstantTable"); + EmitStructure(*mirConst, cg); + } else { + DEBUG_ASSERT(false, "EmitGlobalVariable: Unknown mirKind"); + } + __ PostEmitVariable(symIdx, kSAGlobal, valueSize); + } else if (mirSymbol->IsReadOnly()) { /* If symbol is const & static */ + MIRConst *mirConst = mirSymbol->GetKonst(); + __ StoreNameIntoSymMap(symIdx, mirSymbol->GetName()); + if (mirConst == nullptr) { + alignInByte = GetSymbolAlign(*mirSymbol, true); + __ EmitVariable(symIdx, sizeInByte, alignInByte, kSAGlobal, kSComm); + } else { + SymbolAttr symAttr = kSAGlobal; + if (mirSymbol->IsWeak()) { + symAttr = kSAWeak; + } else if (storageClass == kScPstatic || + (storageClass == kScFstatic && mirSymbol->sectionAttr == UStrIdx(0))) { + symAttr = kSAStatic; + } + __ EmitVariable(symIdx, sizeInByte, alignInByte, symAttr, kSRodata); + if (IsPrimitiveVector(mirType->GetPrimType())) { + (void)EmitVector(*mirConst, false); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + if (storageClass == kScPstatic) { + (void)EmitSingleElement(*mirConst, false, true); + } else { + (void)EmitSingleElement(*mirConst, false); + } + } else if (kind == kTypeArray) { + (void)EmitArray(*mirConst, cg, false); + } else if (kind == kTypeStruct || kind == kTypeUnion || kind == kTypeClass) { + (void)EmitStructure(*mirConst, cg); + } else { + FATAL(kLncFatal, "Unknown type in Global pstatic"); + } + } + } + } /* end process all mirSymbols.
*/ + EmitStringPointers(); +} + +void X64Emitter::Run(CGFunc &cgFunc) +{ + X64CGFunc &x64CGFunc = static_cast(cgFunc); + uint32 funcUniqueId = cgFunc.GetUniqueID(); + /* emit local variable(s) if exists */ + EmitLocalVariable(cgFunc); + + /* emit function header */ + EmitFunctionHeader(cgFunc); + + /* emit instructions */ + FOR_ALL_BB(bb, &x64CGFunc) { + if (bb->IsUnreachable()) { + continue; + } + + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + EmitBBHeaderLabel(cgFunc, bb->GetLabIdx(), bb->GetFrequency()); + } + + FOR_BB_INSNS(insn, bb) { + EmitInsn(*insn, funcUniqueId); + } + } + + /* emit switch table if exists */ + EmitJmpTable(cgFunc); + + EmitFunctionFoot(cgFunc); + + __ ClearLocalSymMap(); +} + +bool CgEmission::PhaseRun(CGFunc &f) +{ + Emitter *emitter = f.GetCG()->GetEmitter(); + CHECK_NULL_FATAL(emitter); + static_cast(emitter)->Run(f); + return false; +} + +void X64Emitter::EmitDwFormAddr(const DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di) +{ + MapleVector attrvec = die.GetAttrVec(); + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_compile_unit)) { + __ EmitDwFormAddr(true); + } + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_subprogram)) { + /* if decl, name should be found; if def, we try DW_AT_specification */ + DBGDieAttr *name = LFindAttribute(attrvec, static_cast(DW_AT_name)); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, static_cast(DW_AT_specification)); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di.GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), static_cast(DW_AT_name)); + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + } + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap>::iterator it = CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + /* it is a */ + __ EmitLabel(mfunc->GetPuidx(), (*it).second.first); + } else { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + __ EmitLabel(pIdx, attr.GetId()); /* maybe deadbeef */ + } + } + if (attrName == static_cast(DW_AT_low_pc) && tagName == static_cast(DW_TAG_label)) { + DBGDie *subpgm = die.GetParent(); + DEBUG_ASSERT(subpgm->GetTag() == DW_TAG_subprogram, "Label DIE should be a child of a Subprogram DIE"); + DBGDieAttr *fnameAttr = LFindAttribute(subpgm->GetAttrVec(), static_cast(DW_AT_name)); + if (!fnameAttr) { + DBGDieAttr *specAttr = LFindAttribute(subpgm->GetAttrVec(), static_cast(DW_AT_specification)); + CHECK_FATAL(specAttr, "pointer is null"); + DBGDie *twin = di.GetDie(static_cast(specAttr->GetU())); + fnameAttr = LFindAttribute(twin->GetAttrVec(), static_cast(DW_AT_name)); + } + /* todo */ + } + if (attrName == static_cast(DW_AT_high_pc)) { + if (tagName == static_cast(DW_TAG_compile_unit)) { + __ EmitDwFormData8(); + } + } + if (attrName != static_cast(DW_AT_high_pc) && attrName != static_cast(DW_AT_low_pc)) { + __ EmitDwFormAddr(); + } +} + +void X64Emitter::EmitDwFormRef4(DBGDie &die, const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di) +{ + if (attrName == static_cast(DW_AT_type)) { + DBGDie *die0 = di.GetDie(static_cast(attr.GetU())); + if (die0->GetOffset()) { + __ EmitDwFormRef4(die0->GetOffset()); + } else { + /* 
unknown type, missing mplt */ + __ EmitDwFormRef4(di.GetDummyTypeDie()->GetOffset(), true); + } + } else if (attrName == static_cast(DW_AT_specification) || attrName == static_cast(DW_AT_sibling)) { + DBGDie *die0 = di.GetDie(static_cast(attr.GetU())); + DEBUG_ASSERT(die0->GetOffset(), ""); + __ EmitDwFormRef4(die0->GetOffset()); + } else if (attrName == static_cast(DW_AT_object_pointer)) { + GStrIdx thisIdx = GlobalTables::GetStrTable().GetStrIdxFromName(kDebugMapleThis); + DBGDie *that = LFindChildDieWithName(die, static_cast(DW_TAG_formal_parameter), thisIdx); + /* need to find the this or self based on the source language + what is the name for 'this' used in mapleir? + this has to be with respect to a function */ + if (that) { + __ EmitDwFormRef4(that->GetOffset()); + } else { + __ EmitDwFormRef4(attr.GetU()); + } + } else { + __ EmitDwFormRef4(attr.GetU(), false, true); + } +} + +void X64Emitter::EmitDwFormData8(const DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di, + MapleVector &attrvec) +{ + if (attrName == static_cast(DW_AT_high_pc)) { + if (tagName == static_cast(DW_TAG_compile_unit)) { + __ EmitDwFormData8(); + } else if (tagName == static_cast(DW_TAG_subprogram)) { + DBGDieAttr *name = LFindAttribute(attrvec, static_cast(DW_AT_name)); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, static_cast(DW_AT_specification)); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di.GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), static_cast(DW_AT_name)); + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + } + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap>::iterator it = CG::GetFuncWrapLabels().find(mfunc); + uint32 endLabelFuncPuIdx; + uint32 startLabelFuncPuIdx; + uint32 endLabelIdx; + uint32 startLabelIdx; + if (it != CG::GetFuncWrapLabels().end()) { + /* end label */ + endLabelFuncPuIdx = mfunc->GetPuidx(); + endLabelIdx = (*it).second.second; + } else { + /* maybe deadbeef */ + endLabelFuncPuIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + endLabelIdx = (*it).second.second; + } + if (it != CG::GetFuncWrapLabels().end()) { + /* start label */ + startLabelFuncPuIdx = mfunc->GetPuidx(); + startLabelIdx = (*it).second.first; + } else { + DBGDieAttr *lowpc = LFindAttribute(attrvec, static_cast(DW_AT_low_pc)); + CHECK_FATAL(lowpc != nullptr, "lowpc is null in Emitter::EmitDIAttrValue"); + /* maybe deadbeef */ + startLabelFuncPuIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + startLabelIdx = lowpc->GetId(); + } + __ EmitDwFormData8(endLabelFuncPuIdx, startLabelFuncPuIdx, endLabelIdx, startLabelIdx); + } + } else { + __ EmitDwFormData(attr.GetI(), k8Bytes); + } +} + +void X64Emitter::EmitDIAttrValue(DBGDie &die, DBGDieAttr &attr, DwAt attrName, DwTag tagName, DebugInfo &di) +{ + MapleVector &attrvec = die.GetAttrVec(); + switch (attr.GetDwForm()) { + case DW_FORM_string: + __ EmitDwFormString(GlobalTables::GetStrTable().GetStringFromStrIdx(attr.GetId())); + break; + case DW_FORM_strp: + __ EmitDwFormStrp(attr.GetId(), GlobalTables::GetStrTable().StringTableSize()); + break; + case DW_FORM_data1: + __ EmitDwFormData(attr.GetI(), k1Byte); + break; + case DW_FORM_data2: + __ EmitDwFormData(attr.GetI(), k2Bytes); + break; + case DW_FORM_data4: + __ 
EmitDwFormData(attr.GetI(), k4Bytes); + break; + case DW_FORM_data8: + EmitDwFormData8(attr, attrName, tagName, di, attrvec); + break; + case DW_FORM_sec_offset: + if (attrName == static_cast(DW_AT_stmt_list)) { + __ EmitDwFormSecOffset(); + } + break; + case DW_FORM_addr: + EmitDwFormAddr(die, attr, attrName, tagName, di); + break; + case DW_FORM_ref4: + EmitDwFormRef4(die, attr, attrName, tagName, di); + break; + case DW_FORM_exprloc: { + DBGExprLoc *elp = attr.GetPtr(); + switch (elp->GetOp()) { + case DW_OP_call_frame_cfa: + __ EmitDwFormExprlocCfa(elp->GetOp()); + break; + case DW_OP_addr: + __ EmitDwFormExprlocAddr(elp->GetOp(), + GlobalTables::GetStrTable() + .GetStringFromStrIdx(static_cast(elp->GetGvarStridx())) + .c_str()); + break; + case DW_OP_fbreg: + __ EmitDwFormExprlocFbreg(elp->GetOp(), elp->GetFboffset(), + namemangler::GetSleb128Size(elp->GetFboffset())); + break; + case DW_OP_breg0: + case DW_OP_breg1: + case DW_OP_breg2: + case DW_OP_breg3: + case DW_OP_breg4: + case DW_OP_breg5: + case DW_OP_breg6: + case DW_OP_breg7: + __ EmitDwFormExprlocBregn(elp->GetOp(), GetDwOpName(elp->GetOp())); + break; + default: + __ EmitDwFormExprloc(uintptr(elp)); + break; + } + break; + } + default: + CHECK_FATAL(maple::GetDwFormName(attr.GetDwForm()) != nullptr, + "GetDwFormName return null in Emitter::EmitDIAttrValue"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(attr.GetDwForm()) << std::endl; + DEBUG_ASSERT(0, "NYI"); + } +} + +void X64Emitter::EmitDIDebugInfoSection(DebugInfo &mirdi) +{ + __ EmitDIDebugInfoSectionHeader(mirdi.GetDebugInfoLength()); + /* + * 7.5.1.2 type unit header + * currently empty... + * + * 7.5.2 Debugging Information Entry (DIE) + */ + X64Emitter *emitter = this; + MapleVector &abbrevVec = mirdi.GetAbbrevVec(); + ApplyInPrefixOrder(mirdi.GetCompUnit(), [&abbrevVec, &emitter, &mirdi, this](DBGDie *die) { + if (!die) { + /* emit the null entry and return */ + emitter->GetAssembler().EmitDIDebugSectionEnd(kSDebugInfo); + return; + } + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + if (verbose) { + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "GetDwTagName(die->GetTag()) return null in Emitter::EmitDIDebugInfoSection"); + } + uint32 abbrevId = die->GetAbbrevId(); + emitter->GetAssembler().EmitDIDebugInfoSectionAbbrevId(verbose, abbrevId, maple::GetDwTagName(die->GetTag()), + die->GetOffset(), die->GetSize()); + DBGAbbrevEntry *diae = LFindAbbrevEntry(abbrevVec, abbrevId); + CHECK_FATAL(diae != nullptr, "diae is null in Emitter::EmitDIDebugInfoSection"); + std::string sfile, spath; + if (diae->GetTag() == static_cast(DW_TAG_compile_unit) && sfile.empty()) { + /* get full source path from fileMap[2] */ + if (emitter->GetFileMap().size() > k2ByteSize) { /* have src file map */ + std::string srcPath = emitter->GetFileMap()[k2ByteSize]; + size_t t = srcPath.rfind("/"); + DEBUG_ASSERT(t != std::string::npos, ""); + sfile = srcPath.substr(t + 1); + spath = srcPath.substr(0, t); + } + } + + UpdateAttrAndEmit(sfile, mirdi, *diae, *die, spath); + }); +} + +void X64Emitter::UpdateAttrAndEmit(const string &sfile, DebugInfo &mirdi, DBGAbbrevEntry &diae, DBGDie &die, + const string &spath) +{ + X64Emitter *emitter = this; + MapleVector &apl = diae.GetAttrPairs(); /* attribute pair list */ + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + for (size_t i = 0; i < diae.GetAttrPairs().size(); i += k2ByteSize) { + DBGDieAttr *attr = LFindAttribute(die.GetAttrVec(), DwAt(apl[i])); + CHECK_FATAL(attr != nullptr, "attr is null"); 
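/* An abbrev entry stores its attributes as a flat (DW_AT, DW_FORM) pair list,
 * so both this loop and EmitDIDebugAbbrevSection walk it with a stride of two:
 * apl[i] is the attribute code and apl[i + 1] its form. A minimal model of the
 * traversal (EmitOneAttr is a hypothetical stand-in):
 *
 *   for (size_t i = 0; i + 1 < apl.size(); i += 2) {
 *       uint32 dwAt = apl[i];        // e.g. DW_AT_name
 *       uint32 dwForm = apl[i + 1];  // e.g. DW_FORM_strp
 *       EmitOneAttr(dwAt, dwForm);
 *   }
 */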
+ if (!LShouldEmit(unsigned(apl[i + 1]))) { + continue; + } + + /* update DW_AT_name and DW_AT_comp_dir attrs under DW_TAG_compile_unit + to be C/C++ */ + if (!sfile.empty()) { + if (attr->GetDwAt() == static_cast<DwAt>(DW_AT_name)) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(sfile).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } else if (attr->GetDwAt() == static_cast<DwAt>(DW_AT_comp_dir)) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(spath).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } + } + emitter->GetAssembler().EmitDIFormSpecification(unsigned(apl[i + 1])); + emitter->EmitDIAttrValue(die, *attr, unsigned(apl[i]), diae.GetTag(), mirdi); + if (verbose) { + std::string dwAtName = maple::GetDwAtName(unsigned(apl[i])); + std::string dwForName = maple::GetDwFormName(unsigned(apl[i + 1])); + emitter->GetAssembler().EmitDIDwName(dwAtName, dwForName); + if (apl[i + 1] == static_cast<uint32>(DW_FORM_strp) || apl[i + 1] == static_cast<uint32>(DW_FORM_string)) { + emitter->GetAssembler().EmitDIDWFormStr( + GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()).c_str()); + } else if (apl[i] == static_cast<uint32>(DW_AT_data_member_location)) { + emitter->GetAssembler().EmitDIDWDataMemberLocaltion(apl[i + 1], uintptr(attr)); + } + } + emitter->GetAssembler().EmitLine(); + } +} + +void X64Emitter::EmitDIDebugAbbrevSection(DebugInfo &mirdi) +{ + __ EmitDIDebugAbbrevSectionHeader(); + + /* construct a list of DI abbrev entries + 1. DW_TAG_compile_unit 0x11 + 2. DW_TAG_subprogram 0x2e */ + bool verbose = GetCG()->GenerateVerboseAsm(); + for (DBGAbbrevEntry *diae : mirdi.GetAbbrevVec()) { + if (!diae) { + continue; + } + CHECK_FATAL(maple::GetDwTagName(diae->GetTag()) != nullptr, + "GetDwTagName return null in X64Emitter::EmitDIDebugAbbrevSection"); + __ EmitDIDebugAbbrevDiae(verbose, diae->GetAbbrevId(), diae->GetTag(), maple::GetDwTagName(diae->GetTag()), + diae->GetWithChildren()); + + MapleVector<uint32> &apl = diae->GetAttrPairs(); /* attribute pair list */ + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + CHECK_FATAL(maple::GetDwAtName(unsigned(apl[i])) != nullptr, + "GetDwAtName return null in X64Emitter::EmitDIDebugAbbrevSection"); + CHECK_FATAL(maple::GetDwFormName(unsigned(apl[i + 1])) != nullptr, + "GetDwFormName return null in X64Emitter::EmitDIDebugAbbrevSection"); + __ EmitDIDebugAbbrevDiaePairItem(verbose, apl[i], apl[i + 1], maple::GetDwAtName(unsigned(apl[i])), + maple::GetDwFormName(unsigned(apl[i + 1]))); + } + __ EmitDIDebugSectionEnd(kSDebugAbbrev); + __ EmitDIDebugSectionEnd(kSDebugAbbrev); + } + __ EmitDIDebugSectionEnd(kSDebugAbbrev); +} + +void X64Emitter::EmitDIDebugStrSection() +{ + std::vector<std::string> debugStrs; + std::vector<uint32> strps; + for (auto it : GetCG()->GetMIRModule()->GetDbgInfo()->GetStrps()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it); + (void)debugStrs.emplace_back(name); + (void)strps.emplace_back(it); + } + __ EmitDIDebugStrSection(strps, debugStrs, GlobalTables::GetGsymTable().GetSymbolTableSize(), + GlobalTables::GetStrTable().StringTableSize()); +} + +void X64Emitter::EmitDebugInfo(CG &cg) +{ + if (!cg.GetCGOptions().WithDwarf()) { + return; + } + SetupDBGInfo(cg.GetMIRModule()->GetDbgInfo()); + __ EmitDIHeaderFileInfo(); + EmitDIDebugInfoSection(*(cg.GetMIRModule()->GetDbgInfo())); + EmitDIDebugAbbrevSection(*(cg.GetMIRModule()->GetDbgInfo())); + __ EmitDIDebugARangesSection(); + __
EmitDIDebugRangesSection(); + __ EmitDIDebugLineSection(); + EmitDIDebugStrSection(); +} + +MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit) +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_isa.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..20d9f0cccb3622af95858cc807822396fe326e9e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_isa.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_isa.h" +#include "insn.h" + +namespace maplebe { +namespace x64 { +MOperator FlipConditionOp(MOperator flippedOp) +{ + switch (flippedOp) { + case X64MOP_t::MOP_je_l: + return X64MOP_t::MOP_jne_l; + case X64MOP_t::MOP_jne_l: + return X64MOP_t::MOP_je_l; + case X64MOP_t::MOP_ja_l: + return X64MOP_t::MOP_jbe_l; + case X64MOP_t::MOP_jbe_l: + return X64MOP_t::MOP_ja_l; + case X64MOP_t::MOP_jae_l: + return X64MOP_t::MOP_jb_l; + case X64MOP_t::MOP_jb_l: + return X64MOP_t::MOP_jae_l; + case X64MOP_t::MOP_jg_l: + return X64MOP_t::MOP_jle_l; + case X64MOP_t::MOP_jle_l: + return X64MOP_t::MOP_jg_l; + case X64MOP_t::MOP_jge_l: + return X64MOP_t::MOP_jl_l; + case X64MOP_t::MOP_jl_l: + return X64MOP_t::MOP_jge_l; + default: + break; + } + return X64MOP_t::MOP_begin; +} + +uint32 GetJumpTargetIdx(const Insn &insn) +{ + CHECK_FATAL(insn.IsCondBranch() || insn.IsUnCondBranch(), "Not a jump insn"); + return kInsnFirstOpnd; +} +} /* namespace x64 */ +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_live.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a4ab3ce01619bb000205eda011f294a85d92c31b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_live.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "x64_live.h" +#include "x64_cg.h" + +namespace maplebe { +static const std::set intParamRegSet = {RDI, RSI, RDX, RCX, R8, R9}; + +bool X64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) +{ + if (intParamRegSet.find(reg) != intParamRegSet.end()) { + return true; + } + return false; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_local_opt.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_local_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0ce4aa166cd3e50c32b4ac49d2335d2c5e97f858 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_local_opt.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_local_opt.h" +#include "x64_reaching.h" +#include "operand.h" +#include "x64_cg.h" + +namespace maplebe { +void X64LocalOpt::DoLocalCopyProp() +{ + LocalOptimizeManager optManager(*cgFunc, *GetRDInfo()); + optManager.Optimize(); + optManager.Optimize(); +} + +bool CopyRegProp::CheckCondition(Insn &insn) +{ + MOperator mOp = insn.GetMachineOpcode(); + if (mOp != MOP_movb_r_r && mOp != MOP_movw_r_r && mOp != MOP_movl_r_r && mOp != MOP_movq_r_r) { + return false; + } + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®Use = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®Def = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (regUse.GetRegisterNumber() == regDef.GetRegisterNumber()) { + return false; + } + auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO(); + if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) { + return false; + } + return true; +} + +void CopyRegProp::Optimize(BB &bb, Insn &insn) +{ + InsnSet useInsnSet; + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + auto ®Def = static_cast(insn.GetOperand(kInsnSecondOpnd)); + reachingDef->FindRegUseBetweenInsn(regDef.GetRegisterNumber(), nextInsn, bb.GetLastInsn(), useInsnSet); + bool redefined = false; + auto &replaceOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + for (Insn *tInsn : useInsnSet) { + std::vector defInsnVec = + reachingDef->FindRegDefBetweenInsn(replaceOpnd.GetRegisterNumber(), &insn, tInsn, false, false); + if (defInsnVec.size() > 0) { + redefined = true; + } + if (redefined) { + break; + } + propagateOperand(*tInsn, regDef, replaceOpnd); + } + return; +} + +bool CopyRegProp::propagateOperand(Insn &insn, RegOperand &oldOpnd, RegOperand &replaceOpnd) +{ + bool propagateSuccess = false; + uint32 opndNum = insn.GetOperandSize(); + const InsnDesc *md = insn.GetDesc(); + if (insn.IsShift() && oldOpnd.GetRegisterNumber() == x64::RCX) { + return false; + } + if (insn.GetMachineOpcode() == MOP_pseudo_ret_int) { + return false; + } + for (int i = 0; i < opndNum; i++) { + Operand &opnd = 
insn.GetOperand(i); + if (opnd.IsList()) { + /* list operands are used by call, + * which can not be propagated + */ + continue; + } + + auto *regProp = md->opndMD[i]; + if (regProp->IsUse() && !regProp->IsDef() && opnd.IsRegister()) { + RegOperand ®Opnd = static_cast(opnd); + if (RegOperand::IsSameReg(regOpnd, oldOpnd)) { + insn.SetOperand(i, replaceOpnd); + propagateSuccess = true; + } + } + } + return propagateSuccess; +} + +void X64RedundantDefRemove::Optimize(BB &bb, Insn &insn) +{ + const InsnDesc *md = insn.GetDesc(); + RegOperand *regDef = nullptr; + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + auto *opndDesc = md->opndMD[i]; + if (opndDesc->IsRegDef()) { + regDef = static_cast(&opnd); + } + } + InsnSet useInsnSet; + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + reachingDef->FindRegUseBetweenInsn(regDef->GetRegisterNumber(), nextInsn, bb.GetLastInsn(), useInsnSet); + if (useInsnSet.size() == 0) { + bb.RemoveInsn(insn); + return; + } + return; +} +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_memlayout.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f4223495c87315192aa9e198f18ec41ffdff2942 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_memlayout.cpp @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" +#include "x64_call_conv.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; + +uint32 X64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) +{ + /* instantiate a parm locator */ + X64CallConvImpl parmLocator(cgFunc->GetBecommon(), X64CallConvImpl::GetCallConvKind(stmt)); + uint32 sizeOfArgsToStkPass = 0; + size_t i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + DEBUG_ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread"); + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, + "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else { + /* OP_iread */ + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + DEBUG_ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + DEBUG_ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, + "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + + return sizeOfArgsToStkPass; +} + +void X64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const +{ + align = be.GetTypeAlign(typeIdx); + size = static_cast(be.GetTypeSize(typeIdx)); +} + +void X64MemLayout::LayoutVarargParams() +{ + uint32 nIntRegs = 0; + uint32 nFpRegs = 0; + X64CallConvImpl parmlocator(be); + CCLocInfo ploc; + MIRFunction *func = mirFunction; + if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + if (i == 0) { + if (be.HasFuncReturnType(*func)) { + TyIdx tidx = be.GetFuncReturnType(*func); + if (be.GetTypeSize(tidx.GetIdx()) <= k16ByteSize) { + continue; + } + } + } + MIRType *ty = func->GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, func); + if (ploc.reg0 != kRinvalid) { + /* The range here is R0 to R15. However, not all registers in the range are parameter registers. 
+ * If necessary later, you can add parameter register checks. */ + if (ploc.reg0 >= R0 && ploc.reg0 <= R15) { + nIntRegs++; + } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) { + nFpRegs++; + } + } + if (ploc.reg1 != kRinvalid) { + if (ploc.reg1 >= R0 && ploc.reg1 <= R15) { + nIntRegs++; + } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) { + nFpRegs++; + } + } + if (ploc.reg2 != kRinvalid) { + if (ploc.reg2 >= R0 && ploc.reg2 <= R15) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + nFpRegs++; + } + } + if (ploc.reg3 != kRinvalid) { + if (ploc.reg3 >= R0 && ploc.reg3 <= R15) { + nIntRegs++; + } else if (ploc.reg3 >= V0 && ploc.reg3 <= V7) { + nFpRegs++; + } + } + } + + SetSizeOfGRSaveArea((k6BitSize - nIntRegs) * GetPointerSize()); + SetSizeOfVRSaveArea((k6BitSize - nFpRegs) * GetPointerSize() * k2ByteSize); + } +} + +void X64MemLayout::LayoutFormalParams() +{ + X64CallConvImpl parmLocator(be); + CCLocInfo ploc; + for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { + MIRSymbol *sym = mirFunction->GetFormal(i); + uint32 stIndex = sym->GetStIndex(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>(); + SetSymAllocInfo(stIndex, *symLoc); + if (i == 0) { + // The function name here is not appropriate, it should be to determine + // whether the function returns a structure less than 16 bytes. At this + // time, the first parameter is a structure occupant, which has no + // practical significance. + if (be.HasFuncReturnType(*mirFunction)) { + symLoc->SetMemSegment(GetSegArgsRegPassed()); + symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); + continue; + } + } + + MIRType *ty = mirFunction->GetNthParamType(i); + uint32 ptyIdx = ty->GetTypeIndex(); + parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + uint32 size = 0; + uint32 align = 0; + if (ploc.reg0 != kRinvalid) { + if (!sym->IsPreg()) { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsRegPassed()); + if (ty->GetPrimType() == PTY_agg && be.GetTypeSize(ptyIdx) > k4ByteSize) { + /* struct param aligned on 8 byte boundary unless it is small enough */ + align = GetPointerSize(); + } + segArgsRegPassed.SetSize(static_cast<uint32>(RoundUp(segArgsRegPassed.GetSize(), align))); + symLoc->SetOffset(segArgsRegPassed.GetSize()); + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } + } else { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + segArgsStkPassed.SetSize(static_cast<uint32>(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + } +} + +void X64MemLayout::LayoutLocalVariables() +{ + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New<X64SymbolAlloc>(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) {
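/* Aggregates whose natural alignment is below 8 bytes are still bumped to an
 * 8-byte boundary in the locals segment. The offset arithmetic is the usual
 * power-of-two round-up, sketched here for reference:
 *
 *   uint64 AlignUp(uint64 offset, uint64 align)
 *   {
 *       return (offset + align - 1) & ~(align - 1);  // align: power of two
 *   }
 *   // AlignUp(20, 8) == 24 -> next slot for an 8-byte-aligned local
 */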
segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void X64MemLayout::AssignSpillLocationsToPseudoRegisters() +{ + MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab(); + + /* BUG: n_regs include index 0 which is not a valid preg index. */ + size_t nRegs = pregTab->Size(); + spillLocTable.resize(nRegs); + for (size_t i = 1; i < nRegs; ++i) { + PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segLocals); + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); + symLoc->SetOffset(segLocals.GetSize()); + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + spillLocTable[i] = symLoc; + } +} + +void X64MemLayout::LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize) +{ + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), GetPointerSize()))); +} + +void X64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) +{ + LayoutVarargParams(); + LayoutFormalParams(); + + // Need to be aligned ? + segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize())); + segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize())); + + /* allocate the local variables in the stack */ + LayoutLocalVariables(); + LayoutReturnRef(structCopySize, maxParmStackSize); + + // Need to adapt to the cc interface. + structCopySize = 0; + // Scenes with more than 6 parameters are not yet enabled. + maxParmStackSize = 0; + + cgFunc->SetUseFP(cgFunc->UseFP() || static_cast(StackFrameSize()) > kMaxPimm32); +} + +uint64 X64MemLayout::StackFrameSize() const +{ + uint64 total = locals().GetSize() + segArgsRegPassed.GetSize() + segArgsToStkPass.GetSize() + + segGrSaveArea.GetSize() + segVrSaveArea.GetSize() + segSpillReg.GetSize() + + cgFunc->GetFunction().GetFrameReseverdSlot(); // frame reserved slot + return RoundUp(total, stackPtrAlignment); +} + +int32 X64MemLayout::GetGRSaveAreaBaseLoc() +{ + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment)); + return total; +} + +int32 X64MemLayout::GetVRSaveAreaBaseLoc() +{ + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment) + + RoundUp(GetSizeOfVRSaveArea(), stackPtrAlignment)); + return total; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_optimize_common.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_optimize_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1049916d3b90857a9f68cecb665fbd1edaefd7f9 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_optimize_common.cpp @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_optimize_common.h" +#include "x64_cgfunc.h" +#include "cgbb.h" + +namespace maplebe { +void X64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) +{ + Insn *jmpInsn = bb.GetLastInsn(); + if (bb.GetKind() == BB::kBBIgoto) { + CHECK_FATAL(targetOperand.IsLabel(), "NIY"); + CHECK_FATAL(false, "NIY"); + } + jmpInsn->SetOperand(x64::GetJumpTargetIdx(*jmpInsn), targetOperand); +} + +void X64InsnVisitor::ModifyJumpTarget(LabelIdx targetLabel, BB &bb) +{ + std::string lableName = ".L." + std::to_string(GetCGFunc()->GetUniqueID()) + "__" + std::to_string(targetLabel); + ModifyJumpTarget(GetCGFunc()->GetOpndBuilder()->CreateLabel(lableName.c_str(), targetLabel), bb); +} + +void X64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) +{ + ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand(x64::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb); +} + +Insn *X64InsnVisitor::CloneInsn(Insn &originalInsn) +{ + MemPool *memPool = const_cast(CG::GetCurCGFunc()->GetMemoryPool()); + if (originalInsn.IsTargetInsn()) { + if (!originalInsn.IsVectorOp()) { + return memPool->Clone(originalInsn); + } else { + auto *insn = memPool->Clone(*static_cast(&originalInsn)); + insn->SetRegSpecList(static_cast(originalInsn).GetRegSpecList()); + return insn; + } + } else if (originalInsn.IsCfiInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } else if (originalInsn.IsDbgInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } + if (originalInsn.IsComment()) { + return memPool->Clone(originalInsn); + } + CHECK_FATAL(false, "Cannot clone"); + return nullptr; +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label operand index from the given instruction. + * Note: MOP_jmp_m, MOP_jmp_r is a jump instruction, but the target is unknown at compile time. 
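All three ModifyJumpTarget overloads above funnel into the operand-index form; the LabelIdx overload first synthesizes a function-local label name. A small standalone restatement of that naming scheme (the argument values are illustrative; in the backend they come from CGFunc::GetUniqueID() and the target LabelIdx):

```cpp
#include <cstdint>
#include <string>

// Mirrors the scheme in ModifyJumpTarget(LabelIdx, BB&):
// ".L." + function unique id + "__" + label index.
std::string MakeLocalLabelName(uint32_t funcUniqueId, uint32_t labelIdx)
{
    return ".L." + std::to_string(funcUniqueId) + "__" + std::to_string(labelIdx);
}
// MakeLocalLabelName(42, 7) yields ".L.42__7".
```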
+ */ +LabelIdx X64InsnVisitor::GetJumpLabel(const Insn &insn) const +{ + uint32 operandIdx = x64::GetJumpTargetIdx(insn); + if (insn.GetOperand(operandIdx).IsLabelOpnd()) { + return static_cast(insn.GetOperand(operandIdx)).GetLabelIndex(); + } + DEBUG_ASSERT(false, "Operand is not label"); + return 0; +} + +bool X64InsnVisitor::IsCompareInsn(const Insn &insn) const +{ + switch (insn.GetMachineOpcode()) { + case MOP_cmpb_r_r: + case MOP_cmpb_m_r: + case MOP_cmpb_i_r: + case MOP_cmpb_r_m: + case MOP_cmpb_i_m: + case MOP_cmpw_r_r: + case MOP_cmpw_m_r: + case MOP_cmpw_i_r: + case MOP_cmpw_r_m: + case MOP_cmpw_i_m: + case MOP_cmpl_r_r: + case MOP_cmpl_m_r: + case MOP_cmpl_i_r: + case MOP_cmpl_r_m: + case MOP_cmpl_i_m: + case MOP_cmpq_r_r: + case MOP_cmpq_m_r: + case MOP_cmpq_i_r: + case MOP_cmpq_r_m: + case MOP_cmpq_i_m: + case MOP_testq_r_r: + return true; + default: + return false; + } +} + +bool X64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const +{ + return false; +} + +bool X64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const +{ + switch (insn.GetMachineOpcode()) { + case MOP_addb_r_r: + case MOP_addw_r_r: + case MOP_addl_r_r: + case MOP_addq_r_r: + case MOP_addb_m_r: + case MOP_addw_m_r: + case MOP_addl_m_r: + case MOP_addq_m_r: + case MOP_addb_i_r: + case MOP_addw_i_r: + case MOP_addl_i_r: + case MOP_addq_i_r: + case MOP_addb_r_m: + case MOP_addw_r_m: + case MOP_addl_r_m: + case MOP_addq_r_m: + case MOP_addb_i_m: + case MOP_addw_i_m: + case MOP_addl_i_m: + case MOP_addq_i_m: + case MOP_subb_r_r: + case MOP_subw_r_r: + case MOP_subl_r_r: + case MOP_subq_r_r: + case MOP_subb_m_r: + case MOP_subw_m_r: + case MOP_subl_m_r: + case MOP_subq_m_r: + case MOP_subb_i_r: + case MOP_subw_i_r: + case MOP_subl_i_r: + case MOP_subq_i_r: + case MOP_subb_r_m: + case MOP_subw_r_m: + case MOP_subl_r_m: + case MOP_subq_r_m: + case MOP_subb_i_m: + case MOP_subw_i_m: + case MOP_subl_i_m: + case MOP_subq_i_m: + return true; + default: + return false; + } +} + +RegOperand *X64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) +{ + return &GetCGFunc()->GetOpndBuilder()->CreateVReg(pReg.GetRegisterNumber(), pReg.GetSize(), pReg.GetRegisterType()); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_peep.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aa91e62bc3829e0b9895e47a8fc679661acb42d6 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_peep.cpp @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "x64_peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#include "cg_option.h" +#include "x64_cg.h" + +namespace maplebe { +void X64CGPeepHole::Run() +{ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (ssaInfo == nullptr) { + DoNormalOptimize(*bb, *insn); + } + } + } +} + +bool X64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) +{ + CHECK_FATAL(false, "x64 does not support ssa optimize"); + return false; +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) +{ + DEBUG_ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + DEBUG_ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) +{ + /* remove mov x0,x0 when it cast i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void X64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) +{ + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + case MOP_movb_r_r: + case MOP_movw_r_r: + case MOP_movl_r_r: + case MOP_movq_r_r: { + manager->NormalPatternOpt(true); + break; + } + default: + break; + } +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_proepilog.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..958f8f1620f097a80365364a171730e5f7fc2821 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_proepilog.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_proepilog.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "isel.h" +#include "x64_cg.h" + +namespace maplebe { +using namespace maple; +/* + * If a function without callee-saved register, and end with a function call, + * then transfer bl/blr to b/br. + * Return value: true if function do not need Prologue/Epilogue. false otherwise. 
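The self-move peephole in x64_peep.cpp above reduces to one predicate. A reduced sketch of the same check, assuming AT&T operand order (source first), as the surrounding backend code uses:

```cpp
#include <cstdint>

// Illustrative operand model: only the two fields the pattern reads.
struct Reg {
    uint32_t number;    // register id
    uint32_t sizeBits;  // operand view width in bits
};

// Same predicate as RemoveMovingtoSameRegPattern::CheckCondition: a
// register-to-register mov is dead when both operands name one register and
// the source view is at least as wide as the destination (the implicit
// i32 -> i64 widening case called out in the comment).
bool IsSelfMove(const Reg &src, const Reg &dst)
{
    return src.number == dst.number && src.sizeBits >= dst.sizeBits;
}
```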
+ */ +bool X64GenProEpilog::TailCallOpt() +{ + return false; +} + +bool X64GenProEpilog::NeedProEpilog() +{ + return true; +} +void X64GenProEpilog::GenerateCalleeSavedRegs(bool isPush) +{ + X64CGFunc &x64cgFunc = static_cast(cgFunc); + const auto &calleeSavedRegs = x64cgFunc.GetCalleeSavedRegs(); + if (calleeSavedRegs.empty()) { + return; + } + /* CalleeSave(0) = -(FrameSize + CalleeReg - ArgsStk) */ + X64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int64 offset = -(memLayout->StackFrameSize() + static_cast(cgFunc).SizeOfCalleeSaved() - + memLayout->SizeOfArgsToStackPass()); + RegOperand &baseReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + std::vector> calleeRegAndOffsetVec; + for (const auto ® : calleeSavedRegs) { + RegType regType = IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + uint32 regByteSize = IsGPRegister(reg) ? kIntregBytelen : kFpregBytelen; + uint32 regSize = regByteSize * kBitsPerByte; + DEBUG_ASSERT((regSize == k32BitSize || regSize == k64BitSize), "only supported 32/64-bits"); + RegOperand &calleeReg = cgFunc.GetOpndBuilder()->CreatePReg(reg, regSize, regType); + calleeRegAndOffsetVec.push_back(std::pair(static_cast(reg) - 1, static_cast(offset))); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(baseReg, offset, regSize); + if (isPush) { + GeneratePushCalleeSavedRegs(calleeReg, memOpnd, regSize); + } else { + GeneratePopCalleeSavedRegs(calleeReg, memOpnd, regSize); + } + offset += regByteSize; + } + const auto &emitMemoryManager = CGOptions::GetInstance().GetEmitMemoryManager(); + if (emitMemoryManager.codeSpace != nullptr) { + emitMemoryManager.funcCalleeOffsetSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), calleeRegAndOffsetVec); + const int32 fp2SPDelta = 16; // FP + return address; + emitMemoryManager.funcFpSPDeltaSaver(emitMemoryManager.codeSpace, cgFunc.GetName(), fp2SPDelta); + } +} + +void X64GenProEpilog::GeneratePushCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) +{ + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_r_m : x64::MOP_movq_r_m; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePopCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) +{ + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_m_r : x64::MOP_movq_m_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(memOpnd).AddOpndChain(regOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePushUnnamedVarargRegs() +{ + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + X64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size = GetPointerSize(); + uint32 dataSizeBits = size * kBitsPerByte; + int64 offset = -memlayout->GetGRSaveAreaBaseLoc(); + if (memlayout->GetSizeOfGRSaveArea() % kX64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */
+        uint32 startRegno = k6BitSize - (memlayout->GetSizeOfGRSaveArea() / size);
+        DEBUG_ASSERT(startRegno <= k6BitSize, "Incorrect starting GR regno for GR Save Area");
+
+        /* Parameter registers in x86-64: %rdi, %rsi, %rdx, %rcx, %r8, %r9 */
+        std::vector<X64reg> paramRegs = {RDI, RSI, RDX, RCX, R8, R9};
+        for (uint32 i = startRegno; i < paramRegs.size(); i++) {
+            MOperator mMovrmOp = x64::MOP_movq_r_m;
+            RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
+            MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits);
+            Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
+            RegOperand &regOpnd = cgFunc.GetOpndBuilder()->CreatePReg(paramRegs[i], k64BitSize, kRegTyInt);
+            copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd);
+            cgFunc.GetCurBB()->AppendInsn(copyInsn);
+            offset += size;
+        }
+
+        if (!CGOptions::UseGeneralRegOnly()) {
+            offset = -memlayout->GetVRSaveAreaBaseLoc();
+            startRegno = k6BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize));
+            DEBUG_ASSERT(startRegno <= k6BitSize, "Incorrect starting VR regno for VR Save Area");
+            for (uint32 i = startRegno + static_cast<uint32>(V0); i < static_cast<uint32>(V6); i++) {
+                MOperator mMovrmOp = x64::MOP_movq_r_m;
+                RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
+                MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits);
+                Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]);
+                RegOperand &regOpnd =
+                    cgFunc.GetOpndBuilder()->CreatePReg(static_cast<X64reg>(i), k64BitSize, kRegTyInt);
+                copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd);
+
+                cgFunc.GetCurBB()->AppendInsn(copyInsn);
+                offset += (size * k2BitSize);
+            }
+        }
+    }
+}
+
+void X64GenProEpilog::GenerateProlog(BB &bb)
+{
+    auto &x64CGFunc = static_cast<X64CGFunc &>(cgFunc);
+    BB *formerCurBB = cgFunc.GetCurBB();
+    x64CGFunc.GetDummyBB()->ClearInsns();
+    x64CGFunc.GetDummyBB()->SetIsProEpilog(true);
+    cgFunc.SetCurBB(*x64CGFunc.GetDummyBB());
+
+    /* push %rbp */
+    MOperator mPushrOp = x64::MOP_pushq_r;
+    Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPushrOp, X64CG::kMd[mPushrOp]);
+    RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt);
+    pushInsn.AddOpndChain(opndFpReg);
+    cgFunc.GetCurBB()->AppendInsn(pushInsn);
+
+    /* mov %rsp, %rbp */
+    MOperator mMovrrOp = x64::MOP_movq_r_r;
+    Insn &copyInsn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrrOp, X64CG::kMd[mMovrrOp]);
+    RegOperand &opndSpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt);
+    copyInsn.AddOpndChain(opndSpReg).AddOpndChain(opndFpReg);
+    cgFunc.GetCurBB()->AppendInsn(copyInsn);
+
+    /* sub $framesize, %rsp */
+    if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) {
+        MOperator mSubirOp = x64::MOP_subq_i_r;
+        Insn &subInsn = cgFunc.GetInsnBuilder()->BuildInsn(mSubirOp, X64CG::kMd[mSubirOp]);
+        auto *memLayout = static_cast<X64MemLayout *>(cgFunc.GetMemlayout());
+        int64 trueFrameSize = memLayout->StackFrameSize() + static_cast<X64CGFunc &>(cgFunc).SizeOfCalleeSaved();
+        ImmOperand &opndImm = cgFunc.GetOpndBuilder()->CreateImm(k32BitSize, trueFrameSize);
+        subInsn.AddOpndChain(opndImm).AddOpndChain(opndSpReg);
+        cgFunc.GetCurBB()->AppendInsn(subInsn);
+    }
+
+    GenerateCalleeSavedRegs(true);
+    GeneratePushUnnamedVarargRegs();
+
+    bb.InsertAtBeginning(*x64CGFunc.GetDummyBB());
+    x64CGFunc.GetDummyBB()->SetIsProEpilog(false);
+    cgFunc.SetCurBB(*formerCurBB);
+}
+
+void
X64GenProEpilog::GenerateEpilog(BB &bb) +{ + auto &x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + x64CGFunc.GetDummyBB()->ClearInsns(); + x64CGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*x64CGFunc.GetDummyBB()); + + GenerateCalleeSavedRegs(false); + + if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) { + /* + * leave equal with + * mov rsp rbp + * pop rbp + */ + MOperator mLeaveOp = x64::MOP_leaveq; + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mLeaveOp, X64CG::kMd[mLeaveOp]); + cgFunc.GetCurBB()->AppendInsn(popInsn); + } else { + /* pop %rbp */ + MOperator mPopOp = x64::MOP_popq_r; + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPopOp, X64CG::kMd[mPopOp]); + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + pushInsn.AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(pushInsn); + } + /* ret */ + MOperator mRetOp = x64::MOP_retq; + Insn &retInsn = cgFunc.GetInsnBuilder()->BuildInsn(mRetOp, X64CG::kMd[mRetOp]); + cgFunc.GetCurBB()->AppendInsn(retInsn); + + bb.AppendBBInsns(*x64CGFunc.GetDummyBB()); + x64CGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +void X64GenProEpilog::Run() +{ + GenerateProlog(*(cgFunc.GetFirstBB())); + GenerateEpilog(*(cgFunc.GetLastBB())); +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reaching.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..58345335b2ac4be26d6112594ad869a4d70b5bd9 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reaching.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_reaching.h" +#include "x64_cg.h" +#include "insn.h" +#include "isa.h" +namespace maplebe { +/* find insn using register between startInsn and endInsn + * startInsn and endInsn must be in the same BB. 
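The scan documented here collects users of a register until something replaces its value. A reduced model of that loop structure, with the stop conditions simplified (the real code also stops at calls clobbering caller-saved registers and at div, which writes RAX implicitly):

```cpp
#include <vector>

// Only the two fields the sketch needs; -1 means "no register".
struct MiniInsn {
    int def = -1;  // register this insn writes
    int use = -1;  // register this insn reads
};

// Walk one basic block in order, as FindRegUseBetweenInsn does, and collect
// instructions that read `reg` before the first redefinition.
std::vector<const MiniInsn *> UsesUntilRedefined(const std::vector<MiniInsn> &bb, int reg)
{
    std::vector<const MiniInsn *> users;
    for (const MiniInsn &insn : bb) {
        if (insn.use == reg) {
            users.push_back(&insn);  // a read happens before the same insn's write
        }
        if (insn.def == reg) {
            break;  // anything later sees the new value
        }
    }
    return users;
}
```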
+ */ +bool X64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, + InsnSet ®UseInsnSet) const +{ + DEBUG_ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + /* if insn is call and regNO is caller-saved register, then regNO will not be used later */ + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + findFinish = true; + } + + if (IsDiv(*insn) && regNO == x64::RAX) { + /* div insn use rax implicitly */ + findFinish = true; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + /* handle def or def use */ + auto *regProp = md->opndMD[i]; + if (regProp->IsDef() && opnd.IsRegister() && + (static_cast(opnd).GetRegisterNumber() == regNO)) { + findFinish = true; + } + + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + DEBUG_ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + (void)regUseInsnSet.insert(insn); + } + } + continue; + } + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + /* handle use */ + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } + if (findFinish) { + break; + } + } + return findFinish; +} + +std::vector X64ReachingDefinition::FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, + Insn *endInsn) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +std::vector X64ReachingDefinition::FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, + Insn *endInsn) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +bool X64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB *movBB) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &useInsnSet) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const +{ + CHECK_FATAL(false, "x64_reaching analysis 
not implemented yet!"); + return false; +} + +InsnSet X64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +bool X64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::InitStartGen() +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitEhDefine(BB &bb) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, + regno_t regNO) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::AddRetPseudoInsn(BB &bb) +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +int32 X64ReachingDefinition::GetStackSize() const +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return 0; +}; + +void X64ReachingDefinition::AddRetPseudoInsns() +{ + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +}; + +/* reg killed killed by call insn */ +bool X64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const +{ + return x64::IsCallerSaveReg((X64reg)regNO); +} + +bool X64ReachingDefinition::IsDiv(const Insn 
&insn) const +{ + MOperator mOp = insn.GetMachineOpcode(); + return (MOP_idivw_r <= mOp && mOp <= MOP_divq_m); +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reg_info.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..927c3a62535c3fed3f5b80f88dd74743f28f0320 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_reg_info.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "becommon.h" +#include "x64_cgfunc.h" +#include "x64_reg_info.h" + +namespace maplebe { +using namespace maple; +using namespace x64; +void X64RegInfo::Init() +{ + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, the RYP(R12) can not be used. */ + if (IsYieldPointReg(static_cast(regNO))) { + continue; + } + if (!x64::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (x64::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void X64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) +{ + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + for (auto reg : savedRegs) { + x64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool X64RegInfo::IsSpecialReg(regno_t regno) const +{ + X64reg reg = static_cast(regno); + if ((reg == RBP) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register(RYP) can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + return false; +} + +bool X64RegInfo::IsCalleeSavedReg(regno_t regno) const +{ + return x64::IsCalleeSavedReg(static_cast(regno)); +} + +bool X64RegInfo::IsYieldPointReg(regno_t regno) const +{ + return false; +} + +bool X64RegInfo::IsUnconcernedReg(regno_t regNO) const +{ + /* when yieldpoint is enabled, the RYP(R12) can not be used. 
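Looking back at IsDiv in x64_reaching.cpp above: it tests one contiguous opcode range instead of a switch, which silently depends on every div/idiv variant sitting between MOP_idivw_r and MOP_divq_m in the enum. A sketch of the same shape with a cheap guard (the opcode names below are illustrative stand-ins, not the backend's real enum):

```cpp
enum MOp { MOP_movq_r_r, MOP_idivw_r, MOP_idivl_r, MOP_divq_m, MOP_retq };

// One comparison pair instead of a switch; valid only while the div opcodes
// stay contiguous and ordered in the enum.
bool IsDivRange(MOp op)
{
    return MOP_idivw_r <= op && op <= MOP_divq_m;
}

static_assert(MOP_idivw_r < MOP_divq_m, "div opcodes must stay contiguous and ordered");
```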
*/ + if (IsYieldPointReg(static_cast(regNO))) { + return true; + } + return false; +} + +bool X64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const +{ + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + return IsUnconcernedReg(regNO); +} + +void X64RegInfo::Fini() {} + +ListOperand *X64RegInfo::CreateListOperand() +{ + CHECK_FATAL(false, "CreateListOperand, unsupported"); + return nullptr; +} + +Insn *X64RegInfo::BuildMovInstruction(Operand &opnd0, Operand &opnd1) +{ + CHECK_FATAL(false, "BuildMovInstruction, unsupported"); + return nullptr; +} + +RegOperand *X64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) +{ + return &(GetCurrFunction()->GetOpndBuilder()->CreatePReg(regNO, size, kind)); +} + +Insn *X64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) +{ + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movb_r_m: x64::MOP_begin; + break; + case k16BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movw_r_m : x64::MOP_begin; + break; + case k32BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movl_r_m : x64::MOP_movfs_r_m; + break; + case k64BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movq_r_m : x64::MOP_movfd_r_m; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + CHECK_FATAL(mOp != x64::MOP_begin, "NIY"); + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(phyOpnd).AddOpndChain(memOpnd); + return &insn; +} + +Insn *X64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) +{ + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movb_m_r : x64::MOP_begin; + break; + case k16BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movw_m_r : x64::MOP_begin; + break; + case k32BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? x64::MOP_movl_m_r : x64::MOP_movfs_m_r; + break; + case k64BitSize: + mOp = (phyOpnd.GetRegisterType() == kRegTyInt) ? 
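BuildStrInsn and BuildLdrInsn pick the mov variant purely from spill-slot width and register class; 8- and 16-bit float spills fall through to MOP_begin and trip the CHECK_FATAL. A condensed restatement of that mapping (returning opcode names rather than real enum values):

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

enum class RegClass { Int, Float };

// (width, class) -> store opcode name. Float spills only exist at 32/64 bits,
// which is exactly what MOP_begin + CHECK_FATAL enforces in the real code.
std::string SpillStoreOpcode(uint32_t bits, RegClass cls)
{
    switch (bits) {
        case 8:  if (cls == RegClass::Int) return "movb_r_m"; break;
        case 16: if (cls == RegClass::Int) return "movw_r_m"; break;
        case 32: return cls == RegClass::Int ? "movl_r_m" : "movfs_r_m";
        case 64: return cls == RegClass::Int ? "movq_r_m" : "movfd_r_m";
        default: break;
    }
    throw std::invalid_argument("unsupported spill width for this register class");
}
```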
x64::MOP_movq_m_r : x64::MOP_movfd_m_r; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + CHECK_FATAL(mOp != x64::MOP_begin, "should not happen"); + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(memOpnd).AddOpndChain(phyOpnd); + return &insn; +} + +Insn *X64RegInfo::BuildCommentInsn(const std::string &comment) +{ + CHECK_FATAL(false, "Comment Insn, unsupported"); + GetCurrFunction()->GetOpndBuilder()->CreateComment(comment); + return nullptr; +} + +void X64RegInfo::FreeSpillRegMem(regno_t vrNum) +{ + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + x64CGFunc->FreeSpillRegMem(vrNum); +} + +MemOperand *X64RegInfo::GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) +{ + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + return x64CGFunc->GetOrCreatSpillMem(vrNum, bitSize); +} + +MemOperand *X64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, + regno_t regNum, bool &isOutOfRange) +{ + isOutOfRange = false; + return memOpnd; +} + +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_standardize.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_standardize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b3bc53c46df13de8e91c782ef6144a86c61ad2f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "x64_standardize.h" +#include "x64_isa.h" +#include "x64_cg.h" +#include "insn.h" + +namespace maplebe { +#define DEFINE_MAPPING(ABSTRACT_IR, X64_MOP, ...) 
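x64AbstractMapping below is built with the usual X-macro technique: the .def file expands DEFINE_MAPPING once per opcode pair. The same idea in miniature, with invented opcode names:

```cpp
#include <unordered_map>

enum AbstractOp { ABS_add, ABS_sub };
enum X64Op { X64_addq_r_r, X64_subq_r_r };

// In the real code these pairs live in x64_abstract_mapping.def and are
// expanded by the DEFINE_MAPPING definition; this demo inlines two of them.
#define DEMO_MAPPING(ABSTRACT_IR, X64_MOP) {ABSTRACT_IR, X64_MOP},
static const std::unordered_map<AbstractOp, X64Op> kDemoMapping = {
    DEMO_MAPPING(ABS_add, X64_addq_r_r)
    DEMO_MAPPING(ABS_sub, X64_subq_r_r)
};
#undef DEMO_MAPPING
// A lookup then replaces a hand-written switch: kDemoMapping.at(ABS_add).
```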
{ABSTRACT_IR, X64_MOP}, +std::unordered_map x64AbstractMapping = { +#include "x64_abstract_mapping.def" +}; + +static inline X64MOP_t GetMopFromAbstraceIRMop(MOperator mOp) +{ + auto iter = x64AbstractMapping.find(mOp); + if (iter == x64AbstractMapping.end()) { + CHECK_FATAL(false, "NIY mapping"); + } + CHECK_FATAL(iter->second != x64::MOP_begin, "NIY mapping"); + return iter->second; +} + +void X64Standardize::StdzMov(maplebe::Insn &insn) +{ + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd); +} + +void X64Standardize::StdzStrLdr(Insn &insn) +{ + StdzMov(insn); +} + +void X64Standardize::StdzBasicOp(Insn &insn) +{ + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src2 = insn.GetOperand(kInsnThirdOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(src2).AddOpndChain(dest); +} + +void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) +{ + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { + StdzFloatingNeg(insn, cgFunc); + return; + } + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(dest); +} + +void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) +{ + uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); + uint32 destSize = OpndDesSize; + uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + uint32 srcSize = OpndSrcSize; + switch (insn.GetMachineOpcode()) { + case abstract::MOP_zext_rr_64_8: + case abstract::MOP_zext_rr_64_16: + case abstract::MOP_zext_rr_64_32: + destSize = k32BitSize; + break; + case abstract::MOP_cvt_fr_u32: + srcSize = k64BitSize; + break; + case abstract::MOP_cvt_rf_u32: + destSize = k64BitSize; + break; + default: + break; + } + MOperator directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + if (directlyMappingMop != abstract::MOP_undef) { + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); + RegOperand *src = static_cast(opnd0); + if (srcSize != OpndSrcSize) { + src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), srcSize, src->GetRegisterType()); + } + Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); + RegOperand *dest = static_cast(opnd1); + if (destSize != OpndDesSize) { + dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), destSize, dest->GetRegisterType()); + } + insn.CleanAllOperand(); + insn.AddOpndChain(*src).AddOpndChain(*dest); + } else { + CHECK_FATAL(false, "NIY mapping"); + } +} + +/* x86 does not have floating point neg instruction + * neg_f operand0 operand1 + * ==> + * movd xmm0 R1 + * 64: movabsq 0x8000000000000000 R2 + * xorq R2 R1 + * 32: xorl 0x80000000 R1 + * movd R1 xmm0 +*/ +void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) +{ + MOperator mOp = insn.GetMachineOpcode(); + uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? k32BitSize : k64BitSize; + + // mov dest -> tmpOperand0 + MOperator movOp = mOp == abstract::MOP_neg_f_32 ? 
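The StdzFloatingNeg expansion below is "flip the IEEE sign bit through an integer register". A standalone check of the bit trick it emits (std::bit_cast needs C++20):

```cpp
#include <bit>      // std::bit_cast
#include <cstdint>

// The semantics of the movd / movabs / xorq sequence: toggle only the sign bit.
double NegateViaXor(double x)
{
    uint64_t bits = std::bit_cast<uint64_t>(x);
    bits ^= uint64_t{1} << 63;           // the movabs 0x8000000000000000 / xorq pair
    return std::bit_cast<double>(bits);  // move back to the xmm register
}
// NegateViaXor(1.5) == -1.5, and NegateViaXor(0.0) is -0.0: a pure sign flip,
// so it also behaves correctly for NaN and infinity, unlike computing 0.0 - x.
```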
x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; + RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, movInsn0); + + // 32 : xorl 0x80000000 tmpOperand0 + // 64 : movabs 0x8000000000000000 tmpOperand1 + // xorq tmpOperand1 tmpOperand0 + ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + if (mOp == abstract::MOP_neg_f_64) { + Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); + insn.GetBB()->InsertInsnBefore(insn, movabs); + + MOperator xorOp = x64::MOP_xorq_r_r; + Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, xorq); + } else { + MOperator xorOp = x64::MOP_xorl_i_r; + Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, xorq); + } + + // mov tmpOperand0 -> dest + Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); + insn.GetBB()->InsertInsnBefore(insn, movq); + + insn.GetBB()->RemoveInsn(insn); + return; +} + +void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) +{ + RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + /* count operand cvt -> PTY_u8 */ + if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { + countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), GetPrimTypeBitSize(PTY_u8), + countOpnd->GetRegisterType()); + } + /* copy count operand to cl(rcx) register */ + RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + X64MOP_t copyMop = x64::MOP_movb_r_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); + insn.GetBB()->InsertInsnBefore(insn, copyInsn); + /* shift OP */ + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + RegOperand &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + insn.CleanAllOperand(); + insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); +} + +} // namespace maplebe diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/cg/yieldpoint.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/cg/yieldpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..54bdf4ef6f944da67534c20c421e505ecadd1234 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/cg/yieldpoint.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "yieldpoint.h" +#if TARGAARCH64 +#include "aarch64_yieldpoint.h" +#elif TARGRISCV64 +#include "riscv64_yieldpoint.h" +#endif +#if TARGARM32 +#include "arm32_yieldpoint.h" +#endif +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +bool CgYieldPointInsertion::PhaseRun(maplebe::CGFunc &f) +{ + YieldPointInsertion *yieldPoint = nullptr; +#if TARGAARCH64 || TARGRISCV64 + yieldPoint = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + yieldPoint = GetPhaseAllocator()->New(f); +#endif + yieldPoint->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..615d3e2153134dea8480e52bbfc82ef4172f3a66 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "litecg.h" +#include "mir_builder.h" +#include "cg_option.h" +#include "mad.h" +#include "cg.h" +#include "maple_phase_support.h" +#include "maple_phase.h" +#include "cg_phasemanager.h" +#include "triple.h" +#include + +namespace maple { + +namespace litecg { + +using namespace maplebe; + +LiteCG::LiteCG(Module &mirModule) : module(mirModule) +{ + // Create CGOption: set up default options + // TODO: should we make CGOptions local? + cgOptions = &CGOptions::GetInstance(); + cgOptions->EnableLiteCG(); + cgOptions->SetEmitFileType("obj"); // TODO: to kElf + // cgOptions->SetTarget(X86_64); + // cgOptions->SetDebug(); + cgOptions->SetQuiet(true); +#if TARGAARCH64 + Triple::GetTriple().Init(); +#endif + // cgOptions->GetDumpPhases().insert("*"); + // cgOptions->FuncFilter("*"); + // cgOptions->SetDefaultOptions(module); + + // module information prepare + std::string moduleName = module.GetFileName(); + GStrIdx fileNameStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(moduleName); + + // TODO: is this strictly required? 
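Since this constructor wires CGOptions up for a one-shot pipeline, a hedged usage sketch may help; it assumes the OutputType counterpart of kAsm is named kObj (only kAsm appears in this file) and that `module` was built elsewhere, e.g. via LMIRBuilder:

```cpp
#include "litecg.h"

void EmitModule(maple::Module &module)
{
    maple::litecg::LiteCG liteCG(module);
    liteCG.SetOutputType(maple::litecg::kObj)  // the constructor default is "obj" anyway
          .SetVerbose(maple::litecg::kQuiet);  // setters return *this, so they chain
    liteCG.DoCG();  // runs the cg function phase manager over the module
}
```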
+ GStrIdx nameStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename"); + module.PushFileInfoPair(MIRInfoPair(nameStrIdx, fileNameStrIdx.GetIdx())); + module.PushFileInfoIsString(true); + + module.SetFlavor(kFlavorUnknown); // TODO: need a new flavor + // module.SetSrcLang(kSrcLangC); // TODO: fix this + module.GetImportFiles().clear(); + + // Setup output file name + module.SetOutputFileName(moduleName + ".s"); +} + +LiteCG &LiteCG::SetOutputType(OutputType config) +{ + cgOptions->SetEmitFileType((config == kAsm) ? "asm" : "obj"); + return *this; +} + +LiteCG &LiteCG::SetTargetType(TargetType config) +{ + // TODO: update target support + // cgOptions->SetTarget(X86_64); + return *this; +} + +LiteCG &LiteCG::SetDebugType(DebugType config) +{ + // TODO: fix the exposed debug options + // cgOptions->SetDebug(?); + return *this; +} + +LiteCG &LiteCG::SetVerbose(InfoType config) +{ + cgOptions->SetQuiet((config == kQuiet) ? true : false); + return *this; +} + +void LiteCG::DumpIRToFile(const std::string &fileName) +{ + module.DumpToFile(fileName); +} + +LiteCG &LiteCG::SetupLiteCGEmitMemoryManager( + void *codeSpace, MemoryManagerAllocateDataSectionCallback dataSectionAllocator, + MemoryManagerSaveFunc2AddressInfoCallback funcAddressSaver, + maplebe::MemoryManagerSaveFunc2FPtoPrevSPDeltaCallback funcFpSPDeltaSaver, + maplebe::MemoryManagerSaveFunc2CalleeOffsetInfoCallback funcCallOffsetSaver, + maplebe::MemoryManagerSavePC2DeoptInfoCallback pc2DeoptInfoSaver, + maplebe::MemoryManagerSavePC2CallSiteInfoCallback pc2CallSiteInfoSaver) +{ + cgOptions->SetupEmitMemoryManager(codeSpace, dataSectionAllocator, funcAddressSaver, funcFpSPDeltaSaver, + funcCallOffsetSaver, pc2DeoptInfoSaver, pc2CallSiteInfoSaver); + return *this; +} + +void LiteCG::DoCG() +{ + bool timePhases = false; + // MPLTimer timer; + // timer.Start(); + + Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel()); + + // TODO: not sure how to do this. + auto cgPhaseManager = std::make_unique(memPoolCtrler, "cg function phasemanager"); + const MaplePhaseInfo *cgPMInfo = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&CgFuncPM::id); + auto *cgfuncPhaseManager = static_cast(cgPMInfo->GetConstructor()(cgPhaseManager.get())); + cgfuncPhaseManager->SetQuiet(CGOptions::IsQuiet()); + + if (timePhases) { + cgfuncPhaseManager->InitTimeHandler(); + } + + /* It is a specifc work around (need refactor) */ + cgfuncPhaseManager->SetCGOptions(cgOptions); + (void)cgfuncPhaseManager->PhaseRun(module); + + if (timePhases) { + cgfuncPhaseManager->DumpPhaseTime(); + } + // timer.Stop(); + // LogInfo::MapleLogger() << "Mplcg consumed " << timer.ElapsedMilliseconds() << "ms" << '\n'; +} + +} // namespace litecg + +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg_test.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4da0e77a985700e0b898e58ce5e48eac3d39930b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/litecg_test.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + a basic example to use litecg & lmir builder API. + + At this stage, it shows: + - The basic workflow of using litecg API. + - using lmir builder API to construct in-memory IR input to litecg. + - and then dump the input IR to a text-format maple IR file. + */ + +#include "litecg.h" + +using namespace maple::litecg; + +#define __ irBuilder-> // make the code looks better + +void generateIR(LMIRBuilder *irBuilder) +{ + /* case 1: Note here parameters are implicitly defined without return + Var handle, thus requires GetLocalVar. + + i32 function1(i32 param1, i64 param2) { + return param1 + (i32) param2; + } + */ + Function &function1 = __ DefineFunction("function1") + .Param(__ i32Type, "param1") + .Param(__ i64Type, "param2") + .Return(__ i32Type) + .Done(); + + __ SetCurFunc(function1); + + BB &bb = __ CreateBB(); + Stmt &retStmt = __ Return(__ Add(__ i32Type, __ Dread(__ GetLocalVar("param1")), + __ Trunc(__ i64Type, __ i32Type, __ Dread(__ GetLocalVar("param2"))))); + __ AppendStmt(bb, retStmt); + __ AppendBB(bb); + + // TODO: to be complete + /* case 2 + + */ +} + +int main() +{ + LiteCG liteCG("lmirexample"); + auto irBuilder = liteCG.GetIRBuilder(); + generateIR(&irBuilder); + + liteCG.DumpIRToFile("lmirexample.mpl"); + return 0; +} diff --git a/ecmascript/compiler/codegen/maple/maple_be/src/litecg/lmir_builder.cpp b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/lmir_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0e808782e07f8f4a6e60575c5beb327e2a262d52 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_be/src/litecg/lmir_builder.cpp @@ -0,0 +1,965 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "lmir_builder.h" +#include "mir_builder.h" + +namespace maple { +namespace litecg { + +// not exposed any longer +inline Type *GetPrimitiveType(PrimType type) +{ + return GlobalTables::GetTypeTable().GetPrimType(type); +} + +Module *CreateModuleWithName(const std::string &name) +{ + return new Module(name); +} + +void ReleaseModule(Module *module) +{ + delete module; + // clean current globals + GlobalTables::Reset(); +} + +bool Expr::IsDread() const +{ + return node->GetOpCode() == OP_dread; +} + +bool Expr::IsRegread() const +{ + return node->GetOpCode() == OP_regread; +} + +bool Expr::IsConstValue() const +{ + return node->GetOpCode() == OP_constval; +} + +LMIRBuilder::LMIRBuilder(Module &module_) : mirBuilder(*module_.GetMIRBuilder()), module(module_) +{ + i8Type = GetPrimitiveType(PTY_i8); + i16Type = GetPrimitiveType(PTY_i16); + i32Type = GetPrimitiveType(PTY_i32); + i64Type = GetPrimitiveType(PTY_i64); + i128Type = GetPrimitiveType(PTY_i128); + u1Type = GetPrimitiveType(PTY_u1); + u8Type = GetPrimitiveType(PTY_u8); + u16Type = GetPrimitiveType(PTY_u16); + u32Type = GetPrimitiveType(PTY_u32); + u64Type = GetPrimitiveType(PTY_u64); + u128Type = GetPrimitiveType(PTY_u128); + voidType = GetPrimitiveType(PTY_void); + f32Type = GetPrimitiveType(PTY_f32); + f64Type = GetPrimitiveType(PTY_f64); + + // builtin types: commonly used derived types + strType = CreatePtrType(u8Type); // u8PtrType + i64PtrType = CreatePtrType(i64Type); + i64RefType = CreateRefType(i64Type); +} + +void LMIRBuilder::DumpIRToFile(const std::string fileName) +{ + module.DumpToFile(fileName); +} + +LiteCGTypeKind LMIRBuilder::LiteCGGetTypeKind(Type *type) const +{ + switch (type->GetKind()) { + case MIRTypeKind::kTypeInvalid: + return kLiteCGTypeInvalid; + case MIRTypeKind::kTypeUnknown: + return kLiteCGTypeUnknown; + case MIRTypeKind::kTypeScalar: + return kLiteCGTypeScalar; + case MIRTypeKind::kTypeBitField: + return kLiteCGTypeBitField; + case MIRTypeKind::kTypeArray: + return kLiteCGTypeArray; + case MIRTypeKind::kTypeFArray: + return kLiteCGTypeFArray; + case MIRTypeKind::kTypeJArray: + return kLiteCGTypeJArray; + case MIRTypeKind::kTypeStruct: + return kLiteCGTypeStruct; + case MIRTypeKind::kTypeUnion: + return kLiteCGTypeUnion; + case MIRTypeKind::kTypeClass: + return kLiteCGTypeClass; + case MIRTypeKind::kTypeInterface: + return kLiteCGTypeInterface; + case MIRTypeKind::kTypeStructIncomplete: + return kLiteCGTypeStructIncomplete; + case MIRTypeKind::kTypeClassIncomplete: + return kLiteCGTypeClassIncomplete; + case MIRTypeKind::kTypeConstString: + return kLiteCGTypeConstString; + case MIRTypeKind::kTypeInterfaceIncomplete: + return kLiteCGTypeInterfaceIncomplete; + case MIRTypeKind::kTypePointer: + return kLiteCGTypePointer; + case MIRTypeKind::kTypeFunction: + return kLiteCGTypeFunction; + case MIRTypeKind::kTypeVoid: + return kLiteCGTypeVoid; + case MIRTypeKind::kTypeByName: + return kLiteCGTypeByName; + case MIRTypeKind::kTypeParam: + return kLiteCGTypeParam; + case MIRTypeKind::kTypeInstantVector: + return kLiteCGTypeInstantVector; + case MIRTypeKind::kTypeGenericInstant: + return kLiteCGTypeGenericInstant; + default: + return kLiteCGTypeUnknown; + } +} + +void LMIRBuilder::SetCallStmtDeoptBundleInfo(Stmt &callNode, + const std::unordered_map &deoptBundleInfo) +{ + MapleUnorderedMap deoptInfos(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto itr : deoptBundleInfo) { + auto value = itr.second; + if (value.kind == kPregKind) { + deoptInfos.insert(std::pair(itr.first, 
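A hedged sketch of feeding deopt state into a call through SetCallStmtDeoptBundleInfo below. The LiteCGValue field and enum names follow the loop in that function; the int key type and the slot numbering are assumptions of this example, and only vreg and constant values survive the CHECK_FATAL on symbols:

```cpp
#include <unordered_map>

void AttachDeoptVreg(maple::litecg::LMIRBuilder &builder, maple::litecg::Stmt &call,
                     maple::PregIdx framePreg)
{
    std::unordered_map<int, maple::litecg::LiteCGValue> deoptInfo;
    maple::litecg::LiteCGValue value;
    value.kind = maple::litecg::kPregKind;  // symbol values are rejected above
    value.pregIdx = framePreg;
    deoptInfo[0] = value;  // slot 0 is an arbitrary illustrative id
    builder.SetCallStmtDeoptBundleInfo(call, deoptInfo);
}
```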
MapleValue(value.pregIdx))); + } else if (itr.second.kind == kSymbolKind) { + CHECK_FATAL(false, "symbol is not supported currently"); + deoptInfos.insert(std::pair(itr.first, MapleValue(value.symbol))); + } else { + deoptInfos.insert(std::pair(itr.first, MapleValue(value.constVal))); + } + } + if (callNode.GetOpCode() == OP_call) { + static_cast(callNode).SetDeoptBundleInfo(deoptInfos); + } else { + static_cast(callNode).SetDeoptBundleInfo(deoptInfos); + } +} + +Type *LMIRBuilder::CreatePtrType(Type *mirType) +{ + auto type = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ptr); + return type; +} + +Type *LMIRBuilder::CreateRefType(Type *mirType) +{ + auto type = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ref); + return type; +} + +bool LMIRBuilder::IsHeapPointerType(Type *mirType) const +{ + return mirType->GetPrimType() == PTY_ref; +} + +ArrayType *LMIRBuilder::CreateArrayType(Type *elemType, std::vector &dimSize) +{ + auto type = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, dimSize.size(), dimSize.data()); + return static_cast(type); +} + +Type *LMIRBuilder::CreateStructTypeInternal(const String &name, + std::vector> &fields_) +{ + FieldVector parentFields; // parentFields not used. + // TODO: not sure about the cost + FieldVector fields; + for (auto field : fields_) { + auto strIdx = mirBuilder.GetOrCreateStringIndex(field.first.data()); + fields.push_back(FieldPair(strIdx, TyIdxFieldAttrPair(field.second->GetTypeIndex(), FieldAttrs()))); + } + auto type = GlobalTables::GetTypeTable().GetOrCreateStructType(name, fields, parentFields, module); + return type; +} + +Type *LMIRBuilder::GetStructType(const String &name) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(name); + TyIdx typeIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(strIdx); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx); + return type; +} + +StructConst &LMIRBuilder::CreateStructConstInternal(StructType *type) +{ + return *module.GetMemPool()->New(module, *type); +} + +ArrayConst &LMIRBuilder::CreateArrayConstInternal(ArrayType *type) +{ + return *module.GetMemPool()->New(module, *type); +} + +FieldOffset LMIRBuilder::GetFieldOffset(StructType *structType, FieldId fieldId) +{ + // TODO: we should avoid access CG internals here + // return Globals::GetInstance()->GetBECommon()->GetFieldOffset(*structType, fieldId); + return std::pair(0, 0); +} + +Type *LMIRBuilder::CreateFuncType(std::vector params_, Type *retType, bool isVarg) +{ + std::vector params; + std::vector attrs; // not used so far + + for (const auto param : params_) { + params.push_back(param->GetTypeIndex()); + attrs.push_back(TypeAttrs()); + } + + auto type = GlobalTables::GetTypeTable().GetOrCreateFunctionType(retType->GetTypeIndex(), params, attrs, isVarg); + return type; +} + +Type *LMIRBuilder::LiteCGGetPointedType(Type *type) +{ + if (type == nullptr || !type->IsMIRPtrType()) { + return nullptr; + } + return static_cast(type)->GetPointedFuncType(); +} + +std::vector LMIRBuilder::LiteCGGetFuncParamTypes(Type *type) +{ + std::vector ¶mTypeList = static_cast(type)->GetParamTypeList(); + std::vector paramTypes; + for (const auto paramType : paramTypeList) { + paramTypes.push_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(paramType)); + } + return paramTypes; +} + +Type *LMIRBuilder::LiteCGGetFuncReturnType(Type *type) +{ + if (type == nullptr || !type->IsMIRFuncType()) { + return nullptr; + } + TyIdx retTypeIndex = 
static_cast(type)->GetRetTyIdx(); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTypeIndex); +} + +// TODO: not sure it's FUNCATTR_local or FUNCATTR_static +static const FuncAttrKind FuncAttrMapTable[] = { + // FUNC_global, FUNC_weak, FUNC_internal + FUNCATTR_extern, FUNCATTR_weak, FUNCATTR_local}; + +static const FuncAttrKind FuncConvAttrMapTable[] = { + // CCall, Web_Kit_JS_Call, GHC_Call + FUNCATTR_ccall, FUNCATTR_webkitjscall, FUNCATTR_ghcall}; + +static const StmtAttrKind StmtConvAttrMapTable[] = {STMTATTR_ccall, STMTATTR_webkitjscall, STMTATTR_ghcall}; + +Function &LMIRBuilder::CreateFunctionInternal(const String &name, Type *retType, Params ¶ms_, bool isVargs, + bool needBody, FuncAttr attr, ConvAttr convAttr) +{ + ArgVector params(module.GetMPAllocator().Adapter()); + for (auto param : params_) { + params.push_back(param); + } + auto &function = *mirBuilder.CreateFunction(name, *retType, params, isVargs, needBody); + // TODO: check for attr + function.SetAttr(FuncAttrMapTable[attr]); + function.SetAttr(FuncConvAttrMapTable[convAttr]); + // It defines a function, add to module + if (needBody) { + module.AddFunction(&function); + } + return function; +} + +Function *LMIRBuilder::GetFunc(const String &name) +{ + return mirBuilder.GetFunctionFromName(name); +} + +void LMIRBuilder::SetCurFunc(Function &function) +{ + module.SetCurFunction(&function); +} + +Function &LMIRBuilder::GetCurFunction() const +{ + return *module.CurFunction(); +} + +void LMIRBuilder::RenameFormal2Preg(Function &func) +{ + if (!func.GetPregTab()) { + // func no body, skip rename + return; + } + auto &formalVec = func.GetFormalDefVec(); + for (uint32 i = 0; i < formalVec.size(); i++) { + MIRSymbol *oldSym = formalVec[i].formalSym; + if (!oldSym || !IsPrimitiveScalar(oldSym->GetType()->GetPrimType())) { + continue; + } + PregIdx regid = CreatePreg(oldSym->GetType()); + MIRSymbol *newSym = mirBuilder.CreatePregFormalSymbol(oldSym->GetTyIdx(), regid, func); + formalVec[i].formalSym = newSym; + } +} + +void LMIRBuilder::SetFuncFrameResverdSlot(int slot) +{ + module.CurFunction()->GetFuncAttrs().SetFrameResverdSlot(slot); +} + +void LMIRBuilder::SetFuncFramePointer(const String &val) +{ + module.CurFunction()->GetFuncAttrs().SetFramePointer(val); +} + +MIRPreg *LMIRBuilder::LiteCGGetPreg(Function &func, int32_t pRegNo) +{ + return func.GetPregItem(pRegNo); +} + +Expr LMIRBuilder::LiteCGGetPregFP(Function &func) +{ + return Regread(kSregFp); +} + +Expr LMIRBuilder::LiteCGGetPregSP() +{ + return Regread(kSregSp); +} + +// TODO: not sure it's FUNCATTR_local or FUNCATTR_static +static const AttrKind VarAttrMapTable[] = { + // VAR_external, VAR_weak, VAR_internal, VAR_global, VAR_readonly + ATTR_extern, ATTR_weak, ATTR_local, ATTR_extern, ATTR_readonly}; + +Var &LMIRBuilder::CreateGlobalVar(Type *type, const String &name, GlobalVarAttr attr) +{ + Var *var = mirBuilder.GetOrCreateSymbol(type->GetTypeIndex(), name, kStVar, kScGlobal, nullptr, kScopeGlobal, + false); // sameType? 
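A hedged usage sketch of the global-variable pattern defined here: create the symbol, then attach an initializer (the overload taking a Const calls SetKonst internally). VAR_internal maps to ATTR_local via VarAttrMapTable:

```cpp
void DefineCounter(maple::litecg::LMIRBuilder &builder)
{
    using namespace maple::litecg;
    Const &zero = builder.CreateIntConst(builder.i64Type, 0);
    Var &counter = builder.CreateGlobalVar(builder.i64Type, "g_counter", zero, VAR_internal);
    (void)counter;  // the symbol is registered in the module's global scope
}
```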
+    var->SetAttr(VarAttrMapTable[attr]);
+    return *var;
+}
+
+Var &LMIRBuilder::CreateGlobalVar(Type *type, const String &name, Const &init, GlobalVarAttr attr)
+{
+    Var &var = CreateGlobalVar(type, name, attr);
+    var.SetKonst(&init);
+    return var;
+}
+
+Var *LMIRBuilder::GetGlobalVar(const String &name)
+{
+    return mirBuilder.GetGlobalDecl(name);
+}
+
+Var &LMIRBuilder::CreateLocalVar(Type *type, const String &name)
+{
+    return *mirBuilder.GetOrCreateLocalDecl(name, *type);
+}
+
+Var *LMIRBuilder::GetLocalVar(const String &name)
+{
+    return mirBuilder.GetLocalDecl(name);
+}
+
+Var *LMIRBuilder::GetLocalVarFromExpr(Expr inExpr)
+{
+    auto *node = inExpr.GetNode();
+    if ((node == nullptr) || (node->GetOpCode() != OP_dread)) {
+        return nullptr;
+    }
+    return GetCurFunction().GetSymbolTabItem(static_cast<DreadNode *>(node)->GetStIdx().Idx(), true);
+}
+
+void LMIRBuilder::SetFunctionDerived2BaseRef(PregIdx derived, PregIdx base)
+{
+    return GetCurFunction().SetDerived2BaseRef(derived, base);
+}
+
+PregIdx LMIRBuilder::GetPregIdxFromExpr(const Expr &expr)
+{
+    auto *node = expr.GetNode();
+    if ((node == nullptr) || (node->GetOpCode() != OP_regread)) {
+        return 0;
+    }
+    return static_cast<RegreadNode *>(node)->GetRegIdx();
+}
+
+Var &LMIRBuilder::GetParam(Function &function, size_t index) const
+{
+    return *function.GetFormal(index);
+}
+
+Expr LMIRBuilder::GenExprFromVar(Var &var)
+{
+    if (var.IsPreg()) {
+        return Regread(var.GetPreg()->GetPregNo());
+    }
+    return Dread(var);
+}
+
+Const &LMIRBuilder::CreateIntConst(Type *type, int64_t val)
+{
+    return *GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *type);
+}
+
+Const &LMIRBuilder::CreateFloatConst(float val)
+{
+    return *GlobalTables::GetFpConstTable().GetOrCreateFloatConst(val);
+}
+
+Const &LMIRBuilder::CreateDoubleConst(double val)
+{
+    return *GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(val);
+}
+
+Const &LMIRBuilder::CreateStrConst(const String &constStr)
+{
+    // TODO: fix the type for string const
+    return *module.GetMemPool()->New(constStr, *strType);
+}
+
+Const *LMIRBuilder::GetConstFromExpr(const Expr &expr)
+{
+    auto *node = expr.GetNode();
+    if ((node == nullptr) || (node->GetOpCode() != OP_constval)) {
+        return nullptr;
+    }
+    return static_cast<ConstvalNode *>(node)->GetConstVal();
+}
+
+BB &LMIRBuilder::CreateBB(bool needLabel)
+{
+    // TODO: not sure block-node is a correct representation
+    // create block statement in current function
+    BB &bb = *module.CurFuncCodeMemPool()->New<BB>();
+    if (needLabel) {
+        // generate an implicit label statement as the first statement
+        LabelIdx labelIdx = module.CurFunction()->GetLabelTab()->CreateLabel();
+        (void)module.CurFunction()->GetLabelTab()->AddToStringLabelMap(labelIdx);
+        auto *labelStmt = module.CurFuncCodeMemPool()->New<LabelNode>();
+        labelStmt->SetLabelIdx(labelIdx);
+        bb.AddStatement(labelStmt);
+    }
+    return bb;
+}
+
+void LMIRBuilder::AppendStmt(BB &bb, Stmt &stmt)
+{
+    bb.AddStatement(&stmt);
+}
+
+void LMIRBuilder::AppendStmtBeforeBranch(BB &bb, Stmt &stmt)
+{
+    bool inserted = false;
+    auto &nodes = bb.GetStmtNodes();
+    for (auto it = nodes.crbegin(); it != nodes.crend(); it++) {
+        auto &node = *it;
+        if (!node.IsCondBr() && (node.GetOpCode() != OP_goto)) {
+            bb.InsertAfter(&node, &stmt);
+            inserted = true;
+            break;
+        }
+    }
+    CHECK_FATAL(inserted, "PreBB must have a non-jump stmt to insert the PhiVarAssign stmt.");
+}
+
+bool LMIRBuilder::IsEmptyBB(BB &bb)
+{
+    return bb.IsEmpty() || (bb.GetFirst() == bb.GetLast() && bb.GetFirst()->GetOpCode() == OP_label);
+}
+
+void LMIRBuilder::SetStmtCallConv(Stmt &stmt, ConvAttr convAttr)
+{
+    stmt.SetAttr(StmtConvAttrMapTable[convAttr]);
+}
+
+void LMIRBuilder::AppendBB(BB &bb)
+{
+    module.CurFunction()->GetBody()->AddStatement(&bb);
+}
+
+void LMIRBuilder::AppendToLast(BB &bb)
+{
+    module.CurFunction()->GetLastPosBody()->AddStatement(&bb);
+}
+
+BB &LMIRBuilder::GetLastAppendedBB()
+{
+    BB *pb = dynamic_cast<BB *>(module.CurFunction()->GetLastPosBody()->GetLast());
+    return *pb;
+}
+
+BB &LMIRBuilder::GetLastPosBB()
+{
+    return *module.CurFunction()->GetLastPosBody();
+}
+
+LabelIdx GetBBLabelIdx(BB &bb)
+{
+    LabelNode *labelNode = dynamic_cast<LabelNode *>(bb.GetFirst());
+    DEBUG_ASSERT(labelNode != nullptr, "BB should have a label statement");
+
+    return labelNode->GetLabelIdx();
+}
+
+Stmt &LMIRBuilder::Goto(BB &dest)
+{
+    return *mirBuilder.CreateStmtGoto(OP_goto, GetBBLabelIdx(dest));
+}
+
+Stmt &LMIRBuilder::CondGoto(Var &cond, BB &target, bool inverseCond)
+{
+    auto opcode = inverseCond ? OP_brtrue : OP_brfalse;
+    return *mirBuilder.CreateStmtCondGoto(Dread(cond).GetNode(), opcode, GetBBLabelIdx(target));
+}
+
+Stmt &LMIRBuilder::CondGoto(Expr cond, BB &target, bool inverseCond)
+{
+    auto opcode = inverseCond ? OP_brtrue : OP_brfalse;
+    return *mirBuilder.CreateStmtCondGoto(cond.GetNode(), opcode, GetBBLabelIdx(target));
+}
+
+// TODO: not ready yet
+Stmt &LMIRBuilder::CreateSwitchInternal(Type *type, Expr cond, BB &defaultBB,
+                                        std::vector<std::pair<int64_t, BB *>> &cases)
+{
+    CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+#if 0
+    for (const auto &casePair : cases) {
+    }
+#endif
+    return *mirBuilder.CreateStmtSwitch(cond.GetNode(), GetBBLabelIdx(defaultBB), switchTable);
+}
+
+Stmt &LMIRBuilder::Call(Function &func, Args &args_, Var *result)
+{
+    MapleVector<BaseNode *> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+    for (const auto &arg : args_) {
+        args.emplace_back(arg.GetNode());
+    }
+
+    if (result == nullptr) {
+        return *mirBuilder.CreateStmtCall(func.GetPuidx(), args);
+    } else {
+        return *mirBuilder.CreateStmtCallAssigned(func.GetPuidx(), args, result);
+    }
+}
+
+Stmt &LMIRBuilder::Call(Function &func, Args &args_, PregIdx pregIdx)
+{
+    MapleVector<BaseNode *> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+    for (const auto &arg : args_) {
+        args.emplace_back(arg.GetNode());
+    }
+    return *mirBuilder.CreateStmtCallRegassigned(func.GetPuidx(), args, pregIdx, OP_callassigned);
+}
+
+Stmt &LMIRBuilder::ICall(Expr funcAddr, Args &args_, Var *result)
+{
+    MapleVector<BaseNode *> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+    args.push_back(funcAddr.GetNode());
+    for (const auto &arg : args_) {
+        args.emplace_back(arg.GetNode());
+    }
+
+    if (result == nullptr) {
+        return *mirBuilder.CreateStmtIcall(args);
+    } else {
+        return *mirBuilder.CreateStmtIcallAssigned(args, *result);
+    }
+}
+
+Stmt &LMIRBuilder::ICall(Expr funcAddr, Args &args_, PregIdx pregIdx)
+{
+    MapleVector<BaseNode *> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+    args.push_back(funcAddr.GetNode());
+    for (const auto &arg : args_) {
+        args.emplace_back(arg.GetNode());
+    }
+    return *mirBuilder.CreateStmtIcallAssigned(args, pregIdx);
+}
+
+Stmt &LMIRBuilder::IntrinsicCall(IntrinsicId func_, Args &args_, Var *result)
+{
+    MapleVector<BaseNode *> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+    for (const auto &arg : args_) {
+        args.emplace_back(arg.GetNode());
+    }
+
+    // TODO: need to fix the type for IntrinsicId
+    auto func = static_cast<MIRIntrinsicID>(func_);
+    if (result == nullptr) {
+        return *mirBuilder.CreateStmtIntrinsicCall(func, args);
+    } else {
+        return
*mirBuilder.CreateStmtIntrinsicCallAssigned(func, args, result); + } +} + +Stmt &LMIRBuilder::Return(Expr returnVal) +{ + return *mirBuilder.CreateStmtReturn(returnVal.GetNode()); +} + +Stmt &LMIRBuilder::Comment(std::string comment) +{ + return *mirBuilder.CreateStmtComment(comment); +} + +Stmt &LMIRBuilder::Dassign(Expr src, Var &var, FieldId fieldId) +{ + return *mirBuilder.CreateStmtDassign(var, fieldId, src.GetNode()); +} + +Stmt &LMIRBuilder::Iassign(Expr src, Expr addr, Type *baseType, FieldId fieldId) +{ + return *mirBuilder.CreateStmtIassign(*baseType, fieldId, addr.GetNode(), src.GetNode()); +} + +Expr LMIRBuilder::Dread(Var &var) +{ + return Expr(mirBuilder.CreateExprDread(var), var.GetType()); +} + +Expr LMIRBuilder::DreadWithField(Var &var, FieldId id) +{ + auto *type = var.GetType(); + CHECK_FATAL(type->IsStructType(), "DreadWithField: must be a struct type!"); + auto *fldType = static_cast(type)->GetFieldType(id); + return Expr(mirBuilder.CreateExprDread(*fldType, id, var), fldType); +} + +Expr LMIRBuilder::Iread(Type *type, Expr addr, Type *baseType, FieldId fieldId) +{ + return Expr(mirBuilder.CreateExprIread(*type, *baseType, fieldId, addr.GetNode()), type); +} + +PregIdx LMIRBuilder::CreatePreg(Type *mtype) +{ + if ((mtype->GetPrimType() != PTY_ptr) && (mtype->GetPrimType() != PTY_ref)) { + // primitive type + return GetCurFunction().GetPregTab()->CreatePreg(mtype->GetPrimType()); + } + return GetCurFunction().GetPregTab()->CreatePreg(mtype->GetPrimType(), mtype); +} + +Stmt &LMIRBuilder::Regassign(Expr src, PregIdx pregIdx) +{ + return *(mirBuilder.CreateStmtRegassign(src.GetType()->GetPrimType(), pregIdx, src.GetNode())); +} + +Expr LMIRBuilder::Regread(PregIdx pregIdx) +{ + MIRPreg *preg = GetCurFunction().GetPregTab()->PregFromPregIdx(pregIdx); + if (pregIdx < 0) { + // special register + return Expr(mirBuilder.CreateExprRegread(PTY_i64, pregIdx), i64Type); + } + if (preg->GetMIRType() != nullptr) { + return Expr(mirBuilder.CreateExprRegread(preg->GetPrimType(), pregIdx), preg->GetMIRType()); + } + // the type of value in reg is primitive type + Type *type = GetPrimitiveType(preg->GetPrimType()); + return Expr(mirBuilder.CreateExprRegread(preg->GetPrimType(), pregIdx), type); +} + +Expr LMIRBuilder::Addrof(Var &var) +{ + return Expr(mirBuilder.CreateAddrof(var), var.GetType()); +} + +Expr LMIRBuilder::ConstVal(Const &constVal) +{ + return Expr(mirBuilder.CreateConstval(&constVal), &constVal.GetType()); +} + +Expr LMIRBuilder::Lnot(Type *type, Expr src) +{ + return Expr(mirBuilder.CreateExprUnary(OP_lnot, *type, src.GetNode()), type); +} + +Expr LMIRBuilder::Bnot(Type *type, Expr src) +{ + return Expr(mirBuilder.CreateExprUnary(OP_bnot, *type, src.GetNode()), type); +} + +Expr LMIRBuilder::Sqrt(Type *type, Expr src) +{ + return Expr(mirBuilder.CreateExprUnary(OP_sqrt, *type, src.GetNode()), type); +} + +inline Expr CreateBinOpInternal(MIRBuilder &mirBuilder, Opcode op, Type *type, Expr src1, Expr src2) +{ + // we don't check for type mismatch and insert type-conversion here + return Expr(mirBuilder.CreateExprBinary(op, *type, src1.GetNode(), src2.GetNode()), type); +} + +Expr LMIRBuilder::Add(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_add, type, src1, src2); +} + +Expr LMIRBuilder::Sub(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_sub, type, src1, src2); +} + +Expr LMIRBuilder::Mul(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_mul, type, src1, src2); +} + +Expr 
LMIRBuilder::UDiv(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_div, type, src1, src2); +} + +Expr LMIRBuilder::SDiv(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_div, type, src1, src2); +} + +Expr LMIRBuilder::URem(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_rem, type, src1, src2); +} + +Expr LMIRBuilder::SRem(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_rem, type, src1, src2); +} + +Expr LMIRBuilder::Shl(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_shl, type, src1, src2); +} + +Expr LMIRBuilder::LShr(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_lshr, type, src1, src2); +} + +Expr LMIRBuilder::AShr(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_ashr, type, src1, src2); +} + +Expr LMIRBuilder::And(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_band, type, src1, src2); +} + +Expr LMIRBuilder::Or(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_bior, type, src1, src2); +} + +Expr LMIRBuilder::Xor(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_bxor, type, src1, src2); +} + +Expr LMIRBuilder::ICmpEQ(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_eq, type, src1, src2); +} + +Expr LMIRBuilder::ICmpNE(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_ne, type, src1, src2); +} + +Expr LMIRBuilder::ICmpULT(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_lt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpULE(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_le, type, src1, src2); +} + +Expr LMIRBuilder::ICmpUGT(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_gt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpUGE(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_ge, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSLT(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_lt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSLE(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_le, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSGT(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_gt, type, src1, src2); +} + +Expr LMIRBuilder::ICmpSGE(Type *type, Expr src1, Expr src2) +{ + return CreateBinOpInternal(mirBuilder, OP_ge, type, src1, src2); +} + +inline Expr CreateExprCompare(MIRBuilder &mirBuilder, Opcode op, Type *type, Expr src1, Expr src2) +{ + // we don't check for type mismatch and insert type-conversion here + return Expr(mirBuilder.CreateExprCompare(op, *type, *src1.GetType(), src1.GetNode(), src2.GetNode()), type); +} + +Expr LMIRBuilder::ICmp(Type *type, Expr src1, Expr src2, IntCmpCondition cond) +{ + Opcode opCode = OP_eq; + bool isSigned = true; + switch (cond) { + case kEQ: + opCode = OP_eq; + break; + case kNE: + opCode = OP_ne; + break; + case kULT: + isSigned = false; + opCode = OP_lt; + break; + case kULE: + isSigned = false; + opCode = OP_le; + break; + case kUGT: + isSigned = false; + opCode = OP_gt; + break; + case kUGE: + isSigned = false; + opCode = OP_ge; + break; + case kSLT: + opCode = OP_lt; + break; + case kSLE: + opCode = OP_le; + break; + case kSGT: + opCode = 
OP_gt; + break; + case kSGE: + opCode = OP_ge; + break; + } + PrimType originType = src1.GetNode()->GetPrimType(); + PrimType newType = originType; + if (!isSigned) { + switch (originType) { + case PTY_i8: + newType = PTY_u8; + break; + case PTY_i16: + newType = PTY_u16; + break; + case PTY_i32: + newType = PTY_u32; + break; + case PTY_i64: + newType = PTY_u64; + break; + default: + break; + } + } + Expr cmpExpr = CreateExprCompare(mirBuilder, opCode, type, src1, src2); + static_cast(cmpExpr.GetNode())->SetOpndType(newType); + return cmpExpr; +} + +Expr LMIRBuilder::FCmp(Type *type, Expr src1, Expr src2, FloatCmpCondition cond) +{ + Opcode opCode = OP_eq; + switch (cond) { + case kOLT: + opCode = OP_lt; + break; + case kOLE: + opCode = OP_le; + break; + case kOGT: + opCode = OP_gt; + break; + case kOGE: + opCode = OP_ge; + break; + case kOEQ: + opCode = OP_eq; + break; + case kONE: + opCode = OP_ne; + break; + } + return CreateExprCompare(mirBuilder, opCode, type, src1, src2); +} + +Expr LMIRBuilder::Select(Type *type, Expr cond, Expr ifTrue, Expr ifFalse) +{ + return Expr(mirBuilder.CreateExprTernary(OP_select, *type, cond.GetNode(), ifTrue.GetNode(), ifFalse.GetNode()), + type); +} + +Expr LMIRBuilder::Trunc(Type *fromType, Type *toType, Expr opnd) +{ + return Expr(mirBuilder.CreateExprTypeCvt(OP_cvt, toType->GetPrimType(), fromType->GetPrimType(), *opnd.GetNode()), + toType); +} + +Expr LMIRBuilder::ZExt(Type *fromType, Type *toType, Expr opnd) +{ + return Expr(mirBuilder.CreateExprExtractbits(OP_zext, toType->GetPrimType(), 0, + GetPrimTypeActualBitSize(fromType->GetPrimType()), opnd.GetNode()), + toType); +} + +Expr LMIRBuilder::Cvt(Type *fromType, Type *toType, Expr opnd) +{ + if (fromType->GetPrimType() != toType->GetPrimType()) { + return Expr(mirBuilder.CreateExprTypeCvt(OP_cvt, *toType, *fromType, opnd.GetNode()), toType); + } + return Expr(opnd.GetNode(), toType); +} + +Expr LMIRBuilder::SExt(Type *fromType, Type *toType, Expr opnd) +{ + return Expr(mirBuilder.CreateExprExtractbits(OP_sext, toType->GetPrimType(), 0, + GetPrimTypeActualBitSize(fromType->GetPrimType()), opnd.GetNode()), + toType); +} + +Expr LMIRBuilder::BitCast(Type *fromType, Type *toType, Expr opnd) +{ + return Expr(mirBuilder.CreateExprRetype(*toType, *fromType, opnd.GetNode()), toType); +} +} // namespace litecg +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_driver/BUILD.gn b/ecmascript/compiler/codegen/maple/maple_driver/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..b274d3a6125fabee85c0c9fa3d7acdb7c381a8c8 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_driver/BUILD.gn @@ -0,0 +1,44 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
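+# Two host static libraries for the Maple driver layer: libmaple_driver wraps
+# target-triple parsing (src/triple.cpp), and libdriver_option holds the shared
+# command-line option definitions (src/driver_options.cpp).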
+ +import("//arkcompiler/ets_runtime/js_runtime_config.gni") + +ohos_static_library("libmaple_driver") { + stack_protector_ret = false + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = [ "src/triple.cpp" ] + + include_dirs = [ + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + ] + + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + part_name = "ets_runtime" + subsystem_name = "arkcompiler" +} + +ohos_static_library("libdriver_option") { + stack_protector_ret = false + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = [ "src/driver_options.cpp" ] + + include_dirs = [ + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + ] + + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + part_name = "ets_runtime" + subsystem_name = "arkcompiler" +} diff --git a/ecmascript/compiler/codegen/maple/maple_driver/include/driver_options.h b/ecmascript/compiler/codegen/maple/maple_driver/include/driver_options.h new file mode 100644 index 0000000000000000000000000000000000000000..00a099a704c63e4754b2060d0b3550f1ae42ec11 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_driver/include/driver_options.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H
+#define MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H
+
+#include "cl_option.h"
+#include "cl_parser.h"
+
+#include
+
+static maplecl::OptionCategory &driverCategory = maplecl::CommandLine::GetCommandLine().defaultCategory;
+
+static maplecl::OptionCategory &clangCategory = maplecl::CommandLine::GetCommandLine().clangCategory;
+static maplecl::OptionCategory &hir2mplCategory = maplecl::CommandLine::GetCommandLine().hir2mplCategory;
+static maplecl::OptionCategory &mpl2mplCategory = maplecl::CommandLine::GetCommandLine().mpl2mplCategory;
+static maplecl::OptionCategory &meCategory = maplecl::CommandLine::GetCommandLine().meCategory;
+static maplecl::OptionCategory &cgCategory = maplecl::CommandLine::GetCommandLine().cgCategory;
+static maplecl::OptionCategory &asCategory = maplecl::CommandLine::GetCommandLine().asCategory;
+static maplecl::OptionCategory &ldCategory = maplecl::CommandLine::GetCommandLine().ldCategory;
+
+static maplecl::OptionCategory &jbc2mplCategory = maplecl::CommandLine::GetCommandLine().jbc2mplCategory;
+static maplecl::OptionCategory &ipaCategory = maplecl::CommandLine::GetCommandLine().ipaCategory;
+
+namespace opts {
+
+/* ##################### BOOL Options ############################################################### */
+
+extern maplecl::Option<bool> verbose;
+extern maplecl::Option<bool> genLMBC;
+extern maplecl::Option<bool> profileGen;
+extern maplecl::Option<bool> profileUse;
+
+} // namespace opts
+
+#endif /* MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H */
diff --git a/test/typeinfer/module_test/module_member_initialization/module_member_initialization.ts b/ecmascript/compiler/codegen/maple/maple_driver/include/mpl_options.h
similarity index 59%
rename from test/typeinfer/module_test/module_member_initialization/module_member_initialization.ts
rename to ecmascript/compiler/codegen/maple/maple_driver/include/mpl_options.h
index 44d7308f2990e52c216839ebc84a4e3bab4be23b..eb1060ace54a85ffdda5b924aa187d885187d6b2 100644
--- a/test/typeinfer/module_test/module_member_initialization/module_member_initialization.ts
+++ b/ecmascript/compiler/codegen/maple/maple_driver/include/mpl_options.h
@@ -13,18 +13,14 @@
  * limitations under the License.
  */
-declare function AssertType(value:any, type:string):void;
+#ifndef MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H
+#define MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H
-import {foo1, foo2} from "./export"
+namespace maple {
-let f1 = new foo1();
-let f2 = new foo2();
-AssertType(f1.b1, "(string, string) => string");
-AssertType(f1.b2, "number");
-AssertType(f1.b1("a", "b"), "string");
+enum SafetyCheckMode { kNoCheck, kStaticCheck, kDynamicCheck, kDynamicCheckSilent };
-AssertType(f2.a1, "number");
-AssertType(f2.a2, "string");
-AssertType(f2.a3, "foo1");
-AssertType(f2.a3.b2, "number");
-AssertType(f2.a4, "(number, number) => number");
\ No newline at end of file
+enum Level { kLevelZero = 0, kLevelOne = 1, kLevelTwo = 2, kLevelThree = 3, kLevelFour = 4 };
+
+} // namespace maple
+#endif // MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H
diff --git a/ecmascript/compiler/codegen/maple/maple_driver/include/triple.h b/ecmascript/compiler/codegen/maple/maple_driver/include/triple.h
new file mode 100644
index 0000000000000000000000000000000000000000..6f8bc1a4a5e124050fe2c845314ec6800599f80e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_driver/include/triple.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_TRIPLE_H +#define MAPLE_TRIPLE_H + +#include +#include + +#include +#include +#include + +namespace maple { + +class Triple { +public: + /* Currently, only aarch64 is supported */ + enum ArchType { UnknownArch, aarch64, aarch64_be, LastArchType }; + + /* Currently, only ILP32 and LP64 are supported */ + enum EnvironmentType { UnknownEnvironment, GNU, GNUILP32, LastEnvironmentType }; + + ArchType GetArch() const + { + return arch; + } + EnvironmentType GetEnvironment() const + { + return environment; + } + + bool IsBigEndian() const + { + return (GetArch() == ArchType::aarch64_be); + } + + std::string Str() const; + std::string GetArchName() const; + std::string GetEnvironmentName() const; + + static Triple &GetTriple() + { + static Triple triple; + return triple; + } + Triple(const Triple &) = delete; + Triple &operator=(const Triple &) = delete; + + void Init(const std::string &target); + void Init(); + +private: + std::string data; + ArchType arch; + EnvironmentType environment; + + Triple() : arch(UnknownArch), environment(UnknownEnvironment) {} + + Triple::ArchType ParseArch(std::string_view archStr); + Triple::EnvironmentType ParseEnvironment(std::string_view environmentType); +}; + +} // namespace maple + +#endif /* MAPLE_TRIPLE_H */ diff --git a/ecmascript/compiler/codegen/maple/maple_driver/src/driver_options.cpp b/ecmascript/compiler/codegen/maple/maple_driver/src/driver_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bf44b890df5418d093a81a246adae24b1e8ce655 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_driver/src/driver_options.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "driver_options.h"
+#include "cl_option.h"
+
+namespace opts {
+
+/* ##################### BOOL Options ############################################################### */
+
+maplecl::Option<bool> verbose({"-verbose"}, " -verbose \tPrint information\n",
+                              {driverCategory, jbc2mplCategory, hir2mplCategory, meCategory, mpl2mplCategory,
+                               cgCategory});
+
+maplecl::Option<bool> genLMBC({"--genlmbc"}, " --genlmbc \tGenerate .lmbc file\n",
+                              {driverCategory, mpl2mplCategory});
+
+maplecl::Option<bool> profileGen({"--profileGen"},
+                                 " --profileGen \tGenerate profile data for static languages\n",
+                                 {driverCategory, meCategory, mpl2mplCategory, cgCategory});
+
+maplecl::Option<bool> profileUse({"--profileUse"},
+                                 " --profileUse \tOptimize static languages with profile data\n",
+                                 {driverCategory, mpl2mplCategory});
+
+} /* namespace opts */
diff --git a/ecmascript/compiler/codegen/maple/maple_driver/src/triple.cpp b/ecmascript/compiler/codegen/maple/maple_driver/src/triple.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c7e263ca4320709f584ac6e9b484016d05b66956
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_driver/src/triple.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "triple.h"
+#include "driver_options.h"
+
+namespace maple {
+
+Triple::ArchType Triple::ParseArch(std::string_view archStr)
+{
+    if (maple::utils::Contains({"aarch64", "aarch64_le"}, archStr)) {
+        return Triple::ArchType::aarch64;
+    } else if (maple::utils::Contains({"aarch64_be"}, archStr)) {
+        return Triple::ArchType::aarch64_be;
+    }
+
+    // Currently Triple supports only aarch64
+    return Triple::UnknownArch;
+}
+
+Triple::EnvironmentType Triple::ParseEnvironment(std::string_view archStr)
+{
+    if (maple::utils::Contains({"ilp32", "gnu_ilp32", "gnuilp32"}, archStr)) {
+        return Triple::EnvironmentType::GNUILP32;
+    } else if (maple::utils::Contains({"gnu"}, archStr)) {
+        return Triple::EnvironmentType::GNU;
+    }
+
+    // Currently Triple supports only ilp32 and the default gnu/LP64 ABI
+    return Triple::UnknownEnvironment;
+}
+
+void Triple::Init()
+{
+    /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+     * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config */
+#if TARGAARCH64
+    arch = Triple::ArchType::aarch64;
+    environment = Triple::EnvironmentType::GNU;
+#endif
+}
+
+void Triple::Init(const std::string &target)
+{
+    data = target;
+
+    /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+     * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config */
+#if TARGAARCH64
+    Init();
+
+    std::vector<std::string_view> components;
+    maple::StringUtils::SplitSV(data, components, '-');
+    if (components.size() == 0) { // at least one component must be present
+        return;
+    }
+
+    auto tmpArch = ParseArch(components[0]); // temporary, so a failed parse cannot overwrite the arch set via opts::bigendian
+    if (tmpArch == Triple::UnknownArch) {
+        return;
+    }
+    arch = tmpArch;
+
+    /* Try to find the environment component in the option.
+     * For example: aarch64-none-linux-gnu, aarch64-linux-gnu or aarch64-gnu, where gnu is the environment */
+    for (size_t i = 1; i < components.size(); ++i) {
+        auto tmpEnvironment = ParseEnvironment(components[i]);
+        if (tmpEnvironment != Triple::UnknownEnvironment) {
+            environment = tmpEnvironment;
+            break;
+        }
+    }
+#endif
+}
+
+std::string Triple::GetArchName() const
+{
+    switch (arch) {
+        case ArchType::aarch64_be:
+            return "aarch64_be";
+        case ArchType::aarch64:
+            return "aarch64";
+        default:
+            DEBUG_ASSERT(false, "Unknown Architecture Type\n");
+    }
+    return "";
+}
+
+std::string Triple::GetEnvironmentName() const
+{
+    switch (environment) {
+        case EnvironmentType::GNUILP32:
+            return "gnu_ilp32";
+        case EnvironmentType::GNU:
+            return "gnu";
+        default:
+            DEBUG_ASSERT(false, "Unknown Environment Type\n");
+    }
+    return "";
+}
+
+std::string Triple::Str() const
+{
+    if (!data.empty()) {
+        return data;
+    }
+
+    if (GetArch() != ArchType::UnknownArch && GetEnvironment() != Triple::EnvironmentType::UnknownEnvironment) {
+        /* only the linux platform is supported, so "-linux-" is hardcoded */
+        return GetArchName() + "-linux-" + GetEnvironmentName();
+    }
+
+    CHECK_FATAL(false, "Only aarch64/aarch64_be GNU/GNUILP32 targets are supported\n");
+    return data;
+}
+
+} // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ipa/include/old/ea_connection_graph.h b/ecmascript/compiler/codegen/maple/maple_ipa/include/old/ea_connection_graph.h
new file mode 100644
index 0000000000000000000000000000000000000000..817ce2d5e90a089e8d00a40fac815e737315f2d2
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ipa/include/old/ea_connection_graph.h
@@ -0,0 +1,771 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#define MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#include +#include +#include +#include "call_graph.h" +#include "me_ir.h" +#include "irmap.h" + +namespace maple { +enum NodeKind { kObejectNode, kReferenceNode, kActualNode, kFieldNode, kPointerNode }; + +enum EAStatus { kNoEscape, kReturnEscape, kArgumentEscape, kGlobalEscape }; + +const inline std::string EscapeName(EAStatus esc) +{ + switch (esc) { + case kNoEscape: + return "NoEsc"; + case kReturnEscape: + return "RetEsc"; + case kArgumentEscape: + return "ArgEsc"; + case kGlobalEscape: + return "GlobalEsc"; + default: + return ""; + } +} + +class Location { +public: + Location(const std::string &modName, uint32 fileId, uint32 lineId) + : modName(modName), fileId(fileId), lineId(lineId) {}; + ~Location() = default; + + const std::string &GetModName() const + { + return modName; + } + + uint32 GetFileId() const + { + return fileId; + } + + uint32 GetLineId() const + { + return lineId; + } + +private: + std::string modName; + uint32 fileId; + uint32 lineId; +}; + +class EACGBaseNode; +class EACGObjectNode; +class EACGFieldNode; +class EACGRefNode; +class EACGActualNode; +class EACGPointerNode; + +class EAConnectionGraph { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGBaseNode; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGRefNode; + friend class EACGPointerNode; + // If import is false, need init globalNode. + EAConnectionGraph(MIRModule *m, MapleAllocator *allocator, const GStrIdx &funcName, bool import = false) + : mirModule(m), + alloc(allocator), + nodes(allocator->Adapter()), + expr2Nodes(allocator->Adapter()), + funcArgNodes(allocator->Adapter()), + callSite2Nodes(allocator->Adapter()), + funcStIdx(funcName), + hasUpdated(false), + needConv(false), + imported(import), + exprIdMax(0), + globalObj(nullptr), + globalRef(nullptr), + globalField(nullptr) {}; + ~EAConnectionGraph() = default; + + EACGObjectNode *CreateObjectNode(MeExpr *expr, EAStatus initialEas, bool isPh, TyIdx tyIdx); + EACGRefNode *CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic); + EACGActualNode *CreateActualNode(EAStatus initialEas, bool isReurtn, bool isPh, uint8 argIdx, uint32 callSiteInfo); + EACGFieldNode *CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId, EACGObjectNode *belongTo, bool isPh); + EACGPointerNode *CreatePointerNode(MeExpr *expr, EAStatus initialEas, int inderictL); + EACGBaseNode *GetCGNodeFromExpr(MeExpr *me); + EACGFieldNode *GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID); + EACGActualNode *GetReturnNode() const; + const MapleVector *GetFuncArgNodeVector() const; + void TouchCallSite(uint32 callSiteInfo); + MapleVector *GetCallSiteArgNodeVector(uint32 callSite); + bool ExprCanBeOptimized(MeExpr &expr); + + bool CGHasUpdated() const + { + return hasUpdated; + } + + void UnSetCGUpdateFlag() + { + hasUpdated = false; + } + + void SetCGHasUpdated() + { + hasUpdated = true; + } + + void SetExprIdMax(int max) + { + exprIdMax = max; + } + + void SetNeedConservation() + { + needConv = true; + } + + bool GetNeedConservation() const + { + return needConv; + } + + GStrIdx GetFuncNameStrIdx() const + { + return funcStIdx; + } + + EACGObjectNode *GetGlobalObject() + { + return globalObj; + } + + const EACGObjectNode *GetGlobalObject() const + { + return globalObj; + } + + EACGRefNode *GetGlobalReference() + { + return globalRef; + } + + const EACGRefNode *GetGlobalReference() const + 
{ + return globalRef; + } + + const MapleVector &GetNodes() const + { + return nodes; + } + + void ResizeNodes(size_t size, EACGBaseNode *val) + { + nodes.resize(size, val); + } + + EACGBaseNode *GetNode(uint32 idx) const + { + CHECK_FATAL(idx < nodes.size(), "array check fail"); + return nodes[idx]; + } + + void SetNodeAt(size_t index, EACGBaseNode *val) + { + nodes[index] = val; + } + + const MapleVector &GetFuncArgNodes() const + { + return funcArgNodes; + } + + const MapleMap *> &GetCallSite2Nodes() const + { + return callSite2Nodes; + } + + void InitGlobalNode(); + void AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee); + void UpdateExprOfNode(EACGBaseNode &node, MeExpr *me); + void UpdateExprOfGlobalRef(MeExpr *me); + void PropogateEAStatus(); + bool MergeCG(MapleVector &caller, const MapleVector *callee); + void TrimGlobalNode() const; + void UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg); + void DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec = nullptr); + void DeleteEACG() const; + void RestoreStatus(bool old); + void CountObjEAStatus() const; + + const std::string &GetFunctionName() const + { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx); + } + +private: + MIRModule *mirModule; + MapleAllocator *alloc; + MapleVector nodes; + MapleMap *> expr2Nodes; + // this vector contain func arg nodes first in declaration order and the last is return node + MapleVector funcArgNodes; + MapleMap *> callSite2Nodes; + GStrIdx funcStIdx; + bool hasUpdated; + bool needConv; + bool imported; + int exprIdMax; + EACGObjectNode *globalObj; + EACGRefNode *globalRef; + EACGFieldNode *globalField; + // this is used as a tmp varible for merge cg + std::map> callee2Caller; + void CheckArgNodeOrder(MapleVector &funcArgV); + void UpdateCallerNodes(const MapleVector &caller, const MapleVector &callee); + void UpdateCallerRetNode(MapleVector &caller, const MapleVector &callee); + void UpdateCallerEdges(); + void UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2); + void UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime); + void UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, bool firstTime); + + void SetCGUpdateFlag() + { + hasUpdated = true; + } +}; + +class EACGBaseNode { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGActualNode; + friend class EACGRefNode; + friend class EACGPointerNode; + friend class EAConnectionGraph; + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph *ec) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(nullptr), eaStatus(kNoEscape), id(0), eaCG(ec) + { + } + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(expr), eaStatus(initialEas), id(i), eaCG(&ec) + { + ec.SetCGUpdateFlag(); + } + + virtual ~EACGBaseNode() = default; + + virtual bool IsFieldNode() const + { + return kind == kFieldNode; + } + + virtual bool IsObjectNode() const + { + return kind == kObejectNode; + } + + virtual bool IsReferenceNode() const + { + return kind == kReferenceNode; + } + + virtual bool IsActualNode() const + { + return kind == kActualNode; + } + + virtual bool IsPointerNode() const + { + return kind == 
kPointerNode; + } + + virtual const MeExpr *GetMeExpr() const + { + return meExpr; + } + + virtual void SetMeExpr(MeExpr &newExpr) + { + if (IsFieldNode() && newExpr.GetMeOp() != kMeOpIvar && newExpr.GetMeOp() != kMeOpOp) { + CHECK_FATAL(false, "must be kMeOpIvar or kMeOpOp"); + } else if (IsReferenceNode() == true && newExpr.GetMeOp() != kMeOpVar && newExpr.GetMeOp() != kMeOpReg && + newExpr.GetMeOp() != kMeOpAddrof && newExpr.GetMeOp() != kMeOpConststr) { + CHECK_FATAL(false, "must be kMeOpVar, kMeOpReg, kMeOpAddrof or kMeOpConststr"); + } + meExpr = &newExpr; + } + + const std::set &GetPointsToSet() const + { + CHECK_FATAL(!IsPointerNode(), "must be pointer node"); + return pointsTo; + }; + + virtual bool AddOutNode(EACGBaseNode &newOut); + + virtual EAStatus GetEAStatus() const + { + return eaStatus; + } + + virtual const std::set &GetInSet() const + { + return in; + } + + virtual void InsertInSet(EACGBaseNode *val) + { + (void)in.insert(val); + } + + virtual const std::set &GetOutSet() const + { + CHECK_FATAL(IsActualNode(), "must be actual node"); + return out; + } + + virtual void InsertOutSet(EACGBaseNode *val) + { + (void)out.insert(val); + } + + virtual bool UpdateEAStatus(EAStatus newEas) + { + if (newEas > eaStatus) { + eaStatus = newEas; + PropagateEAStatusForNode(this); + eaCG->SetCGUpdateFlag(); + return true; + } + return false; + } + + bool IsBelongTo(const EAConnectionGraph *cg) const + { + return this->eaCG == cg; + } + + const EAConnectionGraph *GetEACG() const + { + return eaCG; + } + + EAConnectionGraph *GetEACG() + { + return eaCG; + } + + void SetEACG(EAConnectionGraph *cg) + { + this->eaCG = cg; + } + + void SetID(int setId) + { + this->id = static_cast(setId); + } + + bool CanIgnoreRC() const; + +protected: + Location *locInfo; + MIRModule *mirModule; + MapleAllocator *alloc; + NodeKind kind; + MeExpr *meExpr; + EAStatus eaStatus; + size_t id; + // OBJ<->Field will not in following Set + std::set in; + std::set out; + std::set pointsTo; + EAConnectionGraph *eaCG; + + virtual void CheckAllConnectionInNodes(); + virtual std::string GetName(const IRMap *irMap) const; + virtual void DumpDotFile(std::ostream &, std::map &, bool, const IRMap *irMap = nullptr) = 0; + virtual void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const; + virtual void GetNodeFormatInDot(std::string &label, std::string &color) const; + virtual bool UpdatePointsTo(const std::set &cPointsTo); + + virtual void SetEAStatus(EAStatus status) + { + this->eaStatus = status; + } + + virtual NodeKind GetNodeKind() const + { + return kind; + } + +private: + virtual bool ReplaceByGlobalNode() + { + CHECK_FATAL(false, "impossible"); + return false; + } +}; + +class EACGPointerNode : public EACGBaseNode { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kPointerNode, ec), indirectLevel(0) + { + } + + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i, int indirectL) + : EACGBaseNode(md, alloc, kPointerNode, ec, expr, initialEas, i), indirectLevel(indirectL) {}; + ~EACGPointerNode() = default; + + void SetLocation(Location *loc) + { + this->locInfo = loc; + } + + int GetIndirectLevel() const + { + return indirectLevel; + } + + bool AddOutNode(EACGBaseNode &newOut) override + { + if (indirectLevel == 1) { + CHECK_FATAL(!newOut.IsPointerNode(), "must be pointer node"); + (void)pointingTo.insert(&newOut); + 
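+            // at indirect level 1 the target must be a concrete (non-pointer)
+            // node; deeper levels chain through exactly one pointer node per level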
(void)out.insert(&newOut); + (void)newOut.in.insert(this); + } else { + pointingTo.insert(&newOut); + CHECK_FATAL(pointingTo.size() == 1, "the size must be one"); + CHECK_FATAL(newOut.IsPointerNode(), "must be pointer node"); + CHECK_FATAL((indirectLevel - static_cast(newOut).GetIndirectLevel()) == 1, + "must be one"); + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + } + return false; + } + + const std::set &GetPointingTo() const + { + return pointingTo; + } + + bool UpdatePointsTo(const std::set &) override + { + CHECK_FATAL(false, "impossible to update PointsTo"); + return true; + }; + + void PropagateEAStatusForNode(const EACGBaseNode *) const override + { + CHECK_FATAL(false, "impossible to propagate EA status for node"); + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override {} + +private: + int indirectLevel; + std::set pointingTo; + bool ReplaceByGlobalNode() override + { + CHECK_FATAL(false, "impossible to replace by global node"); + return true; + } +}; + +class EACGObjectNode : public EACGBaseNode { +public: + friend class EACGFieldNode; + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kObejectNode, ec), rcOperations(0), ignorRC(false), isPhantom(false) + { + } + + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i, bool isPh) + : EACGBaseNode(md, alloc, kObejectNode, ec, expr, initialEas, i), + rcOperations(0), + ignorRC(false), + isPhantom(isPh) + { + (void)pointsBy.insert(this); + (void)pointsTo.insert(this); + }; + ~EACGObjectNode() = default; + bool IsPhantom() const + { + return isPhantom == true; + }; + + void SetLocation(Location *loc) + { + this->locInfo = loc; + } + + const std::map &GetFieldNodeMap() const + { + return fieldNodes; + } + + EACGFieldNode *GetFieldNodeFromIdx(FieldID fId) + { + if (fieldNodes.find(-1) != fieldNodes.end()) { // -1 expresses global + return fieldNodes[-1]; + } + if (fieldNodes.find(fId) == fieldNodes.end()) { + return nullptr; + } + return fieldNodes[fId]; + } + + bool AddOutNode(EACGBaseNode &newOut) override; + bool UpdatePointsTo(const std::set &) override + { + CHECK_FATAL(false, "impossible"); + return true; + }; + + bool IsPointedByFieldNode() const; + void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const override; + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override; + + void Insert2PointsBy(EACGBaseNode *node) + { + (void)pointsBy.insert(node); + } + + void EraseNodeFromPointsBy(EACGBaseNode *node) + { + pointsBy.erase(node); + } + + void IncresRCOperations() + { + ++rcOperations; + } + + void IncresRCOperations(int num) + { + rcOperations += num; + } + + int GetRCOperations() const + { + return rcOperations; + } + + bool GetIgnorRC() const + { + return ignorRC; + } + + void SetIgnorRC(bool ignore) + { + ignorRC = ignore; + } + +private: + std::set pointsBy; + int rcOperations; + bool ignorRC; + bool isPhantom; + std::map fieldNodes; + bool ReplaceByGlobalNode() override; +}; + +class EACGRefNode : public EACGBaseNode { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGRefNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kReferenceNode, ec), 
isStaticField(false), sym(nullptr), version(0) + { + } + + EACGRefNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isS = false) + : EACGBaseNode(md, alloc, kReferenceNode, ec, expr, initialEas, i), + isStaticField(isS), + sym(nullptr), + version(0) {}; + ~EACGRefNode() = default; + bool IsStaticRef() const + { + return isStaticField; + }; + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx) + { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be sym"); + CHECK_FATAL(versionIdx == version, "must be version "); + } + sym = mirSym; + version = versionIdx; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + +private: + bool isStaticField; + MIRSymbol *sym; + int version; + bool ReplaceByGlobalNode(); +}; +class EACGFieldNode : public EACGBaseNode { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kFieldNode, ec), fieldID(0), isPhantom(false), sym(nullptr), version(0), mirFieldId(0) + { + } + + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + FieldID fId, EACGObjectNode *bt, bool isPh) + : EACGBaseNode(md, alloc, kFieldNode, ec, expr, initialEas, i), + fieldID(fId), + isPhantom(isPh), + sym(nullptr), + version(0), + mirFieldId(0) + { + bt->fieldNodes[fieldID] = this; + (void)belongsTo.insert(bt); + }; + + ~EACGFieldNode() = default; + + FieldID GetFieldID() const + { + return fieldID; + }; + + void SetFieldID(FieldID id) + { + fieldID = id; + } + + bool IsPhantom() const + { + return isPhantom; + } + + const std::set &GetBelongsToObj() const + { + return belongsTo; + } + + void AddBelongTo(EACGObjectNode *newObj) + { + (void)belongsTo.insert(newObj); + } + + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx, FieldID fID) + { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be mirSym"); + CHECK_FATAL(version == versionIdx, "must be version"); + CHECK_FATAL(mirFieldId == fID, "must be mir FieldId"); + } + sym = mirSym; + version = versionIdx; + mirFieldId = fID; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + +private: + FieldID fieldID; + std::set belongsTo; + bool isPhantom; + MIRSymbol *sym; + int version; + FieldID mirFieldId; + bool ReplaceByGlobalNode(); +}; + +class EACGActualNode : public EACGBaseNode { +public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kActualNode, ec), isReturn(false), isPhantom(false), argIdx(0), callSiteInfo(0) {}; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i, bool isR, bool isPh, uint8 aI, uint32 callSite) + : EACGBaseNode(md, alloc, kActualNode, ec, expr, initialEas, i), + isReturn(isR), + isPhantom(isPh), + argIdx(aI), + callSiteInfo(callSite) {}; + ~EACGActualNode() = default; + + bool IsReturn() const + { + return isReturn; + }; + + bool IsPhantom() const + { + return isPhantom; + }; + + uint32 GetArgIndex() const + { + return argIdx; + }; + + uint32 GetCallSite() const + { + return callSiteInfo; + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr); + 
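+    // Illustrative sketch only (cg and callSiteId are assumed names, and the
+    // escape states are arbitrary): a call site with two arguments plus a return
+    // value yields three actual nodes, arguments first and the return slot last:
+    //   EACGActualNode *a0 = cg->CreateActualNode(kNoEscape, false, true, 0, callSiteId);
+    //   EACGActualNode *a1 = cg->CreateActualNode(kNoEscape, false, true, 1, callSiteId);
+    //   EACGActualNode *ret = cg->CreateActualNode(kReturnEscape, true, true, 2, callSiteId);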
+private: + bool isReturn; + bool isPhantom; + uint8 argIdx; + uint32 callSiteInfo; + bool ReplaceByGlobalNode(); +}; +} // namespace maple +#endif diff --git a/ecmascript/compiler/codegen/maple/maple_ipa/src/old/ea_connection_graph.cpp b/ecmascript/compiler/codegen/maple/maple_ipa/src/old/ea_connection_graph.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cfab50ac2ac5b36fa7ac251acd283b7df0bcb528 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ipa/src/old/ea_connection_graph.cpp @@ -0,0 +1,1121 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ea_connection_graph.h" + +namespace maple { +constexpr maple::uint32 kInvalid = 0xffffffff; +void EACGBaseNode::CheckAllConnectionInNodes() +{ +#ifdef DEBUG + for (EACGBaseNode *inNode : in) { + ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode"); + } + for (EACGBaseNode *outNode : out) { + ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode"); + } + for (EACGObjectNode *obj : pointsTo) { + ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]); + DEBUG_ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj"); + } + if (IsFieldNode()) { + for (EACGObjectNode *obj : static_cast(this)->GetBelongsToObj()) { + ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]); + DEBUG_ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj"); + } + } +#endif +} + +bool EACGBaseNode::AddOutNode(EACGBaseNode &newOut) +{ + if (out.find(&newOut) != out.end()) { + return false; + } + bool newIsLocal = newOut.UpdateEAStatus(eaStatus); + if (eaStatus == kGlobalEscape && pointsTo.size() > 0) { + if (newIsLocal) { + eaCG->SetCGUpdateFlag(); + } + return newIsLocal; + } + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + DEBUG_ASSERT(newOut.pointsTo.size() != 0, "must be greater than zero"); + bool hasChanged = UpdatePointsTo(newOut.pointsTo); + eaCG->SetCGUpdateFlag(); + return hasChanged; +} + +void EACGBaseNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot) const +{ + for (EACGBaseNode *outNode : out) { + (void)outNode->UpdateEAStatus(eaStatus); + } +} + +std::string EACGBaseNode::GetName(const IRMap *irMap) const +{ + std::string name; + if (irMap == nullptr || meExpr == nullptr) { + name += std::to_string(id); + } else { + name += std::to_string(id); + name += "\\n"; + if (meExpr->GetMeOp() == kMeOpVar) { + VarMeExpr *varMeExpr = static_cast(meExpr); + const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol(); + name += ((sym->GetStIdx().IsGlobal() ? 
"$" : "%") + sym->GetName() + "\\nmx" + + std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(varMeExpr->GetFieldID())); + } else if (meExpr->GetMeOp() == kMeOpIvar) { + IvarMeExpr *ivarMeExpr = static_cast(meExpr); + MeExpr *base = ivarMeExpr->GetBase(); + VarMeExpr *varMeExpr = nullptr; + if (base->GetMeOp() == kMeOpVar) { + varMeExpr = static_cast(base); + } else { + name += std::to_string(id); + return name; + } + const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol(); + name += (std::string("base :") + (sym->GetStIdx().IsGlobal() ? "$" : "%") + sym->GetName() + "\\nmx" + + std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(ivarMeExpr->GetFieldID())); + } else if (meExpr->GetOp() == OP_gcmalloc || meExpr->GetOp() == OP_gcmallocjarray) { + name += "mx" + std::to_string(meExpr->GetExprID()); + } + } + return name; +} + +bool EACGBaseNode::UpdatePointsTo(const std::set &cPointsTo) +{ + size_t oldPtSize = pointsTo.size(); + pointsTo.insert(cPointsTo.begin(), cPointsTo.end()); + if (oldPtSize == pointsTo.size()) { + return false; + } + for (EACGObjectNode *pt : pointsTo) { + pt->Insert2PointsBy(this); + } + for (EACGBaseNode *pred : in) { + (void)pred->UpdatePointsTo(pointsTo); + } + return true; +} + +void EACGBaseNode::GetNodeFormatInDot(std::string &label, std::string &color) const +{ + switch (GetEAStatus()) { + case kNoEscape: + label += "NoEscape"; + color = "darkgreen"; + break; + case kArgumentEscape: + label += "ArgEscape"; + color = "brown"; + break; + case kReturnEscape: + label += "RetEscape"; + color = "orange"; + break; + case kGlobalEscape: + label += "GlobalEscape"; + color = "red"; + break; + } +} + +bool EACGBaseNode::CanIgnoreRC() const +{ + for (auto obj : pointsTo) { + if (!obj->GetIgnorRC()) { + return false; + } + } + return true; +} + +void EACGObjectNode::CheckAllConnectionInNodes() +{ +#ifdef DEBUG + for (EACGBaseNode *inNode : in) { + ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode"); + } + for (EACGBaseNode *outNode : out) { + ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]); + DEBUG_ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode"); + } + for (EACGBaseNode *pBy : pointsBy) { + ASSERT_NOT_NULL(eaCG->nodes[pBy->id - 1]); + DEBUG_ASSERT(eaCG->nodes[pBy->id - 1] == pBy, "must be pBy"); + } + for (auto fieldPair : fieldNodes) { + EACGFieldNode *field = fieldPair.second; + DEBUG_ASSERT(field->fieldID == fieldPair.first, "must be fieldPair.first"); + ASSERT_NOT_NULL(eaCG->nodes[field->id - 1]); + DEBUG_ASSERT(eaCG->nodes[field->id - 1] == field, "must be filed"); + } +#endif +} + +bool EACGObjectNode::IsPointedByFieldNode() const +{ + for (EACGBaseNode *pBy : pointsBy) { + if (pBy->IsFieldNode()) { + return true; + } + } + return false; +} + +bool EACGObjectNode::AddOutNode(EACGBaseNode &newOut) +{ + DEBUG_ASSERT(newOut.IsFieldNode(), "must be fieldNode"); + EACGFieldNode *field = static_cast(&newOut); + fieldNodes[field->GetFieldID()] = field; + (void)newOut.UpdateEAStatus(eaStatus); + field->AddBelongTo(this); + return true; +} + +bool EACGObjectNode::ReplaceByGlobalNode() +{ + DEBUG_ASSERT(out.size() == 0, "must be zero"); + for (EACGBaseNode *node : pointsBy) { + node->pointsTo.erase(this); + (void)node->pointsTo.insert(eaCG->GetGlobalObject()); + } + pointsBy.clear(); + for (EACGBaseNode *inNode : in) { + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalObject()); + } + in.clear(); + for (auto fieldPair : fieldNodes) { + 
EACGFieldNode *field = fieldPair.second; + field->belongsTo.erase(this); + } + fieldNodes.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalObject()); + } + DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGObjectNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot) const +{ + for (auto fieldNodePair : fieldNodes) { + EACGFieldNode *field = fieldNodePair.second; + (void)field->UpdateEAStatus(eaStatus); + } +} + +void EACGObjectNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) +{ + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + " Object\\n"; + std::string color; + GetNodeFormatInDot(label, color); + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + fout << name << " [shape=box, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n"; + for (auto fieldPair : fieldNodes) { + EACGBaseNode *field = fieldPair.second; + fout << name << "->" << field->GetName(nullptr) << ";" + << "\n"; + } + for (auto fieldPair : fieldNodes) { + EACGBaseNode *field = fieldPair.second; + field->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGRefNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) +{ + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + " Reference\\n"; + if (IsStaticRef()) { + label += "Static\\n"; + } + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << "];" + << "\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";" + << "\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";" + << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGRefNode::ReplaceByGlobalNode() +{ + for (EACGBaseNode *inNode : in) { + DEBUG_ASSERT(inNode->id > 3, "must be greater than three"); // the least valid idx is 3 + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalReference()); + } + in.clear(); + for (EACGBaseNode *outNode : out) { + (void)outNode->in.erase(this); + } + out.clear(); + for (EACGObjectNode *base : pointsTo) { + base->EraseNodeFromPointsBy(this); + } + pointsTo.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalReference()); + } + DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be this"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGPointerNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) +{ + if (dumped[this]) { + return; + } + dumped[this] = true; + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + "\\nPointer Indirect Level : " + std::to_string(indirectLevel) + "\\n"; + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label 
<< "\", fontcolor=" << color << "];" + << "\n"; + for (EACGBaseNode *outNode : out) { + fout << name << "->" << outNode->GetName(nullptr) << " [style =\"dotted\", color = \"blue\"];" + << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGActualNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) +{ + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + if (IsReturn()) { + label = GetName(irMap) + "\\nRet Idx : " + std::to_string(GetArgIndex()) + "\\n"; + } else { + label = GetName(irMap) + "\\nArg Idx : " + std::to_string(GetArgIndex()) + + " Call Site : " + std::to_string(GetCallSite()) + "\\n"; + } + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGActualNode::ReplaceByGlobalNode() +{ + DEBUG_ASSERT(callSiteInfo == kInvalid, "must be invalid"); + DEBUG_ASSERT(out.size() == 1, "the size of out must be one"); + DEBUG_ASSERT(pointsTo.size() == 1, "the size of pointsTo must be one"); + for (EACGBaseNode *inNode : in) { + inNode->out.erase(this); + } + in.clear(); + return false; +} + +void EACGFieldNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) +{ + if (dumped[this]) { + return; + } + dumped[this] = true; + std::string name = GetName(nullptr); + std::string label; + label = GetName(irMap) + "\\nFIdx : " + std::to_string(GetFieldID()) + "\\n"; + std::string color; + GetNodeFormatInDot(label, color); + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + fout << name << " [shape=circle, label=\"" << label << "\", fontcolor=" << color << ", style=" << style + << ", margin=0];\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGFieldNode::ReplaceByGlobalNode() +{ + for (EACGObjectNode *obj : pointsTo) { + obj->pointsBy.erase(this); + } + pointsTo.clear(); + (void)pointsTo.insert(eaCG->GetGlobalObject()); + for (EACGBaseNode *outNode : out) { + outNode->in.erase(this); + } + out.clear(); + (void)out.insert(eaCG->GetGlobalObject()); + bool canDelete = true; + std::set tmp = belongsTo; + for (EACGObjectNode *obj : tmp) { + if (obj->GetEAStatus() != kGlobalEscape) { + canDelete = false; + } else { + belongsTo.erase(obj); + } + } + if (canDelete) { + 
DEBUG_ASSERT(eaCG->nodes[id - 1] == this, "must be this");
+        eaCG->nodes[id - 1] = nullptr;
+        for (EACGBaseNode *inNode : in) {
+            DEBUG_ASSERT(!inNode->IsObjectNode(), "must not be an ObjectNode");
+            inNode->out.erase(this);
+            (void)inNode->out.insert(eaCG->globalField);
+        }
+        for (auto exprPair : eaCG->expr2Nodes) {
+            size_t eraseSize = exprPair.second->erase(this);
+            if (eraseSize != 0 && exprPair.first->GetMeOp() != kMeOpIvar && exprPair.first->GetMeOp() != kMeOpOp) {
+                DEBUG_ASSERT(false, "must be kMeOpIvar or kMeOpOp");
+            }
+            if (exprPair.second->size() == 0) {
+                exprPair.second->insert(eaCG->globalField);
+            }
+        }
+        in.clear();
+        return true;
+    }
+    return false;
+}
+
+void EAConnectionGraph::DeleteEACG() const
+{
+    for (EACGBaseNode *node : nodes) {
+        if (node == nullptr) {
+            continue;
+        }
+        delete node;
+        node = nullptr;
+    }
+}
+
+void EAConnectionGraph::TrimGlobalNode() const
+{
+    for (EACGBaseNode *node : nodes) {
+        if (node == nullptr) {
+            continue;
+        }
+        constexpr int leastIdx = 3;
+        if (node->id <= leastIdx) {
+            continue;
+        }
+        bool canDelete = false;
+        if (node->GetEAStatus() == kGlobalEscape) {
+            canDelete = node->ReplaceByGlobalNode();
+        }
+#ifdef DEBUG
+        node->CheckAllConnectionInNodes();
+#endif
+        if (canDelete) {
+            delete node;
+            node = nullptr;
+        }
+    }
+}
+
+void EAConnectionGraph::InitGlobalNode()
+{
+    globalObj = CreateObjectNode(nullptr, kNoEscape, true, TyIdx(0));
+    globalRef = CreateReferenceNode(nullptr, kNoEscape, true);
+    (void)globalRef->AddOutNode(*globalObj);
+    (void)globalRef->AddOutNode(*globalRef);
+    globalField = CreateFieldNode(nullptr, kNoEscape, -1, globalObj, true); // -1 denotes the global field
+    (void)globalField->AddOutNode(*globalObj);
+    (void)globalField->AddOutNode(*globalRef);
+    (void)globalField->AddOutNode(*globalField);
+    (void)globalRef->AddOutNode(*globalField);
+    globalObj->eaStatus = kGlobalEscape;
+    globalField->eaStatus = kGlobalEscape;
+    globalRef->eaStatus = kGlobalEscape;
+}
+
+EACGObjectNode *EAConnectionGraph::CreateObjectNode(MeExpr *expr, EAStatus initialEas, bool isPh, TyIdx tyIdx)
+{
+    EACGObjectNode *newObjNode =
+        new (std::nothrow) EACGObjectNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isPh);
+    ASSERT_NOT_NULL(newObjNode);
+    nodes.push_back(newObjNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newObjNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    return newObjNode;
+}
+
+EACGPointerNode *EAConnectionGraph::CreatePointerNode(MeExpr *expr, EAStatus initialEas, int inderictL)
+{
+    EACGPointerNode *newPointerNode =
+        new (std::nothrow) EACGPointerNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, inderictL);
+    ASSERT_NOT_NULL(newPointerNode);
+    nodes.push_back(newPointerNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newPointerNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    return newPointerNode;
+}
+
+EACGRefNode *EAConnectionGraph::CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic)
+{
+    EACGRefNode *newRefNode =
+        new (std::nothrow) EACGRefNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isStatic);
+    ASSERT_NOT_NULL(newRefNode);
+    nodes.push_back(newRefNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] =
alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newRefNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+        if (expr->GetMeOp() != kMeOpVar && expr->GetMeOp() != kMeOpAddrof && expr->GetMeOp() != kMeOpReg &&
+            expr->GetMeOp() != kMeOpOp) {
+            DEBUG_ASSERT(false, "must be kMeOpVar, kMeOpAddrof, kMeOpReg or kMeOpOp");
+        }
+    }
+    return newRefNode;
+}
+
+void EAConnectionGraph::TouchCallSite(uint32 callSiteInfo)
+{
+    CHECK_FATAL(callSite2Nodes.find(callSiteInfo) != callSite2Nodes.end(), "find failed");
+    if (callSite2Nodes[callSiteInfo] == nullptr) {
+        MapleVector<EACGBaseNode *> *tmp = alloc->GetMemPool()->New<MapleVector<EACGBaseNode *>>(alloc->Adapter());
+        callSite2Nodes[callSiteInfo] = tmp;
+    }
+}
+
+EACGActualNode *EAConnectionGraph::CreateActualNode(EAStatus initialEas, bool isReurtn, bool isPh, uint8 argIdx,
+                                                    uint32 callSiteInfo)
+{
+    MeExpr *expr = nullptr;
+    DEBUG_ASSERT(isPh, "must be ph");
+    DEBUG_ASSERT(callSiteInfo != 0, "must not be zero");
+    EACGActualNode *newActNode = new (std::nothrow) EACGActualNode(
+        mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isReurtn, isPh, argIdx, callSiteInfo);
+    ASSERT_NOT_NULL(newActNode);
+    nodes.push_back(newActNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newActNode);
+        } else {
+            DEBUG_ASSERT(false, "must find expr");
+        }
+    }
+    if (callSiteInfo != kInvalid) {
+        DEBUG_ASSERT(callSite2Nodes[callSiteInfo] != nullptr, "must be touched before");
+        callSite2Nodes[callSiteInfo]->push_back(newActNode);
+#ifdef DEBUG
+        CheckArgNodeOrder(*callSite2Nodes[callSiteInfo]);
+#endif
+    } else {
+        funcArgNodes.push_back(newActNode);
+    }
+    return newActNode;
+}
+
+EACGFieldNode *EAConnectionGraph::CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId,
+                                                  EACGObjectNode *belongTo, bool isPh)
+{
+    EACGFieldNode *newFieldNode = new (std::nothrow)
+        EACGFieldNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, fId, belongTo, isPh);
+    ASSERT_NOT_NULL(newFieldNode);
+    nodes.push_back(newFieldNode);
+    if (expr != nullptr) {
+        if (expr2Nodes.find(expr) == expr2Nodes.end()) {
+            expr2Nodes[expr] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+            expr2Nodes[expr]->insert(newFieldNode);
+        } else {
+            expr2Nodes[expr]->insert(newFieldNode);
+        }
+        if (expr->GetMeOp() != kMeOpIvar && expr->GetMeOp() != kMeOpOp) {
+            DEBUG_ASSERT(false, "must be kMeOpIvar or kMeOpOp");
+        }
+    }
+    return newFieldNode;
+}
+
+EACGBaseNode *EAConnectionGraph::GetCGNodeFromExpr(MeExpr *me)
+{
+    if (expr2Nodes.find(me) == expr2Nodes.end()) {
+        return nullptr;
+    }
+    return *(expr2Nodes[me]->begin());
+}
+
+void EAConnectionGraph::UpdateExprOfNode(EACGBaseNode &node, MeExpr *me)
+{
+    if (expr2Nodes.find(me) == expr2Nodes.end()) {
+        expr2Nodes[me] = alloc->GetMemPool()->New<MapleSet<EACGBaseNode *>>(alloc->Adapter());
+        expr2Nodes[me]->insert(&node);
+    } else {
+        if (node.IsFieldNode()) {
+            expr2Nodes[me]->insert(&node);
+        } else {
+            if (expr2Nodes[me]->find(&node) == expr2Nodes[me]->end()) {
+                CHECK_FATAL(false, "must be field node");
+            }
+        }
+    }
+    node.SetMeExpr(*me);
+}
+
+void EAConnectionGraph::UpdateExprOfGlobalRef(MeExpr *me)
+{
+    UpdateExprOfNode(*globalRef, me);
+}
+
+EACGActualNode *EAConnectionGraph::GetReturnNode() const
+{
+    if (funcArgNodes.size() == 0) {
+        return nullptr;
+    }
+    EACGActualNode *ret = static_cast<EACGActualNode *>(funcArgNodes[funcArgNodes.size() - 1]);
+    if (ret->IsReturn()) {
+        return ret;
+    }
+    return nullptr;
+}
+#ifdef DEBUG
+void
EAConnectionGraph::CheckArgNodeOrder(MapleVector<EACGBaseNode *> &funcArgV)
+{
+    uint8 preIndex = 0;
+    for (size_t i = 0; i < funcArgV.size(); ++i) {
+        DEBUG_ASSERT(funcArgV[i]->IsActualNode(), "must be ActualNode");
+        EACGActualNode *actNode = static_cast<EACGActualNode *>(funcArgV[i]);
+        if (i == funcArgV.size() - 1) {
+            if (actNode->IsReturn()) {
+                continue;
+            } else {
+                DEBUG_ASSERT(actNode->GetArgIndex() >= preIndex, "must not be less than preIndex");
+            }
+        } else {
+            DEBUG_ASSERT(!actNode->IsReturn(), "must not be the return node");
+            DEBUG_ASSERT(actNode->GetArgIndex() >= preIndex, "must not be less than preIndex");
+        }
+        preIndex = actNode->GetArgIndex();
+    }
+}
+#endif
+bool EAConnectionGraph::ExprCanBeOptimized(MeExpr &expr)
+{
+    MeExpr *exprKey = &expr;
+    if (expr2Nodes.find(exprKey) == expr2Nodes.end()) {
+        MeExpr *rhs = nullptr;
+        if (expr.GetMeOp() == kMeOpVar) {
+            DEBUG_ASSERT(static_cast<VarMeExpr *>(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt");
+            DEBUG_ASSERT(static_cast<VarMeExpr *>(&expr)->GetDefStmt()->GetOp() == OP_dassign, "must be OP_dassign");
+            MeStmt *defStmt = static_cast<VarMeExpr *>(&expr)->GetDefStmt();
+            DassignMeStmt *dassignStmt = static_cast<DassignMeStmt *>(defStmt);
+            rhs = dassignStmt->GetRHS();
+        } else if (expr.GetMeOp() == kMeOpReg) {
+            DEBUG_ASSERT(static_cast<RegMeExpr *>(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt");
+            DEBUG_ASSERT(static_cast<RegMeExpr *>(&expr)->GetDefStmt()->GetOp() == OP_regassign,
+                         "must be OP_regassign");
+            MeStmt *defStmt = static_cast<RegMeExpr *>(&expr)->GetDefStmt();
+            AssignMeStmt *regassignStmt = static_cast<AssignMeStmt *>(defStmt);
+            rhs = regassignStmt->GetRHS();
+        } else {
+            CHECK_FATAL(false, "impossible");
+        }
+        DEBUG_ASSERT(expr2Nodes.find(rhs) != expr2Nodes.end(), "impossible");
+        exprKey = rhs; // continue the query with the defining RHS
+    }
+    MapleSet<EACGBaseNode *> &nodesTmp = *expr2Nodes[exprKey];
+
+    for (EACGBaseNode *node : nodesTmp) {
+        for (EACGObjectNode *obj : node->GetPointsToSet()) {
+            if (obj->GetEAStatus() != kNoEscape && obj->GetEAStatus() != kReturnEscape) {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+MapleVector<EACGBaseNode *> *EAConnectionGraph::GetCallSiteArgNodeVector(uint32 callSite)
+{
+    CHECK_FATAL(callSite2Nodes.find(callSite) != callSite2Nodes.end(), "find failed");
+    ASSERT_NOT_NULL(callSite2Nodes[callSite]);
+    return callSite2Nodes[callSite];
+}
+
+// if we had the SCCs of the connection graph, this would be more efficient.
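PropogateEAStatus below recomputes escape states to a fixed point by rescanning every node until nothing changes; with an SCC condensation, a single pass in topological order would suffice, which is what the comment above alludes to. A minimal standalone sketch of that fixed-point idea, with illustrative names rather than this module's API (the lattice order kNoEscape < kArgumentEscape < kReturnEscape < kGlobalEscape is assumed from how the EAStatus values index the counter array later in this file):

    #include <cstdio>
    #include <vector>

    enum EAStatus { kNoEscape, kArgumentEscape, kReturnEscape, kGlobalEscape };

    struct Node {
        EAStatus status = kNoEscape;
        std::vector<Node *> succs;  // stands in for points-to and field edges
    };

    // Raise every successor at least to its predecessor's state and repeat
    // until a full sweep changes nothing: the same fixed point the pass computes.
    void Propagate(std::vector<Node> &graph)
    {
        bool changed = true;
        while (changed) {
            changed = false;
            for (Node &n : graph) {
                for (Node *s : n.succs) {
                    if (s->status < n.status) {
                        s->status = n.status;
                        changed = true;
                    }
                }
            }
        }
    }

    int main()
    {
        std::vector<Node> g(3);
        g[0].status = kGlobalEscape;  // e.g. stored into a static field
        g[0].succs = {&g[1]};
        g[1].succs = {&g[2]};
        Propagate(g);
        std::printf("%d %d\n", g[1].status, g[2].status);  // 3 3: both GlobalEscape
        return 0;
    }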
+void EAConnectionGraph::PropogateEAStatus() +{ + bool oldStatus = CGHasUpdated(); + do { + UnSetCGUpdateFlag(); + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + if (node->IsObjectNode()) { + EACGObjectNode *obj = static_cast(node); + for (auto fieldPair : obj->GetFieldNodeMap()) { + EACGBaseNode *field = fieldPair.second; + (void)field->UpdateEAStatus(obj->GetEAStatus()); + } + } else { + for (EACGBaseNode *pointsToNode : node->GetPointsToSet()) { + (void)pointsToNode->UpdateEAStatus(node->GetEAStatus()); + } + } + } + DEBUG_ASSERT(!CGHasUpdated(), "must be Updated"); + } while (CGHasUpdated()); + RestoreStatus(oldStatus); +} + +const MapleVector *EAConnectionGraph::GetFuncArgNodeVector() const +{ + return &funcArgNodes; +} + +// this func is called from callee context +void EAConnectionGraph::UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg) +{ + DEBUG_ASSERT(abs(static_cast(callerCallSiteArg.size()) - static_cast(calleeFuncArg.size())) <= 1, + "greater than"); + + UnSetCGUpdateFlag(); + for (uint32 i = 0; i < callerCallSiteArg.size(); ++i) { + EACGBaseNode *callerNode = callerCallSiteArg[i]; + ASSERT_NOT_NULL(callerNode); + DEBUG_ASSERT(callerNode->IsActualNode(), "must be ActualNode"); + if ((i == callerCallSiteArg.size() - 1) && static_cast(callerNode)->IsReturn()) { + continue; + } + bool hasGlobalEA = false; + for (EACGObjectNode *obj : callerNode->GetPointsToSet()) { + if (obj->GetEAStatus() == kGlobalEscape) { + hasGlobalEA = true; + break; + } + } + if (hasGlobalEA) { + EACGBaseNode *calleeNode = (calleeFuncArg)[i]; + for (EACGObjectNode *obj : calleeNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + } + } + if (CGHasUpdated()) { + PropogateEAStatus(); + } + TrimGlobalNode(); +} + +void EAConnectionGraph::DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec) +{ + if (dumpVec == nullptr) { + dumpVec = &nodes; + } + std::filebuf fb; + std::string outFile = GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) + "-connectiongraph.dot"; + fb.open(outFile, std::ios::trunc | std::ios::out); + CHECK_FATAL(fb.is_open(), "open file failed"); + std::ostream cgDotFile(&fb); + cgDotFile << "digraph connectiongraph{\n"; + std::map dumped; + for (auto node : nodes) { + dumped[node] = false; + } + for (EACGBaseNode *node : *dumpVec) { + if (node == nullptr) { + continue; + } + if (dumped[node]) { + continue; + } + node->DumpDotFile(cgDotFile, dumped, dumpPt, irMap); + dumped[node] = true; + } + cgDotFile << "}\n"; + fb.close(); +} + +void EAConnectionGraph::CountObjEAStatus() const +{ + int sum = 0; + int eaCount[4]; // There are four EAStatus. 
+ for (size_t i = 0; i < 4; ++i) { + eaCount[i] = 0; + } + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + + if (node->IsObjectNode()) { + EACGObjectNode *objNode = static_cast(node); + if (!objNode->IsPhantom()) { + CHECK_FATAL(objNode->locInfo != nullptr, "Impossible"); + MIRType *type = nullptr; + const MeExpr *expr = objNode->GetMeExpr(); + CHECK_FATAL(expr != nullptr, "Impossible"); + if (expr->GetOp() == OP_gcmalloc || expr->GetOp() == OP_gcpermalloc) { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } else { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } + LogInfo::MapleLogger() << "[LOCATION] [" << objNode->locInfo->GetModName() << " " + << objNode->locInfo->GetFileId() << " " << objNode->locInfo->GetLineId() << " " + << EscapeName(objNode->GetEAStatus()) << " " << expr->GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << "]\n"; + ++sum; + ++eaCount[node->GetEAStatus()]; + } + } + } + LogInfo::MapleLogger() << "[gcmalloc object statistics] " + << GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) << " " + << "Gcmallocs: " << sum << " " + << "NoEscape: " << eaCount[kNoEscape] << " " + << "RetEscape: " << eaCount[kReturnEscape] << " " + << "ArgEscape: " << eaCount[kArgumentEscape] << " " + << "GlobalEscape: " << eaCount[kGlobalEscape] << "\n"; +} + +void EAConnectionGraph::RestoreStatus(bool old) +{ + if (old) { + SetCGHasUpdated(); + } else { + UnSetCGUpdateFlag(); + } +} + +// Update caller's ConnectionGraph using callee's summary information. +// If the callee's summary is not found, we just mark all the pointsTo nodes of caller's actual node to GlobalEscape. +// Otherwise, we do these steps: +// +// 1, update caller nodes using callee's summary, new node might be added into caller's CG in this step. +// +// 2, update caller edges using callee's summary, new points-to edge might be added into caller's CG in this step. 
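The conservative no-summary branch described above is simple enough to state in a few lines. A sketch against hypothetical stand-in types (the real classes are the EACG* nodes in this file; this illustrates the contract, not the module's code):

    #include <vector>

    struct ToyObject { int status = 0; };                     // 3 plays kGlobalEscape
    struct ToyActual { std::vector<ToyObject *> pointsTo; };  // one node per argument

    // Unknown callee: anything reachable from an argument may be stored anywhere,
    // so every pointed-to object is pessimistically marked global-escape.
    void MergeWithUnknownCallee(std::vector<ToyActual *> &callerActuals)
    {
        for (ToyActual *actual : callerActuals) {
            for (ToyObject *obj : actual->pointsTo) {
                obj->status = 3;  // kGlobalEscape
            }
        }
    }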
+bool EAConnectionGraph::MergeCG(MapleVector &caller, const MapleVector *callee) +{ + TrimGlobalNode(); + bool cgChanged = false; + bool oldStatus = CGHasUpdated(); + UnSetCGUpdateFlag(); + if (callee == nullptr) { + for (EACGBaseNode *actualInCaller : caller) { + for (EACGObjectNode *p : actualInCaller->GetPointsToSet()) { + (void)p->UpdateEAStatus(EAStatus::kGlobalEscape); + } + } + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; + } + size_t callerSize = caller.size(); + size_t calleeSize = callee->size(); + if (callerSize > calleeSize) { + DEBUG_ASSERT((callerSize - calleeSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } else { + DEBUG_ASSERT((calleeSize - callerSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } + if (callerSize == 0 || calleeSize == 0) { + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + return cgChanged; + } + if ((callerSize != calleeSize) && + (callerSize != calleeSize + 1 || static_cast(callee->back())->IsReturn()) && + (callerSize != calleeSize - 1 || !static_cast(callee->back())->IsReturn())) { + DEBUG_ASSERT(false, "Impossible"); + } + + callee2Caller.clear(); + UpdateCallerNodes(caller, *callee); + UpdateCallerEdges(); + UpdateCallerRetNode(caller, *callee); + callee2Caller.clear(); + + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; +} + +void EAConnectionGraph::AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee) +{ + if (callee2Caller.find(callee) == callee2Caller.end()) { + std::set callerSet; + callee2Caller[callee] = callerSet; + } + (void)callee2Caller[callee].insert(caller); +} + +void EAConnectionGraph::UpdateCallerRetNode(MapleVector &caller, + const MapleVector &callee) +{ + EACGActualNode *lastInCaller = static_cast(caller.back()); + EACGActualNode *lastInCallee = static_cast(callee.back()); + if (!lastInCaller->IsReturn()) { + return; + } + CHECK_FATAL(lastInCaller->GetOutSet().size() == 1, "Impossible"); + for (EACGBaseNode *callerRetNode : lastInCaller->GetOutSet()) { + for (EACGObjectNode *calleeRetNode : lastInCallee->GetPointsToSet()) { + for (EACGObjectNode *objInCaller : callee2Caller[calleeRetNode]) { + auto pointsToSet = callerRetNode->GetPointsToSet(); + if (pointsToSet.find(objInCaller) == pointsToSet.end()) { + (void)callerRetNode->AddOutNode(*objInCaller); + } + } + } + } +} + +// Update caller node by adding some nodes which are mapped from callee. +void EAConnectionGraph::UpdateCallerNodes(const MapleVector &caller, + const MapleVector &callee) +{ + const size_t callerSize = caller.size(); + const size_t calleeSize = callee.size(); + const size_t actualCount = ((callerSize < calleeSize) ? callerSize : calleeSize); + bool firstTime = true; + + for (size_t i = 0; i < actualCount; ++i) { + EACGBaseNode *actualInCaller = caller.at(i); + EACGBaseNode *actualInCallee = callee.at(i); + UpdateNodes(*actualInCallee, *actualInCaller, firstTime); + } +} + +// Update caller edges using information from callee. 
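Step 2 replays, for every callee field edge p --f--> q, the same edge between all caller objects that p and q map to. Sketched with hypothetical stand-ins (the real lookup and edge insertion live in UpdateCallerEdgesInternal below):

    #include <map>
    #include <set>

    struct Obj;  // stands in for EACGObjectNode
    using CalleeToCallerMap = std::map<const Obj *, std::set<Obj *>>;

    // Cross product of the two mapped sets, mirroring the nested loops in
    // UpdateCallerEdgesInternal (names here are illustrative only).
    void ReplayFieldEdge(const CalleeToCallerMap &mapped, const Obj *p, const Obj *q,
                         void (*addFieldEdge)(Obj *from, Obj *to))
    {
        for (Obj *callerP : mapped.at(p)) {
            for (Obj *callerQ : mapped.at(q)) {
                addFieldEdge(callerP, callerQ);
            }
        }
    }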
+void EAConnectionGraph::UpdateCallerEdges() +{ + std::set set; + for (auto pair : callee2Caller) { + (void)set.insert(pair.first); + } + for (EACGObjectNode *p : set) { + for (auto tempPair : p->GetFieldNodeMap()) { + int32 fieldID = tempPair.first; + EACGBaseNode *fieldNode = tempPair.second; + for (EACGObjectNode *q : fieldNode->GetPointsToSet()) { + UpdateCallerEdgesInternal(p, fieldID, q); + } + } + } +} + +// Update caller edges using information of given ObjectNode from callee. +void EAConnectionGraph::UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2) +{ + CHECK_FATAL(callee2Caller.find(node1) != callee2Caller.end(), "find failed"); + CHECK_FATAL(callee2Caller.find(node2) != callee2Caller.end(), "find failed"); + for (EACGObjectNode *p1 : callee2Caller[node1]) { + for (EACGObjectNode *q1 : callee2Caller[node2]) { + EACGFieldNode *fieldNode = p1->GetFieldNodeFromIdx(fieldID); + if (fieldNode == nullptr) { + CHECK_NULL_FATAL(node1); + fieldNode = node1->GetFieldNodeFromIdx(fieldID); + CHECK_FATAL(fieldNode != nullptr, "fieldNode must not be nullptr because we have handled it before!"); + CHECK_FATAL(fieldNode->IsBelongTo(this), "must be belong to this"); + (void)p1->AddOutNode(*fieldNode); + } + (void)fieldNode->AddOutNode(*q1); + } + } +} + +void EAConnectionGraph::UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime) +{ + DEBUG_ASSERT(actualInCallee.GetPointsToSet().size() > 0, "actualInCallee->GetPointsToSet().size() must gt 0!"); + for (EACGObjectNode *objInCallee : actualInCallee.GetPointsToSet()) { + if (actualInCaller.GetPointsToSet().size() == 0) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.size() > 0) { + for (EACGObjectNode *temp : mapsTo) { + (void)actualInCaller.AddOutNode(*temp); + } + } else if (objInCallee->IsBelongTo(this)) { + DEBUG_ASSERT(false, "must be belong to this"); + } else { + EACGObjectNode *phantom = CreateObjectNode(nullptr, actualInCaller.GetEAStatus(), true, TyIdx(0)); + (void)actualInCaller.AddOutNode(*phantom); + AddMaps2Object(phantom, objInCallee); + UpdateCallerWithCallee(*phantom, *objInCallee, firstTime); + } + } else { + for (EACGObjectNode *objInCaller : actualInCaller.GetPointsToSet()) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.find(objInCaller) == mapsTo.end()) { + AddMaps2Object(objInCaller, objInCallee); + UpdateCallerWithCallee(*objInCaller, *objInCallee, firstTime); + } + } + } + } +} + +// The escape state of the nodes in MapsTo(which is the object node in caller) is marked +// GlobalEscape if the escape state of object node in callee is GlobalEscape. +// Otherwise, the escape state of the caller nodes is not affected. +void EAConnectionGraph::UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, + bool firstTime) +{ + if (objInCallee.GetEAStatus() == EAStatus::kGlobalEscape) { + (void)objInCaller.UpdateEAStatus(EAStatus::kGlobalEscape); + } + + // At this moment, a node in caller is mapped to the corresponding node in callee, + // we need make sure that all the field nodes also exist in caller. If not, + // we create both the field node and the phantom object node it should point to for the caller. 
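// Illustrative trace of the phantom-creation step (an editorial example, not
// part of the pass): suppose callee object C has a field node f5 pointing at
// callee object D, while the caller object C' mapped from C has no field 5
// yet. The loop below first reuses f5 directly when it already belongs to
// this graph; otherwise GetOrCreateFieldNodeFromIdx materialises a phantom
// field f5' on C', and the recursive UpdateNodes call then gives f5' a
// phantom target object standing in for D.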
+ for (auto tempPair : objInCallee.GetFieldNodeMap()) { + EACGFieldNode *fieldInCaller = objInCaller.GetFieldNodeFromIdx(tempPair.first); + EACGFieldNode *fieldInCallee = tempPair.second; + if (fieldInCaller == nullptr && fieldInCallee->IsBelongTo(this)) { + (void)objInCaller.AddOutNode(*fieldInCallee); + } + fieldInCaller = GetOrCreateFieldNodeFromIdx(objInCaller, tempPair.first); + UpdateNodes(*fieldInCallee, *fieldInCaller, firstTime); + } +} + +EACGFieldNode *EAConnectionGraph::GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID) +{ + EACGFieldNode *ret = obj.GetFieldNodeFromIdx(fieldID); + if (ret == nullptr) { + // this node is always phantom + ret = CreateFieldNode(nullptr, obj.GetEAStatus(), fieldID, &obj, true); + } + return ret; +} +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/BUILD.gn b/ecmascript/compiler/codegen/maple/maple_ir/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..b179f367f7ea54096d74463e002bea59f37ab50b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/BUILD.gn @@ -0,0 +1,77 @@ +# Copyright (c) 2023 Huawei Device Co., Ltd. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//arkcompiler/ets_runtime/js_runtime_config.gni") + +include_directories = [ + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLEALL_THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_phase/include", +] + +src_libmplir = [ + "src/global_tables.cpp", + "src/intrinsics.cpp", + "src/lexer.cpp", + "src/mir_symbol_builder.cpp", + "src/mir_builder.cpp", + "src/mir_const.cpp", + "src/mir_scope.cpp", + "src/mir_function.cpp", + "src/mir_lower.cpp", + "src/mir_module.cpp", + "src/verification.cpp", + "src/verify_annotation.cpp", + "src/verify_mark.cpp", + "src/mir_nodes.cpp", + "src/mir_symbol.cpp", + "src/mir_type.cpp", + "src/opcode_info.cpp", + "src/option.cpp", + "src/mpl2mpl_options.cpp", + "src/parser.cpp", + "src/mir_parser.cpp", + "src/mir_pragma.cpp", + "src/printing.cpp", + "src/bin_func_import.cpp", + "src/bin_func_export.cpp", + "src/bin_mpl_import.cpp", + "src/bin_mpl_export.cpp", + "src/debug_info.cpp", + "src/debug_info_util.cpp", + "${MAPLEALL_ROOT}/maple_ipa/src/old/ea_connection_graph.cpp", +] + +ohos_static_library("libmplir") { + stack_protector_ret = false + configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + sources = src_libmplir + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + deps = [ + "${MAPLEALL_ROOT}/maple_driver:libdriver_option", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + ] + part_name = "ets_runtime" + 
subsystem_name = "arkcompiler" +} diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/all_attributes.def b/ecmascript/compiler/codegen/maple/maple_ir/include/all_attributes.def new file mode 100644 index 0000000000000000000000000000000000000000..9d07dcb3fddd6727ec2074d519b01c5d854ab466 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/all_attributes.def @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* all possible attribute names from typeattrs.def, funcattrs.def and fieldattrs.def */ + ATTR(const) + ATTR(final) + ATTR(generic) + ATTR(implicit) + ATTR(private) + ATTR(protected) + ATTR(public) + ATTR(static) + ATTR(synthetic) + ATTR(used) + ATTR(hiddenapiblack) + ATTR(hiddenapigrey) +#ifdef FUNC_ATTR + ATTR(bridge) + ATTR(constructor) + ATTR(critical_native) + ATTR(declared_synchronized) + ATTR(default) + ATTR(destructor) + ATTR(delete) + ATTR(fast_native) + ATTR(inline) + ATTR(always_inline) + ATTR(noinline) + ATTR(native) + ATTR(strict) + ATTR(varargs) + ATTR(virtual) + ATTR(nosideeffect) + ATTR(pure) + ATTR(noexcept) + ATTR(nodefargeffect) + ATTR(nodefeffect) + ATTR(noretglobal) + ATTR(nothrow_exception) + ATTR(noretarg) + ATTR(noprivate_defeffect) + ATTR(ipaseen) + ATTR(rclocalunowned) + ATTR(callersensitive) + ATTR(weakref) + ATTR(safed) + ATTR(unsafed) + ATTR(noreturn) +#endif +#if defined(FUNC_ATTR) || defined(TYPE_ATTR) + ATTR(abstract) + ATTR(extern) + ATTR(interface) + ATTR(local) + ATTR(optimized) + ATTR(synchronized) + ATTR(weak) +#endif +#if defined(TYPE_ATTR) || defined(FIELD_ATTR) +#include "memory_order_attrs.def" + ATTR(enum) + ATTR(restrict) + ATTR(transient) + ATTR(volatile) + ATTR(rcunowned) + ATTR(rcweak) + ATTR(final_boundary_size) + ATTR(tls_static) + ATTR(tls_dynamic) +#endif +#ifdef TYPE_ATTR + ATTR(annotation) + ATTR(readonly) + ATTR(verified) + ATTR(localrefvar) + ATTR(rcunownedthis) + ATTR(incomplete_array) + ATTR(may_alias) + ATTR(static_init_zero) +#endif +#ifdef FUNC_ATTR + ATTR(firstarg_return) + ATTR(called_once) +#endif +#ifdef STMT_ATTR + ATTR(insaferegion) +#endif + ATTR(oneelem_simd) + ATTR(nonnull) + ATTR(section) + ATTR(asmattr) +#if defined(FUNC_ATTR) && !defined(NOCONTENT_ATTR) + ATTR(alias) + ATTR(constructor_priority) + ATTR(destructor_priority) +#endif +#if (defined(TYPE_ATTR) || defined(FIELD_ATTR)) && !defined(NOCONTENT_ATTR) + ATTR(pack) +#endif +#ifdef FUNC_ATTR + ATTR(initialization) + ATTR(termination) +#endif +#if (defined(FUNC_ATTR) || defined(STMT_ATTR)) + ATTR(ccall) + ATTR(webkitjscall) + ATTR(ghcall) +#endif +#if defined(FUNC_ATTR) && !defined(NOCONTENT_ATTR) + ATTR(frame_pointer) + ATTR(frame_reserved_slots) +#endif \ No newline at end of file diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mir_file.h b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mir_file.h new file mode 100644 index 0000000000000000000000000000000000000000..f1eca03268a9a2a5f5d7159e49bd04acf0a3159f --- /dev/null +++ 
b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mir_file.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_BIN_MIR_FILE_H
+#define MAPLE_IR_INCLUDE_BIN_MIR_FILE_H
+#include <string>
+#include "types_def.h"
+
+namespace maple {
+const std::string kBinMirFileID = "HWCMPL"; // for magic in file header
+constexpr uint8 kVersionMajor = 0; // experimental version
+constexpr uint8 kVersionMinor = 1;
+constexpr int kMagicSize = 7;
+
+enum BinMirFileType {
+    kMjsvmFileTypeCmplV1,
+    kMjsvmFileTypeCmpl, // kCmpl v2 is the release version of
+    kMjsvmFileTypeUnknown
+};
+
+inline uint8 MakeVersionNum(uint8 major, uint8 minor)
+{
+    uint8 mj = major & 0x0Fu;
+    uint8 mn = minor & 0x0Fu;
+    constexpr uint8 shiftNum = 4;
+    return (mj << shiftNum) | mn;
+}
+
+// file header for binary format kMmpl, 8B in total
+// Note: the header is different from the specification
+struct BinMIRFileHeader {
+    char magic[kMagicSize]; // "HWCMPL", or "HWLOS_"
+    uint8 segNum;  // number of segments (e.g. one raw IR file is a segment unit)
+    uint8 type;    // enum of type of VM file (e.g. MapleIR, TE)
+    uint8 version; // version of IR format (should be major.minor)
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_BIN_MIR_FILE_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_export.h b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_export.h
new file mode 100644
index 0000000000000000000000000000000000000000..569cb19fcd0643cbdcd974263e61aa2765e456c7
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_export.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_function.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "ea_connection_graph.h" + +namespace maple { +enum : uint8 { + kBinString = 1, + kBinUsrString = kBinString, + kBinInitConst = 2, + kBinSymbol = 3, + kBinFunction = 4, + kBinCallinfo = 5, + kBinKindTypeScalar = 6, + kBinKindTypeByName = 7, + kBinKindTypePointer = 8, + kBinKindTypeFArray = 9, + kBinKindTypeJarray = 10, + kBinKindTypeArray = 11, + kBinKindTypeFunction = 12, + kBinKindTypeParam = 13, + kBinKindTypeInstantVector = 14, + kBinKindTypeGenericInstant = 15, + kBinKindTypeBitField = 16, + kBinKindTypeStruct = 17, // for kTypeStruct, kTypeStructIncomplete and kTypeUnion + kBinKindTypeClass = 18, // for kTypeClass, and kTypeClassIncomplete + kBinKindTypeInterface = 19, // for kTypeInterface, and kTypeInterfaceIncomplete + kBinKindConstInt = 20, + kBinKindConstAddrof = 21, + kBinKindConstAddrofFunc = 22, + kBinKindConstStr = 23, + kBinKindConstStr16 = 24, + kBinKindConstFloat = 25, + kBinKindConstDouble = 26, + kBinKindConstAgg = 27, + kBinKindConstSt = 28, + kBinContentStart = 29, + kBinStrStart = 30, + kBinTypeStart = 31, + kBinCgStart = 32, + kBinSeStart = 33, + kBinFinish = 34, + kStartMethod = 35, + kBinEaCgNode = 36, + kBinEaCgActNode = 37, + kBinEaCgFieldNode = 38, + kBinEaCgRefNode = 39, + kBinEaCgObjNode = 40, + kBinEaCgStart = 41, + kBinEaStart = 42, + kBinNodeBlock = 43, + // kBinOpStatement : 44, + // kBinOpExpression : 45, + kBinReturnvals = 46, + kBinTypeTabStart = 47, + kBinSymStart = 48, + kBinSymTabStart = 49, + kBinFuncIdInfoStart = 50, + kBinFormalStart = 51, + kBinPreg = 52, + kBinSpecialReg = 53, + kBinLabel = 54, + kBinTypenameStart = 55, + kBinHeaderStart = 56, + kBinAliasMapStart = 57, + // kBinKindTypeViaTypename : 58, + // kBinKindSymViaSymname : 59, + // kBinKindFuncViaSymname : 60, + kBinFunctionBodyStart = 61, + kBinFormalWordsTypeTagged = 62, + kBinFormalWordsRefCounted = 63, + kBinLocalWordsTypeTagged = 64, + kBinLocalWordsRefCounter = 65, + kBinKindConstAddrofLabel = 66, + kBinKindConstAddrofLocal = 67, +}; + +// this value is used to check wether a file is a binary mplt file +constexpr int32 kMpltMagicNumber = 0xC0FFEE; +class BinaryMplExport { +public: + explicit BinaryMplExport(MIRModule &md); + virtual ~BinaryMplExport() = default; + + void Export(const std::string &fname, std::unordered_set *dumpFuncSet); + void WriteNum(int64 x); + void Write(uint8 b); + void OutputType(TyIdx tyIdx); + void WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet); + void OutputConst(MIRConst *constVal); + void OutputConstBase(const MIRConst &constVal); + void OutputTypeBase(const MIRType &type); + void OutputTypePairs(const MIRInstantVectorType &type); + void OutputStr(const GStrIdx &gstr); + void OutputUsrStr(UStrIdx ustr); + void OutputTypeAttrs(const TypeAttrs &ta); + void OutputPragmaElement(const MIRPragmaElement &e); + void OutputPragma(const MIRPragma &p); + void OutputFieldPair(const FieldPair &fp); + void OutputMethodPair(const MethodPair &memPool); + void OutputFieldsOfStruct(const FieldVector &fields); + void OutputMethodsOfStruct(const MethodVector &methods); + void OutputStructTypeData(const MIRStructType &type); + void OutputImplementedInterfaces(const std::vector &interfaces); + void OutputInfoIsString(const std::vector &infoIsString); + void OutputInfo(const std::vector &info, const std::vector 
&infoIsString); + void OutputPragmaVec(const std::vector &pragmaVec); + void OutputClassTypeData(const MIRClassType &type); + void OutputSymbol(MIRSymbol *sym); + void OutputFunction(PUIdx puIdx); + void OutputInterfaceTypeData(const MIRInterfaceType &type); + void OutputSrcPos(const SrcPosition &pos); + void OutputAliasMap(MapleMap &aliasVarMap); + void OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString); + void OutputFuncIdInfo(MIRFunction *func); + void OutputLocalSymbol(MIRSymbol *sym); + void OutputPreg(MIRPreg *preg); + void OutputLabel(LabelIdx lidx); + void OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab); + void OutputFormalsStIdx(MIRFunction *func); + void OutputFuncViaSym(PUIdx puIdx); + void OutputExpression(BaseNode *e); + void OutputBaseNode(const BaseNode *b); + void OutputReturnValues(const CallReturnVector *retv); + void OutputBlockNode(BlockNode *block); + + const MIRModule &GetMIRModule() const + { + return mod; + } + + bool not2mplt; // this export is not to an mplt file + MIRFunction *curFunc = nullptr; + +private: + using CallSite = std::pair; + void WriteEaField(const CallGraph &cg); + void WriteEaCgField(EAConnectionGraph *eaCg); + void OutEaCgNode(EACGBaseNode &node); + void OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart); + void OutEaCgFieldNode(EACGFieldNode &field); + void OutEaCgRefNode(const EACGRefNode &ref); + void OutEaCgActNode(const EACGActualNode &act); + void OutEaCgObjNode(EACGObjectNode &obj); + void WriteCgField(uint64 contentIdx, const CallGraph *cg); + void WriteSeField(); + void OutputCallInfo(CallInfo &callInfo); + void WriteContentField4mplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP); + void WriteStrField(uint64 contentIdx); + void WriteHeaderField(uint64 contentIdx); + void WriteTypeField(uint64 contentIdx, bool useClassList = true); + void Init(); + void WriteSymField(uint64 contentIdx); + void WriteInt(int32 x); + uint8 Read(); + int32 ReadInt(); + void WriteInt64(int64 x); + void WriteAsciiStr(const std::string &str); + void Fixup(size_t i, int32 x); + void DumpBuf(const std::string &name); + void AppendAt(const std::string &name, int32 offset); + void ExpandFourBuffSize(); + + MIRModule &mod; + size_t bufI = 0; + std::vector buf; + std::unordered_map gStrMark; + std::unordered_map funcMark; + std::string importFileName; + std::unordered_map uStrMark; + std::unordered_map symMark; + std::unordered_map typMark; + std::unordered_map localSymMark; + std::unordered_map localPregMark; + std::unordered_map labelMark; + friend class UpdateMplt; + std::unordered_map callInfoMark; + std::map *func2SEMap = nullptr; + std::unordered_map eaNodeMark; + bool inIPA = false; + static int typeMarkOffset; // offset of mark (tag in binmplimport) resulting from duplicated function +}; + +class UpdateMplt { +public: + UpdateMplt() = default; + ~UpdateMplt() = default; + class ManualSideEffect { + public: + ManualSideEffect(std::string name, bool p, bool u, bool d, bool o, bool e) + : funcName(name), pure(p), defArg(u), def(d), object(o), exception(e) {}; + virtual ~ManualSideEffect() = default; + + const std::string &GetFuncName() const + { + return funcName; + } + + bool GetPure() const + { + return pure; + } + + bool GetDefArg() const + { + return defArg; + } + + bool GetDef() const + { + return def; + } + + bool GetObject() const + { + return object; + } + + bool 
GetException() const + { + return exception; + } + + bool GetPrivateUse() const + { + return privateUse; + } + + bool GetPrivateDef() const + { + return privateDef; + } + + private: + std::string funcName; + bool pure; + bool defArg; + bool def; + bool object; + bool exception; + bool privateUse = false; + bool privateDef = false; + }; + void UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_import.h b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_import.h new file mode 100644 index 0000000000000000000000000000000000000000..abe37631251246b57231c3b696c3f885e19f0075 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mpl_import.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "mir_builder.h" +#include "ea_connection_graph.h" +namespace maple { +class BinaryMplImport { +public: + explicit BinaryMplImport(MIRModule &md) : mod(md), mirBuilder(&md) {} + BinaryMplImport &operator=(const BinaryMplImport &) = delete; + BinaryMplImport(const BinaryMplImport &) = delete; + + virtual ~BinaryMplImport() + { + for (MIRStructType *structPtr : tmpStruct) { + delete structPtr; + } + for (MIRClassType *classPtr : tmpClass) { + delete classPtr; + } + for (MIRInterfaceType *interfacePtr : tmpInterface) { + delete interfacePtr; + } + } + + uint64 GetBufI() const + { + return bufI; + } + void SetBufI(uint64 bufIVal) + { + bufI = bufIVal; + } + + bool IsBufEmpty() const + { + return buf.empty(); + } + size_t GetBufSize() const + { + return buf.size(); + } + + int32 GetContent(int64 key) const + { + return content.at(key); + } + + void SetImported(bool importedVal) + { + imported = importedVal; + } + + bool Import(const std::string &modid, bool readSymbols = false, bool readSe = false); + bool ImportForSrcLang(const std::string &modid, MIRSrcLang &srcLang); + MIRSymbol *GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, MIRStorageClass sclass, + MIRFunction *func, uint8 scpID); + int32 ReadInt(); + int64 ReadNum(); + +private: + void ReadContentField(); + void ReadStrField(); + void ReadHeaderField(); + void ReadTypeField(); + void ReadSymField(); + void ReadSymTabField(); + void ReadCgField(); + EAConnectionGraph *ReadEaCgField(); + void ReadEaField(); + EACGBaseNode &InEaCgNode(EAConnectionGraph &newEaCg); + void InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart); + void InEaCgActNode(EACGActualNode &actual); + void InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg); + void InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg); + void InEaCgRefNode(EACGRefNode &ref); + CallInfo 
*ImportCallInfo(); + void MergeDuplicated(PUIdx methodPuidx, std::vector &targetSet, std::vector &newSet); + void ReadSeField(); + void Jump2NextField(); + void Reset(); + void SkipTotalSize(); + void ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize); + MIRType &InsertInTypeTables(MIRType &ptype); + void InsertInHashTable(MIRType &ptype); + void SetupEHRootType(); + void UpdateMethodSymbols(); + void ImportConstBase(MIRConstKind &kind, MIRTypePtr &type); + MIRConst *ImportConst(MIRFunction *func); + GStrIdx ImportStr(); + UStrIdx ImportUsrStr(); + MIRType *CreateMirType(MIRTypeKind kind, GStrIdx strIdx, int64 tag) const; + MIRGenericInstantType *CreateMirGenericInstantType(GStrIdx strIdx) const; + MIRBitFieldType *CreateBitFieldType(uint8 fieldsize, PrimType pt, GStrIdx strIdx) const; + void CompleteAggInfo(TyIdx tyIdx); + TyIdx ImportType(bool forPointedType = false); + TyIdx ImportTypeNonJava(); + void ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal); + void InSymTypeTable(); + void ImportTypePairs(std::vector &insVecType); + TypeAttrs ImportTypeAttrs(); + MIRPragmaElement *ImportPragmaElement(); + MIRPragma *ImportPragma(); + void ImportFieldPair(FieldPair &fp); + void ImportMethodPair(MethodPair &memPool); + void ImportMethodsOfStructType(MethodVector &methods); + void ImportStructTypeData(MIRStructType &type); + void ImportInterfacesOfClassType(std::vector &interfaces); + void ImportInfoIsStringOfStructType(MIRStructType &type); + void ImportInfoOfStructType(MIRStructType &type); + void ImportPragmaOfStructType(MIRStructType &type); + void SetClassTyidxOfMethods(MIRStructType &type); + void ImportClassTypeData(MIRClassType &type); + void ImportInterfaceTypeData(MIRInterfaceType &type); + PUIdx ImportFunction(); + MIRSymbol *InSymbol(MIRFunction *func); + void ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString); + void ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab); + void ImportFuncIdInfo(MIRFunction *func); + MIRSymbol *ImportLocalSymbol(MIRFunction *func); + PregIdx ImportPreg(MIRFunction *func); + LabelIdx ImportLabel(MIRFunction *func); + void ImportFormalsStIdx(MIRFunction *func); + void ImportAliasMap(MIRFunction *func); + void ImportSrcPos(SrcPosition &pos); + void ImportBaseNode(Opcode &o, PrimType &typ); + PUIdx ImportFuncViaSym(MIRFunction *func); + BaseNode *ImportExpression(MIRFunction *func); + void ImportReturnValues(MIRFunction *func, CallReturnVector *retv); + BlockNode *ImportBlockNode(MIRFunction *fn); + void ReadFunctionBodyField(); + void ReadFileAt(const std::string &modid, int32 offset); + uint8 Read(); + int64 ReadInt64(); + void ReadAsciiStr(std::string &str); + int32 GetIPAFileIndex(std::string &name); + + bool inCG = false; + bool inIPA = false; + bool imported = true; // used only by irbuild to convert to ascii + bool importingFromMplt = false; // decided based on magic number + uint64 bufI = 0; + std::vector buf; + std::map content; + MIRModule &mod; + MIRBuilder mirBuilder; + std::vector gStrTab; + std::vector uStrTab; + std::vector tmpStruct; + std::vector tmpClass; + std::vector tmpInterface; + std::vector typTab; + std::vector funcTab; + std::vector symTab; + std::vector localSymTab; + std::vector localPregTab; + std::vector localLabelTab; + std::vector callInfoTab; + std::vector eaCgTab; + std::vector methodSymbols; + std::vector definedLabels; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H diff --git 
a/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mplt.h b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mplt.h new file mode 100644 index 0000000000000000000000000000000000000000..a5f570926963cc859e6eba2ecfec78bf98b5200e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/bin_mplt.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_BIN_MPLT_H +#define MAPLE_IR_INCLUDE_BIN_MPLT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" + +namespace maple { +class BinaryMplt { +public: + explicit BinaryMplt(MIRModule &md) : mirModule(md), binImport(md), binExport(md) {} + + virtual ~BinaryMplt() = default; + + void Export(const std::string &suffix, std::unordered_set *dumpFuncSet = nullptr) + { + binExport.Export(suffix, dumpFuncSet); + } + + bool Import(const std::string &modID, bool readCG = false, bool readSE = false) + { + importFileName = modID; + return binImport.Import(modID, readCG, readSE); + } + + const MIRModule &GetMod() const + { + return mirModule; + } + + BinaryMplImport &GetBinImport() + { + return binImport; + } + + BinaryMplExport &GetBinExport() + { + return binExport; + } + + std::string &GetImportFileName() + { + return importFileName; + } + + void SetImportFileName(const std::string &fileName) + { + importFileName = fileName; + } + +private: + MIRModule &mirModule; + BinaryMplImport binImport; + BinaryMplExport binExport; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPLT_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/binary_op.def b/ecmascript/compiler/codegen/maple/maple_ir/include/binary_op.def new file mode 100644 index 0000000000000000000000000000000000000000..09b1804ba09345d02ebf2eac61d6474d17a92205 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/binary_op.def @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +BINARYOP(add) +BINARYOP(ashr) +BINARYOP(band) +BINARYOP(bior) +BINARYOP(bxor) +BINARYOP(cand) +BINARYOP(cior) +BINARYOP(cmp) +BINARYOP(cmpl) +BINARYOP(cmpg) +BINARYOP(div) +BINARYOP(eq) +BINARYOP(gt) +BINARYOP(land) +BINARYOP(lior) +BINARYOP(le) +BINARYOP(lshr) +BINARYOP(lt) +BINARYOP(max) +BINARYOP(min) +BINARYOP(mul) +BINARYOP(ne) +BINARYOP(ge) +BINARYOP(rem) +BINARYOP(shl) +BINARYOP(ror) +BINARYOP(sub) +BINARYOP(CG_array_elem_add) + diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/cfg_primitive_types.h b/ecmascript/compiler/codegen/maple/maple_ir/include/cfg_primitive_types.h new file mode 100644 index 0000000000000000000000000000000000000000..f1f05983ad4581f3a62c4e3d041ad3ecc70756d6 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/cfg_primitive_types.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H +#define MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H + +namespace maple { +uint8 GetPointerSize(); // Circular include dependency with mir_type.h + +// Declaration of enum PrimType +#define LOAD_ALGO_PRIMARY_TYPE +enum PrimType { + PTY_begin, // PrimType begin +#define PRIMTYPE(P) PTY_##P, +#include "prim_types.def" + PTY_end, // PrimType end +#undef PRIMTYPE +}; + +constexpr PrimType kPtyInvalid = PTY_begin; +// just for test, no primitive type for derived SIMD types to be defined +constexpr PrimType kPtyDerived = PTY_end; + +struct PrimitiveTypeProperty { + PrimType type; + + PrimitiveTypeProperty(PrimType type, bool isInteger, bool isUnsigned, bool isAddress, bool isFloat, bool isPointer, + bool isSimple, bool isDynamic, bool isDynamicAny, bool isDynamicNone, bool isVector) + : type(type), + isInteger(isInteger), + isUnsigned(isUnsigned), + isAddress(isAddress), + isFloat(isFloat), + isPointer(isPointer), + isSimple(isSimple), + isDynamic(isDynamic), + isDynamicAny(isDynamicAny), + isDynamicNone(isDynamicNone), + isVector(isVector) + { + } + + bool IsInteger() const + { + return isInteger; + } + bool IsUnsigned() const + { + return isUnsigned; + } + + bool IsAddress() const + { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isAddress; + } + } + + bool IsFloat() const + { + return isFloat; + } + + bool IsPointer() const + { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isPointer; + } + } + + bool IsSimple() const + { + return isSimple; + } + bool IsDynamic() const + { + return isDynamic; + } + bool IsDynamicAny() const + { + return isDynamicAny; + } + bool IsDynamicNone() const + { + return isDynamicNone; + } + bool IsVector() const + { + return isVector; + } + +private: + bool isInteger; + bool isUnsigned; 
+    bool isAddress;
+    bool isFloat;
+    bool isPointer;
+    bool isSimple;
+    bool isDynamic;
+    bool isDynamicAny;
+    bool isDynamicNone;
+    bool isVector;
+};
+
+const PrimitiveTypeProperty &GetPrimitiveTypeProperty(PrimType pType);
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/cmpl.h b/ecmascript/compiler/codegen/maple/maple_ir/include/cmpl.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0d3f760086e533dc8ae7040a53f1d040d481e5a
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/cmpl.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This defines the lowest-level MAPLE IR data structures that are compatible
+// with both the C++ and C coding environments of MAPLE
+#ifndef MAPLE_INCLUDE_VM_CMPL_V2
+#define MAPLE_INCLUDE_VM_CMPL_V2
+// Still need constant value from MIR
+#include
+#include "mir_config.h"
+#include "types_def.h"
+#include "opcodes.h"
+#include "prim_types.h"
+#include "intrinsics.h"
+#include "mir_module.h"
+
+namespace maple {
+extern char appArray[];
+constexpr uint32 kTwoBitVectors = 2;
+struct MirFuncT {  // 28B
+    uint16 frameSize;
+    uint16 upFormalSize;
+    uint16 moduleID;
+    uint32 funcSize;               // size of code in words
+    uint8 *formalWordsTypetagged;  // bit vector where the Nth bit tells whether
+                                   // the Nth word in the formal parameters area
+                                   // addressed upward from %%FP (that means
+                                   // the word at location (%%FP + N*4)) has
+                                   // typetag; if yes, the typetag is the word
+                                   // at (%%FP + N*4 + 4); the bitvector's size
+                                   // is given by BlockSize2BitvectorSize(upFormalSize)
+    uint8 *localWordsTypetagged;   // bit vector where the Nth bit tells whether
+                                   // the Nth word in the local stack frame
+                                   // addressed downward from %%FP (that means
+                                   // the word at location (%%FP - N*4)) has
+                                   // typetag; if yes, the typetag is the word
+                                   // at (%%FP - N*4 + 4); the bitvector's size
+                                   // is given by BlockSize2BitvectorSize(frameSize)
+    uint8 *formalWordsRefCounted;  // bit vector where the Nth bit tells whether
+                                   // the Nth word in the formal parameters area
+                                   // addressed upward from %%FP (that means
+                                   // the word at location (%%FP + N*4)) points to
+                                   // a dynamic memory block that needs reference
+                                   // count; the bitvector's size is given by
+                                   // BlockSize2BitvectorSize(upFormalSize)
+    uint8 *localWordsRefCounted;   // bit vector where the Nth bit tells whether
+                                   // the Nth word in the local stack frame
+                                   // addressed downward from %%FP (that means
+                                   // the word at location (%%FP - N*4)) points to
+                                   // a dynamic memory block that needs reference
+                                   // count; the bitvector's size is given by
+                                   // BlockSize2BitvectorSize(frameSize)
+    // uint16 numlabels;  // removed. label table size
+    // StmtNode **lbl2stmt;  // lbl2stmt table, removed;
+    // the first statement immediately follows MirFuncT;
+    // since it starts with an expression, BaseNodeT* is returned
+    void *FirstInst() const
+    {
+        return reinterpret_cast<uint8 *>(const_cast<MirFuncT *>(this)) + sizeof(MirFuncT);
+    }
+
+    // there are 4 bitvectors that follow the function code
+    uint32 FuncCodeSize() const
+    {
+        return funcSize - (kTwoBitVectors * BlockSize2BitVectorSize(upFormalSize)) -
+               (kTwoBitVectors * BlockSize2BitVectorSize(frameSize));
+    }
+};
+
+struct MirModuleT {
+public:
+    MIRFlavor flavor;    // should be kCmpl
+    MIRSrcLang srcLang;  // the source language
+    uint16 id;
+    uint32 globalMemSize;  // size of storage space for all global variables
+    uint8 *globalBlkMap;   // the memory map of the block containing all the
+                           // globals, for specifying static initializations
+    uint8 *globalWordsTypetagged;  // bit vector where the Nth bit tells whether
+                                   // the Nth word in globalBlkMap has typetag;
+                                   // if yes, the typetag is the N+1th word; the
+                                   // bitvector's size is given by
+                                   // BlockSize2BitvectorSize(globalMemSize)
+    uint8 *globalWordsRefCounted;  // bit vector where the Nth bit tells whether
+                                   // the Nth word points to a reference-counted
+                                   // dynamic memory block; the bitvector's size
+                                   // is given by BlockSize2BitvectorSize(globalMemSize)
+    PUIdx mainFuncID;  // the entry function; 0 if no main function
+    uint32 numFuncs;   // because puIdx 0 is reserved, numFuncs is also the highest puIdx
+    MirFuncT **funcs;  // list of all funcs in the module.
+#if 1  // the js2mpl build always sets HAVE_MMAP to 1; binmir file mmap info
+    int binMirImageFd;  // file handle for mmap
+#endif  // HAVE_MMAP
+    void *binMirImageStart;     // binimage memory start
+    uint32 binMirImageLength;   // binimage memory size
+    MirFuncT *FuncFromPuIdx(PUIdx puIdx) const
+    {
+        MIR_ASSERT(puIdx <= numFuncs);  // puIdx starts from 1
+        return funcs[puIdx - 1];
+    }
+
+    MirModuleT() = default;
+    ~MirModuleT() = default;
+    MirFuncT *MainFunc() const
+    {
+        return (mainFuncID == 0) ? static_cast<MirFuncT *>(nullptr) : FuncFromPuIdx(mainFuncID);
+    }
+
+    void SetCurFunction(MirFuncT *f)
+    {
+        curFunction = f;
+    }
+
+    MirFuncT *GetCurFunction() const
+    {
+        return curFunction;
+    }
+
+    MIRSrcLang GetSrcLang() const
+    {
+        return srcLang;
+    }
+
+private:
+    MirFuncT *curFunction = nullptr;
+};
+
+// At this stage, MirConstT doesn't need all the information in MIRConst.
+// Note: only used within Constval nodes.
+// Warning: it's different from full-feature MIR:
+// only 32-bit int consts are supported (the lower 32 bits); the higher 32 bits are tags
+union MirIntConstT {
+    int64 value;
+    uint32 val[2];  // ARM target load/store 2 32bit val instead of 1 64bit
+};
+
+// currently in the VM, only int consts are used.
+using MirConstT = MirIntConstT;
+//
+// It's a stacking of POD data structures to allow precise memory layout
+// control and emulate the inheritance relationship of the corresponding C++
+// data structures, to keep the interface consistent (as much as possible).
+//
+// Rules:
+// 1. the base struct should be the first member (to allow safe pointer casting)
+// 2. each node (just ops, no data) should be either 4B or 8B.
+// 3. cast the node to the proper base type to access the base type's fields.
+//
+// The current memory layout of nodes follows postfix notation:
+// each operand instruction is positioned immediately before its parent or
+// next operand. Memory layout of sub-expression trees is done recursively.
+// E.g.
the code for (a + b) contains 3 instructions: READ a,
+// READ b, and then ADD.
+// For (a + (b - c)), it is:
+//
+// READ a
+// READ b
+// READ c
+// SUB
+// ADD
+//
+// BaseNodeT is an abstraction of expression.
+struct BaseNodeT {  // 4B
+    Opcode op;
+    PrimType ptyp;
+    uint8 typeFlag;  // a flag to speed up type related operations in the VM
+    uint8 numOpnds;  // only used for N-ary operators, switch and rangegoto;
+                     // operands are stored immediately before each node
+    virtual size_t NumOpnds() const
+    {
+        if (op == OP_switch || op == OP_rangegoto) {
+            return 1;
+        }
+        return numOpnds;
+    }
+
+    virtual uint8 GetNumOpnds() const
+    {
+        return numOpnds;
+    }
+    virtual void SetNumOpnds(uint8 num)
+    {
+        numOpnds = num;
+    }
+
+    virtual Opcode GetOpCode() const
+    {
+        return op;
+    }
+
+    virtual void SetOpCode(Opcode o)
+    {
+        op = o;
+    }
+
+    virtual PrimType GetPrimType() const
+    {
+        return ptyp;
+    }
+
+    virtual void SetPrimType(PrimType type)
+    {
+        ptyp = type;
+    }
+
+    BaseNodeT() : op(OP_undef), ptyp(kPtyInvalid), typeFlag(0), numOpnds(0) {}
+
+    virtual ~BaseNodeT() = default;
+};
+
+// typeFlag is an 8-bit flag providing short-cut information for its
+// associated PrimType, because extracting type-related information is
+// otherwise not lightweight.
+// Here is the convention:
+// |  7  |  6  |  5  |  4  |  3  |  2  |  1  |  0  |
+//   dyn    f     i     sc    c      (log2(size))
+//
+// bits 0 - 3 are for type size information. (now not used in VM?)
+// bits 0-2 represent the size of concrete types (not void/aggregate);
+// it's the result of a log2 operation on the real size, to fit in 3 bits,
+// which gives the following correspondence:
+// | 2 | 1 | 0 | type size (in Bytes)
+//   0   0   0   1
+//   0   0   1   2
+//   0   1   0   4
+//   0   1   1   8
+//   1   0   0   16
+//
+// bit 3 is the flag for "concrete types", i.e., types whose details we
+// know.
+// when it's 1, the bit 0-2 size is valid;
+// when it's 0, the size of the type is 0, and bits 0-2 are meaningless.
+//
+// bit 4 is for scalar types (1 if it's a scalar type)
+// bit 5 is for integer types (1 if it's an integer type)
+// bit 6 is for floating types (1 if it's a floating type)
+// bit 7 is for dynamic types (1 if it's a dynamic type)
+//
+// refer to mirtypes.h/mirtypes.cpp in the maple_ir directory for more information.
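+//
+// Worked example (illustrative, derived from the layout above): a concrete
+// 4-byte scalar integer type would carry
+//     integer(bit 5) | scalar(bit 4) | concrete(bit 3) | log2(4)
+//     = 0x20 | 0x10 | 0x08 | 0x02 = 0x3a,
+// while a dynamic type additionally sets bit 7 (0x80), which is what the
+// IsDyn* helpers below test against.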
+const int32 kTypeflagZero = 0x00;
+const int32 kTypeflagDynMask = 0x80;
+const int32 kTypeflagFloatMask = 0x40;
+const int32 kTypeflagIntergerMask = 0x20;
+const int32 kTypeflagScalarMask = 0x10;
+const int32 kTypeflagConcreteMask = 0x08;
+const int32 kTypeflagSizeMask = 0x07;
+const int32 kTypeflagDynFloatMask = (kTypeflagDynMask | kTypeflagFloatMask);
+const int32 kTypeflagDynIntergerMask = (kTypeflagDynMask | kTypeflagIntergerMask);
+inline bool IsDynType(uint8 typeFlag)
+{
+    return ((typeFlag & kTypeflagDynMask) != kTypeflagZero);
+}
+
+inline bool IsDynFloat(uint8 typeFlag)
+{
+    return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagDynFloatMask);
+}
+
+inline bool IsDynInteger(uint8 typeFlag)
+{
+    return ((typeFlag & kTypeflagDynIntergerMask) == kTypeflagDynIntergerMask);
+}
+
+// IsFloat means "is a statically floating type", i.e., float but not dynamic
+inline bool IsFloat(uint8 typeFlag)
+{
+    return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagFloatMask);
+}
+
+inline bool IsScalarType(uint8 typeFlag)
+{
+    return ((typeFlag & kTypeflagScalarMask) != kTypeflagZero);
+}
+
+inline Opcode GetOpcode(const BaseNodeT &nodePtr)
+{
+    return nodePtr.op;
+}
+
+inline PrimType GetPrimType(const BaseNodeT &nodePtr)
+{
+    return nodePtr.ptyp;
+}
+
+inline uint32 GetOperandsNum(const BaseNodeT &nodePtr)
+{
+    return nodePtr.numOpnds;
+}
+
+using UnaryNodeT = BaseNodeT;  // alias
+struct TypecvtNodeT : public BaseNodeT {  // 8B
+    PrimType fromPTyp;
+    uint8 fromTypeFlag;  // a flag to speed up type related operations
+    uint8 padding[2];
+    PrimType FromType() const
+    {
+        return fromPTyp;
+    }
+};
+
+struct ExtractbitsNodeT : public BaseNodeT {  // 8B
+    uint8 bOffset;
+    uint8 bSize;
+    uint16 padding;
+};
+
+struct IreadoffNodeT : public BaseNodeT {  // 8B
+    int32 offset;
+};
+
+using BinaryNodeT = BaseNodeT;
+// Add the operand type to compare nodes, to facilitate the
+// evaluation of postorder-stored kCmpl.
+// Note: the two operands should have the same type if they're
+// not dynamic types
+struct CompareNodeT : public BaseNodeT {  // 8B
+    PrimType opndType;   // type of operands.
+    uint8 opndTypeFlag;  // typeFlag of opndType.
+    uint8 padding[2];    // every compare node has two opnds.
+};
+
+using TernaryNodeT = BaseNodeT;
+using NaryNodeT = BaseNodeT;
+// need to guarantee MIRIntrinsicID is 4B
+// Note: this is not supported by c++0x
+struct IntrinsicopNodeT : public BaseNodeT {  // 8B
+    MIRIntrinsicID intrinsic;
+};
+
+struct ConstvalNodeT : public BaseNodeT {  // 4B + 8B const value
+    MirConstT *Constval() const
+    {
+        auto *tempPtr = const_cast<ConstvalNodeT *>(this);
+        return (reinterpret_cast<MirConstT *>(reinterpret_cast<uint8 *>(tempPtr) + sizeof(ConstvalNodeT)));
+    }
+};
+
+// full MIR exported a pointer to MirConstT
+inline MirConstT *GetConstval(const ConstvalNodeT &node)
+{
+    return node.Constval();
+}
+
+// SizeoftypeNode shouldn't be seen here
+// ArrayNode shouldn't be seen here
+struct AddrofNodeT : public BaseNodeT {  // 12B
+    StIdx stIdx;
+    FieldID fieldID;
+};
+
+using DreadNodeT = AddrofNodeT;  // same shape.
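+// Layout note (illustrative): because the tree is stored in postfix order
+// with payloads packed inline, no side table is needed to find a leaf's
+// data -- e.g. ConstvalNodeT::Constval() above simply returns the address
+// sizeof(ConstvalNodeT) bytes past the node itself, where the MirConstT
+// payload lives.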
+struct AddroffuncNodeT : public BaseNodeT { // 8B + PUIdx puIdx; // 32bit now +}; + +struct RegreadNodeT : public BaseNodeT { // 8B + PregIdx regIdx; // 32bit, negative if special register +}; + +struct AddroflabelNodeT : public BaseNodeT { // 8B + uint32 offset; +}; +} // namespace maple +#endif // MAPLE_INCLUDE_VM_CMPL_V2 diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/debug_info.h b/ecmascript/compiler/codegen/maple/maple_ir/include/debug_info.h new file mode 100644 index 0000000000000000000000000000000000000000..1ff8ce66d61e05c99fb32bb9c627894fc43312e8 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/debug_info.h @@ -0,0 +1,865 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_DBG_INFO_H +#define MAPLE_IR_INCLUDE_DBG_INFO_H +#include + +#include "mpl_logging.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_nodes.h" +#include "mir_scope.h" +#include "namemangler.h" +#include "lexer.h" +#include "dwarf.h" + +namespace maple { +// for more color code: http://ascii-table.com/ansi-escape-sequences.php +#define RESET "\x1B[0m" +#define BOLD "\x1B[1m" +#define RED "\x1B[31m" +#define GRN "\x1B[32m" +#define YEL "\x1B[33m" + +const uint32 kDbgDefaultVal = 0xdeadbeef; +#define HEX(val) std::hex << "0x" << val << std::dec + +class MIRModule; +class MIRType; +class MIRSymbol; +class MIRSymbolTable; +class MIRTypeNameTable; +class DBGBuilder; +class DBGCompileMsgInfo; +class MIRLexer; + +// for compiletime warnings +class DBGLine { +public: + DBGLine(uint32 lnum, const char *l) : lineNum(lnum), codeLine(l) {} + virtual ~DBGLine() {} + + void Dump() + { + LogInfo::MapleLogger() << "LINE: " << lineNum << " " << codeLine << std::endl; + } + +private: + uint32 lineNum; + const char *codeLine; +}; + +#define MAXLINELEN 4096 + +class DBGCompileMsgInfo { +public: + DBGCompileMsgInfo(); + virtual ~DBGCompileMsgInfo() {} + void ClearLine(uint32 n); + void SetErrPos(uint32 lnum, uint32 cnum); + void UpdateMsg(uint32 lnum, const char *line); + void EmitMsg(); + +private: + uint32 startLine; // mod 3 + uint32 errLNum; + uint32 errCNum; + uint32 errPos; + uint32 lineNum[3]; + uint8 codeLine[3][MAXLINELEN]; // 3 round-robin line buffers +}; + +enum DBGDieKind { kDwTag, kDwAt, kDwOp, kDwAte, kDwForm, kDwCfa }; + +typedef uint32 DwTag; // for DW_TAG_* +typedef uint32 DwAt; // for DW_AT_* +typedef uint32 DwOp; // for DW_OP_* +typedef uint32 DwAte; // for DW_ATE_* +typedef uint32 DwForm; // for DW_FORM_* +typedef uint32 DwCfa; // for DW_CFA_* + +class DBGDieAttr; + +class DBGExpr { +public: + explicit DBGExpr(MIRModule *m) : dwOp(0), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + DBGExpr(MIRModule *m, DwOp op) : dwOp(op), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGExpr() {} + + void AddOpnd(uint64 val) + { + opnds.push_back(val); + } + + int GetVal() const + { + return value; + } + + void SetVal(int v) + { + value = 
v; + } + + DwOp GetDwOp() const + { + return dwOp; + } + + void SetDwOp(DwOp op) + { + dwOp = op; + } + + MapleVector &GetOpnd() + { + return opnds; + } + + size_t GetOpndSize() const + { + return opnds.size(); + } + + void Clear() + { + return opnds.clear(); + } + +private: + DwOp dwOp; + // for local var fboffset, global var strIdx + int value; + MapleVector opnds; +}; + +class DBGExprLoc { +public: + explicit DBGExprLoc(MIRModule *m) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) + { + simpLoc = m->GetMemPool()->New(module); + } + + DBGExprLoc(MIRModule *m, DwOp op) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) + { + simpLoc = m->GetMemPool()->New(module, op); + } + + virtual ~DBGExprLoc() {} + + bool IsSimp() const + { + return (exprVec.size() == 0 && simpLoc->GetVal() != static_cast(kDbgDefaultVal)); + } + + int GetFboffset() const + { + return simpLoc->GetVal(); + } + + void SetFboffset(int offset) + { + simpLoc->SetVal(offset); + } + + int GetGvarStridx() const + { + return simpLoc->GetVal(); + } + + void SetGvarStridx(int idx) + { + simpLoc->SetVal(idx); + } + + DwOp GetOp() const + { + return simpLoc->GetDwOp(); + } + + uint32 GetSize() const + { + return static_cast(simpLoc->GetOpndSize()); + } + + void ClearOpnd() + { + simpLoc->Clear(); + } + + void AddSimpLocOpnd(uint64 val) + { + simpLoc->AddOpnd(val); + } + + DBGExpr *GetSimpLoc() const + { + return simpLoc; + } + + void *GetSymLoc() + { + return symLoc; + } + + void SetSymLoc(void *loc) + { + symLoc = loc; + } + + void Dump(); + +private: + MIRModule *module; + DBGExpr *simpLoc; + MapleVector exprVec; + void *symLoc; +}; + +class DBGDieAttr { +public: + size_t SizeOf(DBGDieAttr *attr); + explicit DBGDieAttr(DBGDieKind k) : dieKind(k), dwAttr(DW_AT_deleted), dwForm(DW_FORM_GNU_strp_alt) + { + value.u = kDbgDefaultVal; + } + + virtual ~DBGDieAttr() {} + + void AddSimpLocOpnd(uint64 val) + { + value.ptr->AddSimpLocOpnd(val); + } + + void ClearSimpLocOpnd() + { + value.ptr->ClearOpnd(); + } + + void Dump(int indent); + + DBGDieKind GetKind() const + { + return dieKind; + } + + void SetKind(DBGDieKind kind) + { + dieKind = kind; + } + + DwAt GetDwAt() const + { + return dwAttr; + } + + void SetDwAt(DwAt at) + { + dwAttr = at; + } + + DwForm GetDwForm() const + { + return dwForm; + } + + void SetDwForm(DwForm form) + { + dwForm = form; + } + + int32 GetI() const + { + return value.i; + } + + void SetI(int32 val) + { + value.i = val; + } + + uint32 GetId() const + { + return value.id; + } + + void SetId(uint32 val) + { + value.id = val; + } + + int64 GetJ() const + { + return value.j; + } + + void SetJ(int64 val) + { + value.j = val; + } + + uint64 GetU() const + { + return value.u; + } + + void SetU(uint64 val) + { + value.u = val; + } + + float GetF() const + { + return value.f; + } + + void SetF(float val) + { + value.f = val; + } + + double GetD() const + { + return value.d; + } + + void SetD(double val) + { + value.d = val; + } + + DBGExprLoc *GetPtr() + { + return value.ptr; + } + + void SetPtr(DBGExprLoc *val) + { + value.ptr = val; + } + +private: + DBGDieKind dieKind; + DwAt dwAttr; + DwForm dwForm; // type for the attribute value + union { + int32 i; + uint32 id; // dieId when dwForm is of DW_FORM_ref + // strIdx when dwForm is of DW_FORM_string + int64 j; + uint64 u; + float f; + double d; + + DBGExprLoc *ptr; + } value; +}; + +class DBGDie { +public: + DBGDie(MIRModule *m, DwTag tag); + virtual ~DBGDie() {} + void AddAttr(DBGDieAttr *attr); + void AddSubVec(DBGDie *die); + + 
DBGDieAttr *AddAttr(DwAt attr, DwForm form, uint64 val); + DBGDieAttr *AddSimpLocAttr(DwAt at, DwForm form, uint64 val); + DBGDieAttr *AddGlobalLocAttr(DwAt at, DwForm form, uint64 val); + DBGDieAttr *AddFrmBaseAttr(DwAt at, DwForm form); + DBGExprLoc *GetExprLoc(); + bool SetAttr(DwAt attr, uint64 val); + bool SetAttr(DwAt attr, int64 val); + bool SetAttr(DwAt attr, uint32 val); + bool SetAttr(DwAt attr, int32 val); + bool SetAttr(DwAt attr, float val); + bool SetAttr(DwAt attr, double val); + bool SetSimpLocAttr(DwAt attr, int64 val); + bool SetAttr(DwAt attr, DBGExprLoc *ptr); + void ResetParentDie(); + void Dump(int indent); + + uint32 GetId() const + { + return id; + } + + void SetId(uint32 val) + { + id = val; + } + + DwTag GetTag() const + { + return tag; + } + + void SetTag(DwTag val) + { + tag = val; + } + + bool GetWithChildren() const + { + return withChildren; + } + + void SetWithChildren(bool val) + { + withChildren = val; + } + + DBGDie *GetParent() const + { + return parent; + } + + void SetParent(DBGDie *val) + { + parent = val; + } + + DBGDie *GetSibling() const + { + return sibling; + } + + void SetSibling(DBGDie *val) + { + sibling = val; + } + + DBGDie *GetFirstChild() const + { + return firstChild; + } + + void SetFirstChild(DBGDie *val) + { + firstChild = val; + } + + uint32 GetAbbrevId() const + { + return abbrevId; + } + + void SetAbbrevId(uint32 val) + { + abbrevId = val; + } + + uint32 GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(uint32 val) + { + tyIdx = val; + } + + uint32 GetOffset() const + { + return offset; + } + + void SetOffset(uint32 val) + { + offset = val; + } + + uint32 GetSize() const + { + return size; + } + + void SetSize(uint32 val) + { + size = val; + } + + const MapleVector &GetAttrVec() const + { + return attrVec; + } + + MapleVector &GetAttrVec() + { + return attrVec; + } + + const MapleVector &GetSubDieVec() const + { + return subDieVec; + } + + MapleVector &GetSubDieVec() + { + return subDieVec; + } + + uint32 GetSubDieVecSize() const + { + return static_cast(subDieVec.size()); + } + + DBGDie *GetSubDieVecAt(uint32 i) const + { + return subDieVec[i]; + } + +private: + MIRModule *module; + DwTag tag; + uint32 id; // starts from 1 which is root die compUnit + bool withChildren; + DBGDie *parent; + DBGDie *sibling; + DBGDie *firstChild; + uint32 abbrevId; // id in .debug_abbrev + uint32 tyIdx; // for type TAG + uint32 offset; // Dwarf CU relative offset + uint32 size; // DIE Size in .debug_info + MapleVector attrVec; + MapleVector subDieVec; +}; + +class DBGAbbrevEntry { +public: + DBGAbbrevEntry(MIRModule *m, DBGDie *die); + virtual ~DBGAbbrevEntry() {} + bool Equalto(DBGAbbrevEntry *entry); + void Dump(int indent); + + DwTag GetTag() const + { + return tag; + } + + void SetTag(DwTag val) + { + tag = val; + } + + uint32 GetAbbrevId() const + { + return abbrevId; + } + + void SetAbbrevId(uint32 val) + { + abbrevId = val; + } + + bool GetWithChildren() const + { + return withChildren; + } + + void SetWithChildren(bool val) + { + withChildren = val; + } + + MapleVector &GetAttrPairs() + { + return attrPairs; + } + +private: + DwTag tag; + uint32 abbrevId; + bool withChildren; + MapleVector attrPairs; // kDwAt kDwForm pairs +}; + +class DBGAbbrevEntryVec { +public: + DBGAbbrevEntryVec(MIRModule *m, DwTag tag) : tag(tag), entryVec(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGAbbrevEntryVec() {} + + uint32 GetId(MapleVector &attrs); + void Dump(int indent); + + DwTag GetTag() const + { + return tag; + } + + void SetTag(DwTag val) + 
{ + tag = val; + } + + const MapleVector &GetEntryvec() const + { + return entryVec; + } + + MapleVector &GetEntryvec() + { + return entryVec; + } + +private: + DwTag tag; + MapleVector entryVec; +}; + +class DebugInfo { +public: + DebugInfo(MIRModule *m) + : module(m), + compUnit(nullptr), + dummyTypeDie(nullptr), + lexer(nullptr), + maxId(1), + builder(nullptr), + mplSrcIdx(0), + debugInfoLength(0), + curFunction(nullptr), + compileMsg(nullptr), + parentDieStack(m->GetMPAllocator().Adapter()), + idDieMap(std::less(), m->GetMPAllocator().Adapter()), + abbrevVec(m->GetMPAllocator().Adapter()), + tagAbbrevMap(std::less(), m->GetMPAllocator().Adapter()), + tyIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + stridxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcDefStrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + typeDefTyIdxMap(std::less(), m->GetMPAllocator().Adapter()), + pointedPointerMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxLabIdxMap(std::less(), m->GetMPAllocator().Adapter()), + strps(std::less(), m->GetMPAllocator().Adapter()) + { + /* valid entry starting from index 1 as abbrevid starting from 1 as well */ + abbrevVec.push_back(nullptr); + InitMsg(); + varPtrPrefix = std::string(namemangler::kPtrPrefixStr); + } + + virtual ~DebugInfo() {} + + void InitMsg() + { + compileMsg = module->GetMemPool()->New(); + } + + void UpdateMsg(uint32 lnum, const char *line) + { + compileMsg->UpdateMsg(lnum, line); + } + + void SetErrPos(uint32 lnum, uint32 cnum) + { + compileMsg->SetErrPos(lnum, cnum); + } + + void EmitMsg() + { + compileMsg->EmitMsg(); + } + + DBGDie *GetDie(uint32 id) + { + return idDieMap[id]; + } + + DBGDie *GetDummyTypeDie() + { + return dummyTypeDie; + } + + DBGDie *GetDie(const MIRFunction *func); + + void Init(); + void Finish(); + void SetupCU(); + void BuildDebugInfo(); + void Dump(int indent); + + // build tree to populate withChildren, sibling, firstChild + // also insert DW_AT_sibling attributes when needed + void BuildDieTree(); + + // replace type idx with die id in DW_AT_type attributes + void FillTypeAttrWithDieId(); + + void BuildAbbrev(); + uint32 GetAbbrevId(DBGAbbrevEntryVec *, DBGAbbrevEntry *); + + void SetLocalDie(GStrIdx strIdx, const DBGDie *die); + void SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die); + DBGDie *GetLocalDie(GStrIdx strIdx); + DBGDie *GetLocalDie(MIRFunction *func, GStrIdx strIdx); + + LabelIdx GetLabelIdx(GStrIdx strIdx); + LabelIdx GetLabelIdx(MIRFunction *func, GStrIdx strIdx); + void SetLabelIdx(GStrIdx strIdx, LabelIdx idx); + void SetLabelIdx(MIRFunction *func, GStrIdx strIdx, LabelIdx idx); + + uint32 GetMaxId() const + { + return maxId; + } + + uint32 GetIncMaxId() + { + return maxId++; + } + + DBGDie *GetIdDieMapAt(uint32 i) + { + return idDieMap[i]; + } + + void SetIdDieMap(uint32 i, DBGDie *die) + { + idDieMap[i] = die; + } + + size_t GetParentDieSize() const + { + return parentDieStack.size(); + } + + DBGDie *GetParentDie() + { + return parentDieStack.top(); + } + + void PushParentDie(DBGDie *die) + { + parentDieStack.push(die); + } + + void PopParentDie() + { + parentDieStack.pop(); + } + + void ResetParentDie() + { + parentDieStack.clear(); + parentDieStack.push(compUnit); + } + + void AddStrps(uint32 val) + { + strps.insert(val); + } + + MapleSet &GetStrps() + { + return strps; + } + + uint32 GetDebugInfoLength() const + { + return debugInfoLength; + } + + MapleVector &GetAbbrevVec() + { + 
return abbrevVec; + } + + DBGDie *GetCompUnit() const + { + return compUnit; + } + + MIRFunction *GetCurFunction() + { + return curFunction; + } + + void SetCurFunction(MIRFunction *func) + { + curFunction = func; + } + + void SetTyidxDieIdMap(const TyIdx tyIdx, const DBGDie *die) + { + tyIdxDieIdMap[tyIdx.GetIdx()] = die->GetId(); + } + + DBGDieAttr *CreateAttr(DwAt attr, DwForm form, uint64 val); + + DBGDie *CreateVarDie(MIRSymbol *sym); + DBGDie *CreateVarDie(MIRSymbol *sym, GStrIdx strIdx); // use alt name + DBGDie *CreateFormalParaDie(MIRFunction *func, MIRType *type, MIRSymbol *sym); + DBGDie *CreateFieldDie(maple::FieldPair pair, uint32 lnum); + DBGDie *CreateBitfieldDie(const MIRBitFieldType *type, GStrIdx idx, uint32 prevBits); + DBGDie *CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *type, bool update = false); + DBGDie *CreateClassTypeDie(GStrIdx strIdx, const MIRClassType *type); + DBGDie *CreateInterfaceTypeDie(GStrIdx strIdx, const MIRInterfaceType *type); + DBGDie *CreatePointedFuncTypeDie(MIRFuncType *func); + + DBGDie *GetOrCreateLabelDie(LabelIdx labid); + DBGDie *GetOrCreateTypeAttrDie(MIRSymbol *sym); + DBGDie *GetOrCreateConstTypeDie(TypeAttrs attr, DBGDie *typedie); + DBGDie *GetOrCreateVolatileTypeDie(TypeAttrs attr, DBGDie *typedie); + DBGDie *GetOrCreateFuncDeclDie(MIRFunction *func); + DBGDie *GetOrCreateFuncDefDie(MIRFunction *func, uint32 lnum); + DBGDie *GetOrCreatePrimTypeDie(MIRType *ty); + DBGDie *GetOrCreateTypeDie(MIRType *type); + DBGDie *GetOrCreatePointTypeDie(const MIRPtrType *type); + DBGDie *GetOrCreateArrayTypeDie(const MIRArrayType *type); + DBGDie *GetOrCreateStructTypeDie(const MIRType *type); + + void AddAliasDies(MapleMap &aliasMap); + void AddScopeDie(MIRScope *scope); + + // Functions for calculating the size and offset of each DW_TAG_xxx and DW_AT_xxx + void ComputeSizeAndOffsets(); + void ComputeSizeAndOffset(DBGDie *die, uint32 &offset); + +private: + MIRModule *module; + DBGDie *compUnit; // root die: compilation unit + DBGDie *dummyTypeDie; // workaround for unknown types + MIRLexer *lexer; + uint32 maxId; + DBGBuilder *builder; + GStrIdx mplSrcIdx; + uint32 debugInfoLength; + MIRFunction *curFunction; + + // for compilation messages + DBGCompileMsgInfo *compileMsg; + + MapleStack parentDieStack; + MapleMap idDieMap; + MapleVector abbrevVec; // valid entry starting from index 1 + MapleMap tagAbbrevMap; + + // to be used when derived type references a base type die + MapleMap tyIdxDieIdMap; + MapleMap stridxDieIdMap; + MapleMap funcDefStrIdxDieIdMap; + MapleMap typeDefTyIdxMap; // prevtyIdxtypidx_map + MapleMap pointedPointerMap; + MapleMap> funcLstrIdxDieIdMap; + MapleMap> funcLstrIdxLabIdxMap; + MapleSet strps; + std::string varPtrPrefix; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_DBG_INFO_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/dex2mpl/dexintrinsic.def b/ecmascript/compiler/codegen/maple/maple_ir/include/dex2mpl/dexintrinsic.def new file mode 100644 index 0000000000000000000000000000000000000000..9a338a28ca7828552a3d3d3c66bc250923ffc84b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/dex2mpl/dexintrinsic.def @@ -0,0 +1,17 @@ +DEF_MIR_INTRINSIC(JAVA_INTERFACE_CALL,\ + "__dex_interface_call", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_PRINT,\ + "printf", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) 
+DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SGET,\
+                  "__dex_clinit_check_sget", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SPUT,\
+                  "__dex__clinit_check_sput", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_NEW,\
+                  "__dex_clinit_check_new", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JAVA_STR_TO_JSTR,\
+                  "__dex_str_to_jstr", INTRNISJAVA, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+// __dex_random generates a random value used in the callback CFG
+DEF_MIR_INTRINSIC(JAVA_RANDOM,\
+                  "__dex_random", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JAVA_THROW_CLASSCAST,\
+                  "MCC_ThrowClassCastException", INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.def b/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.def
new file mode 100644
index 0000000000000000000000000000000000000000..0df9b9317c02912004705916d9c462ae551c8eec
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.def
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: Add other DW-based macros.
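+//
+// Typical X-macro usage (see dwarf.h in this directory): the includer
+// defines only the macro families it needs, then includes this file to get
+// one expansion per entry, e.g.
+//     #define DW_TAG(ID, NAME) DW_TAG_##NAME = (ID),
+//     #include "dwarf.def"
+// Families left undefined expand to nothing via the fallbacks below.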
+#if !( \ + defined DW_TAG || defined DW_AT || defined DW_FORM || \ + defined DW_OP || defined DW_LANG || defined DW_ATE) +#error "Missing definition of DW*" +#endif + +#ifndef DW_TAG +#define DW_TAG(ID, NAME) +#endif + +#ifndef DW_AT +#define DW_AT(ID, NAME) +#endif + +#ifndef DW_FORM +#define DW_FORM(ID, NAME) +#endif + +#ifndef DW_OP +#define DW_OP(ID, NAME) +#endif + +#ifndef DW_LANG +#define DW_LANG(ID, NAME, LOWER_BOUND) +#endif + +#ifndef DW_ATE +#define DW_ATE(ID, NAME) +#endif + +// Tag +DW_TAG(0x0000, null) +DW_TAG(0x0001, array_type) +DW_TAG(0x0002, class_type) +DW_TAG(0x0004, enumeration_type) +DW_TAG(0x0005, formal_parameter) +DW_TAG(0x000a, label) +DW_TAG(0x000b, lexical_block) +DW_TAG(0x000d, member) +DW_TAG(0x000f, pointer_type) +DW_TAG(0x0011, compile_unit) +DW_TAG(0x0013, structure_type) +DW_TAG(0x0015, subroutine_type) +DW_TAG(0x0016, typedef) +DW_TAG(0x0017, union_type) +DW_TAG(0x0018, unspecified_parameters) +DW_TAG(0x001c, inheritance) +DW_TAG(0x0021, subrange_type) +DW_TAG(0x0024, base_type) +DW_TAG(0x0026, const_type) +DW_TAG(0x0028, enumerator) +DW_TAG(0x002e, subprogram) +DW_TAG(0x0034, variable) +DW_TAG(0x0035, volatile_type) +// New in DWARF v3 +DW_TAG(0x0038, interface_type) + +// Attributes +DW_AT(0x01, sibling) +DW_AT(0x02, location) +DW_AT(0x03, name) +DW_AT(0x0b, byte_size) +DW_AT(0x0c, bit_offset) +DW_AT(0x0d, bit_size) +DW_AT(0x10, stmt_list) +DW_AT(0x11, low_pc) +DW_AT(0x12, high_pc) +DW_AT(0x13, language) +DW_AT(0x1b, comp_dir) +DW_AT(0x1c, const_value) +DW_AT(0x25, producer) +DW_AT(0x27, prototyped) +DW_AT(0x2f, upper_bound) +DW_AT(0x32, accessibility) +DW_AT(0x38, data_member_location) +DW_AT(0x39, decl_column) +DW_AT(0x3a, decl_file) +DW_AT(0x3b, decl_line) +DW_AT(0x3e, encoding) +DW_AT(0x3f, external) +DW_AT(0x40, frame_base) +DW_AT(0x47, specification) +DW_AT(0x49, type) +// New in DWARF v3 +DW_AT(0x64, object_pointer) +// New in DWARF v5 +DW_AT(0x8a, deleted) +// Vendor extensions +DW_AT (0x2116, GNU_all_tail_call_sites) + +// Attribute form encodings +DW_FORM(0x01, addr) +DW_FORM(0x05, data2) +DW_FORM(0x06, data4) +DW_FORM(0x07, data8) +DW_FORM(0x08, string) +DW_FORM(0x0b, data1) +DW_FORM(0x0c, flag) +DW_FORM(0x0e, strp) +DW_FORM(0x10, ref_addr) +DW_FORM(0x11, ref1) +DW_FORM(0x12, ref2) +DW_FORM(0x13, ref4) +DW_FORM(0x14, ref8) +// New in DWARF v4 +DW_FORM(0x17, sec_offset) +DW_FORM(0x18, exprloc) +DW_FORM(0x19, flag_present) +// This was defined out of sequence. +DW_FORM(0x20, ref_sig8) +// Alternate debug sections proposal (output of "dwz" tool). +DW_FORM(0x1f20, GNU_ref_alt) +DW_FORM(0x1f21, GNU_strp_alt) + +// DWARF Expression operators. +DW_OP(0x03, addr) +DW_OP(0x70, breg0) +DW_OP(0x71, breg1) +DW_OP(0x72, breg2) +DW_OP(0x73, breg3) +DW_OP(0x74, breg4) +DW_OP(0x75, breg5) +DW_OP(0x76, breg6) +DW_OP(0x77, breg7) +DW_OP(0x91, fbreg) +// New in DWARF v3 +DW_OP(0x9c, call_frame_cfa) + +// DWARF languages. +DW_LANG(0x000c, C99, 0) + +// DWARF attribute type encodings. 
+DW_ATE(0x01, address) +DW_ATE(0x02, boolean) +DW_ATE(0x03, complex_float) +DW_ATE(0x04, float) +DW_ATE(0x05, signed) +DW_ATE(0x06, signed_char) +DW_ATE(0x07, unsigned) +DW_ATE(0x08, unsigned_char) + +#undef DW_TAG +#undef DW_AT +#undef DW_FORM +#undef DW_OP +#undef DW_LANG +#undef DW_ATE diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.h b/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.h new file mode 100644 index 0000000000000000000000000000000000000000..b5166e19bccffd064eee6cb39a77306bae73971e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/dwarf.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_DWARF_H +#define MAPLE_IR_INCLUDE_DWARF_H + +#include + +enum Tag : uint16_t { +#define DW_TAG(ID, NAME) DW_TAG_##NAME = (ID), +#include "dwarf.def" + DW_TAG_lo_user = 0x4080, + DW_TAG_hi_user = 0xffff, + DW_TAG_user_base = 0x1000 +}; + +enum Attribute : uint16_t { +#define DW_AT(ID, NAME) DW_AT_##NAME = (ID), +#include "dwarf.def" + DW_AT_lo_user = 0x2000, + DW_AT_hi_user = 0x3fff, +}; + +enum Form : uint16_t { +#define DW_FORM(ID, NAME) DW_FORM_##NAME = (ID), +#include "dwarf.def" + DW_FORM_lo_user = 0x1f00, +}; + +enum LocationAtom { +#define DW_OP(ID, NAME) DW_OP_##NAME = (ID), +#include "dwarf.def" + DW_OP_lo_user = 0xe0, + DW_OP_hi_user = 0xff, +}; + +enum TypeKind : uint8_t { +#define DW_ATE(ID, NAME) DW_ATE_##NAME = (ID), +#include "dwarf.def" + DW_ATE_lo_user = 0x80, + DW_ATE_hi_user = 0xff, + DW_ATE_void = 0x20 +}; + +enum AccessAttribute { DW_ACCESS_public = 0x01, DW_ACCESS_protected = 0x02, DW_ACCESS_private = 0x03 }; + +enum SourceLanguage { +#define DW_LANG(ID, NAME, LOWER_BOUND) DW_LANG_##NAME = (ID), +#include "dwarf.def" + DW_LANG_lo_user = 0x8000, + DW_LANG_hi_user = 0xffff +}; + +#endif diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/func_desc.h b/ecmascript/compiler/codegen/maple/maple_ir/include/func_desc.h new file mode 100644 index 0000000000000000000000000000000000000000..2b01485a718315dbbc514dc178a3ec887fe38c07 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/func_desc.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MAPLE_IR_INCLUDE_FUNC_DESC_H +#define MAPLE_IR_INCLUDE_FUNC_DESC_H +#include "mpl_logging.h" +namespace maple { + +enum class FI { + kUnknown = 0, + kPure, // means this function will not modify any global memory. + kConst, // means this function will not read/modify any global memory. +}; + +static std::string kFIStr[] = {"kUnknown", "kPure", "kConst"}; + +enum class RI { + kUnknown = 0, // for ptr value, don't know anything. + kNoAlias, // for ptr value, no alias with any other ptr when this method is returned. As in malloc. + kAliasParam0, // for ptr value, it may alias with first param. As in memcpy. + kAliasParam1, + kAliasParam2, + kAliasParam3, + kAliasParam4, + kAliasParam5, +}; + +static std::string kRIStr[] = {"kUnknown", "kNoAlias", "kAliasParam0", "kAliasParam1", + "kAliasParam2", "kAliasParam3", "kAliasParam4", "kAliasParam5"}; + +enum class PI { + kUnknown = 0, // for ptr param, may read/write every level memory. + kReadWriteMemory, // for ptr param, only read & write the memory it points to. + kWriteMemoryOnly, // for ptr param, only write the memory it points to. + kReadMemoryOnly, // for ptr param, only read the memory it points to. + kReadSelfOnly, // for ptr param, only read the ptr itself, do not dereference. + kUnused, // this param is not used in this function. +}; + +static std::string kPIStr[] = {"kUnknown", "kReadWriteMemory", "kWriteMemoryOnly", + "kReadMemoryOnly", "kReadSelfOnly", "kUnused"}; + +// most function has less than 6 parameters. +const size_t kMaxParamCount = 6; +struct FuncDesc { + FI funcInfo {}; + RI returnInfo {}; + PI paramInfo[kMaxParamCount] {}; + bool configed = false; + + void InitToBest() + { + funcInfo = FI::kConst; + returnInfo = RI::kNoAlias; + for (size_t idx = 0; idx < kMaxParamCount; ++idx) { + paramInfo[idx] = PI::kUnused; + } + } + + bool Equals(const FuncDesc &desc) const + { + if (funcInfo != desc.funcInfo) { + return false; + } + if (returnInfo != desc.returnInfo) { + return false; + } + for (size_t idx = 0; idx < kMaxParamCount; ++idx) { + if (paramInfo[idx] != desc.paramInfo[idx]) { + return false; + } + } + return true; + } + + bool IsConfiged() const + { + return configed; + } + + void SetConfiged() + { + configed = true; + } + + bool IsConst() const + { + return funcInfo == FI::kConst; + } + + bool IsPure() const + { + return funcInfo == FI::kPure; + } + + bool IsReturnNoAlias() const + { + return returnInfo == RI::kNoAlias; + } + bool IsReturnAlias() const + { + return returnInfo >= RI::kAliasParam0; + } + + size_t EnumToIndex(const RI &ri) const + { + switch (ri) { + case RI::kAliasParam0: + return 0; + case RI::kAliasParam1: + return 1; + case RI::kAliasParam2: + return 2; + case RI::kAliasParam3: + return 3; + case RI::kAliasParam4: + return 4; + case RI::kAliasParam5: + return 5; + default: { + CHECK_FATAL(false, "Impossible."); + } + } + } + + size_t ReturnParamX() const + { + CHECK_FATAL(returnInfo >= RI::kAliasParam0, "Impossible."); + return EnumToIndex(returnInfo); + } + + PI GetParamInfo(size_t index) const + { + return paramInfo[index]; + } + + bool IsArgReadSelfOnly(size_t index) const + { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kReadSelfOnly; + } + + bool IsArgReadMemoryOnly(size_t index) const + { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kReadMemoryOnly; + } + + bool IsArgWriteMemoryOnly(size_t index) const + { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == 
PI::kWriteMemoryOnly; + } + + bool IsArgUnused(size_t index) const + { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kUnused; + } + + void SetFuncInfo(const FI fi) + { + funcInfo = fi; + } + + void SetFuncInfoNoBetterThan(const FI fi) + { + auto oldValue = static_cast(funcInfo); + auto newValue = static_cast(fi); + if (newValue < oldValue) { + SetFuncInfo(static_cast(newValue)); + } + } + + void SetReturnInfo(const RI ri) + { + returnInfo = ri; + } + + void SetParamInfo(const size_t idx, const PI pi) + { + if (idx >= kMaxParamCount) { + return; + } + paramInfo[idx] = pi; + } + + void SetParamInfoNoBetterThan(const size_t idx, const PI pi) + { + size_t oldValue = static_cast(paramInfo[idx]); + size_t newValue = static_cast(pi); + if (newValue < oldValue) { + SetParamInfo(idx, static_cast(newValue)); + } + } + + void Dump(size_t numParam = kMaxParamCount) + { + auto dumpCount = numParam > kMaxParamCount ? kMaxParamCount : numParam; + LogInfo::MapleLogger() << kFIStr[static_cast(funcInfo)] << " " + << kRIStr[static_cast(returnInfo)]; + for (size_t i = 0; i < dumpCount; ++i) { + LogInfo::MapleLogger() << " " << kPIStr[static_cast(paramInfo[i])]; + } + LogInfo::MapleLogger() << "\n"; + } +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_FUNC_DESC_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/global_tables.h b/ecmascript/compiler/codegen/maple/maple_ir/include/global_tables.h new file mode 100644 index 0000000000000000000000000000000000000000..46718f624b33762ea17fdc15e800632dde01093b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/global_tables.h @@ -0,0 +1,1064 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#define MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#include +#include +#include +#include +#include +#include "thread_env.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_module.h" +#include "namemangler.h" +#include "mir_type.h" +#include "mir_const.h" + +namespace maple { +using TyIdxFieldAttrPair = std::pair; +using FieldPair = std::pair; +using FieldVector = std::vector; + +class BinaryMplImport; // circular dependency exists, no other choice + +// to facilitate the use of unordered_map +class TyIdxHash { +public: + std::size_t operator()(const TyIdx &tyIdx) const + { + return std::hash {}(tyIdx); + } +}; + +// to facilitate the use of unordered_map +class GStrIdxHash { +public: + std::size_t operator()(const GStrIdx &gStrIdx) const + { + return std::hash {}(gStrIdx); + } +}; + +// to facilitate the use of unordered_map +class UStrIdxHash { +public: + std::size_t operator()(const UStrIdx &uStrIdx) const + { + return std::hash {}(uStrIdx); + } +}; + +class IntConstKey { + friend class IntConstHash; + friend class IntConstCmp; + +public: + IntConstKey(int64 v, TyIdx tyIdx) : val(v), tyIdx(tyIdx) {} + virtual ~IntConstKey() {} + +private: + int64 val; + TyIdx tyIdx; +}; + +class IntConstHash { +public: + std::size_t operator()(const IntConstKey &key) const + { + return std::hash {}(key.val) ^ (std::hash {}(static_cast(key.tyIdx)) << 1); + } +}; + +class IntConstCmp { +public: + bool operator()(const IntConstKey &lkey, const IntConstKey &rkey) const + { + return lkey.val == rkey.val && lkey.tyIdx == rkey.tyIdx; + } +}; + +class TypeTable { + friend BinaryMplImport; + +public: + static MIRType *voidPtrType; + + TypeTable(); + TypeTable(const TypeTable &) = delete; + TypeTable &operator=(const TypeTable &) = delete; + ~TypeTable(); + + void Init(); + void Reset(); + void ReleaseTypes(); + + std::vector &GetTypeTable() + { + return typeTable; + } + + const std::vector &GetTypeTable() const + { + return typeTable; + } + + auto &GetTypeHashTable() const + { + return typeHashTable; + } + + auto &GetPtrTypeMap() const + { + return ptrTypeMap; + } + + auto &GetRefTypeMap() const + { + return refTypeMap; + } + + MIRType *GetTypeFromTyIdx(TyIdx tyIdx) + { + return const_cast(const_cast(this)->GetTypeFromTyIdx(tyIdx)); + } + const MIRType *GetTypeFromTyIdx(TyIdx tyIdx) const + { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx); + } + + MIRType *GetTypeFromTyIdx(uint32 index) const + { + CHECK_FATAL(index < typeTable.size(), "array index out of range"); + return typeTable.at(index); + } + + PrimType GetPrimTypeFromTyIdx(const TyIdx &tyIdx) const + { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx)->GetPrimType(); + } + + void SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type); + MIRType *GetOrCreateMIRTypeNode(MIRType &ptype); + + TyIdx GetOrCreateMIRType(MIRType *pType) + { + return GetOrCreateMIRTypeNode(*pType)->GetTypeIndex(); + } + + uint32 GetTypeTableSize() const + { + return static_cast(typeTable.size()); + } + + // Get primtive types. 
+ MIRType *GetPrimType(PrimType primType) const + { + DEBUG_ASSERT(primType < typeTable.size(), "array index out of range"); + return typeTable.at(primType); + } + + MIRType *GetFloat() const + { + DEBUG_ASSERT(PTY_f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f32); + } + + MIRType *GetDouble() const + { + DEBUG_ASSERT(PTY_f64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f64); + } + + MIRType *GetFloat128() const + { + DEBUG_ASSERT(PTY_f128 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f128); + } + + MIRType *GetUInt1() const + { + DEBUG_ASSERT(PTY_u1 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u1); + } + + MIRType *GetUInt8() const + { + DEBUG_ASSERT(PTY_u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u8); + } + + MIRType *GetInt8() const + { + DEBUG_ASSERT(PTY_i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i8); + } + + MIRType *GetUInt16() const + { + DEBUG_ASSERT(PTY_u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u16); + } + + MIRType *GetInt16() const + { + DEBUG_ASSERT(PTY_i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i16); + } + + MIRType *GetInt32() const + { + DEBUG_ASSERT(PTY_i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i32); + } + + MIRType *GetUInt32() const + { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetInt64() const + { + DEBUG_ASSERT(PTY_i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i64); + } + + MIRType *GetUInt64() const + { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtr() const + { + DEBUG_ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } + +#ifdef USE_ARM32_MACRO + MIRType *GetUIntType() const + { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetPtrType() const + { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } +#else + MIRType *GetUIntType() const + { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtrType() const + { + DEBUG_ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } +#endif + +#ifdef USE_32BIT_REF + MIRType *GetCompactPtr() const + { + DEBUG_ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + +#else + MIRType *GetCompactPtr() const + { + DEBUG_ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + +#endif + MIRType *GetRef() const + { + DEBUG_ASSERT(PTY_ref < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ref); + } + + MIRType *GetAddr32() const + { + DEBUG_ASSERT(PTY_a32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a32); + } + + MIRType *GetAddr64() const + { + DEBUG_ASSERT(PTY_a64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a64); + } + + MIRType *GetVoid() const + { + DEBUG_ASSERT(PTY_void < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_void); + } + +#ifdef DYNAMICLANG + MIRType 
*GetDynundef() const + { + DEBUG_ASSERT(PTY_dynundef < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynundef); + } + + MIRType *GetDynany() const + { + DEBUG_ASSERT(PTY_dynany < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynany); + } + + MIRType *GetDyni32() const + { + DEBUG_ASSERT(PTY_dyni32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dyni32); + } + + MIRType *GetDynf64() const + { + DEBUG_ASSERT(PTY_dynf64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf64); + } + + MIRType *GetDynf32() const + { + DEBUG_ASSERT(PTY_dynf32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf32); + } + + MIRType *GetDynstr() const + { + DEBUG_ASSERT(PTY_dynstr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynstr); + } + + MIRType *GetDynobj() const + { + DEBUG_ASSERT(PTY_dynobj < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynobj); + } + + MIRType *GetDynbool() const + { + DEBUG_ASSERT(PTY_dynbool < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynbool); + } + +#endif + MIRType *GetUnknown() const + { + DEBUG_ASSERT(PTY_unknown < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_unknown); + } + // vector type + MIRType *GetV4Int32() const + { + DEBUG_ASSERT(PTY_v4i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i32); + } + + MIRType *GetV2Int32() const + { + DEBUG_ASSERT(PTY_v2i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i32); + } + + MIRType *GetV4UInt32() const + { + DEBUG_ASSERT(PTY_v4u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u32); + } + MIRType *GetV2UInt32() const + { + DEBUG_ASSERT(PTY_v2u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u32); + } + + MIRType *GetV4Int16() const + { + DEBUG_ASSERT(PTY_v4i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i16); + } + MIRType *GetV8Int16() const + { + DEBUG_ASSERT(PTY_v8i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i16); + } + + MIRType *GetV4UInt16() const + { + DEBUG_ASSERT(PTY_v4u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u16); + } + MIRType *GetV8UInt16() const + { + DEBUG_ASSERT(PTY_v8u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u16); + } + + MIRType *GetV8Int8() const + { + DEBUG_ASSERT(PTY_v8i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i8); + } + MIRType *GetV16Int8() const + { + DEBUG_ASSERT(PTY_v16i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v16i8); + } + + MIRType *GetV8UInt8() const + { + DEBUG_ASSERT(PTY_v8u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u8); + } + MIRType *GetV16UInt8() const + { + DEBUG_ASSERT(PTY_v16u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v16u8); + } + MIRType *GetV2Int64() const + { + DEBUG_ASSERT(PTY_v2i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i64); + } + MIRType *GetV2UInt64() const + { + DEBUG_ASSERT(PTY_v2u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u64); + } + + MIRType *GetV2Float32() const + { + DEBUG_ASSERT(PTY_v2f32 < typeTable.size(), "array index out of range"); + return 
typeTable.at(PTY_v2f32);
+    }
+    MIRType *GetV4Float32() const
+    {
+        DEBUG_ASSERT(PTY_v4f32 < typeTable.size(), "array index out of range");
+        return typeTable.at(PTY_v4f32);
+    }
+    MIRType *GetV2Float64() const
+    {
+        DEBUG_ASSERT(PTY_v2f64 < typeTable.size(), "array index out of range");
+        return typeTable.at(PTY_v2f64);
+    }
+
+    // Get or Create derived types.
+    MIRType *GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType = PTY_ptr,
+                                    const TypeAttrs &attrs = TypeAttrs());
+    MIRType *GetOrCreatePointerType(const MIRType &pointTo, PrimType primType = PTY_ptr,
+                                    const TypeAttrs &attrs = TypeAttrs());
+    const MIRType *GetPointedTypeIfApplicable(MIRType &type) const;
+    MIRType *GetPointedTypeIfApplicable(MIRType &type);
+    MIRType *GetVoidPtr() const
+    {
+        DEBUG_ASSERT(voidPtrType != nullptr, "voidPtrType should not be null");
+        return voidPtrType;
+    }
+
+    void UpdateMIRType(const MIRType &pType, const TyIdx tyIdx);
+    MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint8 dim, const uint32 *sizeArray,
+                                       const TypeAttrs &attrs = TypeAttrs());
+    // For one-dimensional arrays
+    MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs = TypeAttrs());
+    MIRType *GetOrCreateFarrayType(const MIRType &elem);
+    MIRType *GetOrCreateJarrayType(const MIRType &elem);
+    MIRType *GetOrCreateFunctionType(const TyIdx &, const std::vector<TyIdx> &, const std::vector<TypeAttrs> &,
+                                     bool isVarg = false, const TypeAttrs &retAttrs = TypeAttrs());
+    MIRType *GetOrCreateStructType(const std::string &name, const FieldVector &fields, const FieldVector &prntFields,
+                                   MIRModule &module)
+    {
+        return GetOrCreateStructOrUnion(name, fields, prntFields, module);
+    }
+
+    MIRType *GetOrCreateUnionType(const std::string &name, const FieldVector &fields, const FieldVector &parentFields,
+                                  MIRModule &module)
+    {
+        return GetOrCreateStructOrUnion(name, fields, parentFields, module, false);
+    }
+
+    MIRType *GetOrCreateClassType(const std::string &name, MIRModule &module)
+    {
+        return GetOrCreateClassOrInterface(name, module, true);
+    }
+
+    MIRType *GetOrCreateInterfaceType(const std::string &name, MIRModule &module)
+    {
+        return GetOrCreateClassOrInterface(name, module, false);
+    }
+
+    void PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type);
+    void AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, const MIRType &fieldType);
+
+    TyIdx lastDefaultTyIdx;
+
+private:
+    using MIRTypePtr = MIRType *;
+    struct Hash {
+        size_t operator()(const MIRTypePtr &ty) const
+        {
+            return ty->GetHashIndex();
+        }
+    };
+
+    struct Equal {
+        bool operator()(const MIRTypePtr &tx, const MIRTypePtr &ty) const
+        {
+            return tx->EqualTo(*ty);
+        }
+    };
+
+    // create an entry in typeTable for the type node
+    MIRType *CreateType(const MIRType &oldType)
+    {
+        MIRType *newType = oldType.CopyMIRTypeNode();
+        newType->SetTypeIndex(TyIdx(typeTable.size()));
+        typeTable.push_back(newType);
+        return newType;
+    }
+
+    void PushNull()
+    {
+        typeTable.push_back(nullptr);
+    }
+    void PopBack()
+    {
+        typeTable.pop_back();
+    }
+
+    void CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, bool isObject, bool isIncomplete);
+    MIRType *CreateAndUpdateMirTypeNode(MIRType &pType);
+    MIRType *GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields,
+                                      const FieldVector &parentFields, MIRModule &module, bool forStruct = true,
+                                      const TypeAttrs &attrs = TypeAttrs());
+    MIRType *GetOrCreateClassOrInterface(const std::string &name, MIRModule &module,
+                                         bool forClass);
+
+    MIRType *CreateMirType(uint32 primTypeIdx) const;
+    void PutToHashTable(MIRType *mirType);
+
+    std::unordered_set<MIRTypePtr, Hash, Equal> typeHashTable;
+    std::unordered_map<TyIdx, TyIdx> ptrTypeMap;
+    std::unordered_map<TyIdx, TyIdx> refTypeMap;
+    std::vector<MIRType *> typeTable;
+    mutable std::shared_timed_mutex mtx;
+};
+
+class StrPtrHash {
+public:
+    size_t operator()(const std::string *str) const
+    {
+        return std::hash<std::string> {}(*str);
+    }
+
+    size_t operator()(const std::u16string *str) const
+    {
+        return std::hash<std::u16string> {}(*str);
+    }
+};
+
+class StrPtrEqual {
+public:
+    bool operator()(const std::string *str1, const std::string *str2) const
+    {
+        return *str1 == *str2;
+    }
+
+    bool operator()(const std::u16string *str1, const std::u16string *str2) const
+    {
+        return *str1 == *str2;
+    }
+};
+
+// T can be std::string or std::u16string
+// U can be GStrIdx, UStrIdx, or U16StrIdx
+template <typename T, typename U>
+class StringTable {
+public:
+    StringTable() = default;
+    StringTable(const StringTable &) = delete;
+    StringTable &operator=(const StringTable &) = delete;
+
+    ~StringTable()
+    {
+        ReleaseStrings();
+    }
+
+    void Init()
+    {
+        // initialize 0th entry of stringTable with an empty string
+        T *ptr = new T;
+        stringTable.push_back(ptr);
+    }
+
+    void Reset()
+    {
+        ReleaseStrings();
+        stringTable.clear();
+        Init();
+    }
+
+    void ReleaseStrings()
+    {
+        stringTableMap.clear();
+        for (auto it : stringTable) {
+            delete it;
+        }
+    }
+
+    U GetStrIdxFromName(const T &str) const
+    {
+        if (ThreadEnv::IsMeParallel()) {
+            std::shared_lock lock(mtx);
+            auto it = stringTableMap.find(&str);
+            if (it == stringTableMap.end()) {
+                return U(0);
+            }
+            return it->second;
+        }
+        auto it = stringTableMap.find(&str);
+        if (it == stringTableMap.end()) {
+            return U(0);
+        }
+        return it->second;
+    }
+
+    U GetOrCreateStrIdxFromName(const T &str)
+    {
+        U strIdx = GetStrIdxFromName(str);
+        if (strIdx == 0u) {
+            if (ThreadEnv::IsMeParallel()) {
+                std::unique_lock lock(mtx);
+                strIdx.reset(stringTable.size());
+                T *newStr = new T(str);
+                stringTable.push_back(newStr);
+                stringTableMap[newStr] = strIdx;
+                return strIdx;
+            }
+            strIdx.reset(stringTable.size());
+            T *newStr = new T(str);
+            stringTable.push_back(newStr);
+            stringTableMap[newStr] = strIdx;
+        }
+        return strIdx;
+    }
+
+    size_t StringTableSize() const
+    {
+        if (ThreadEnv::IsMeParallel()) {
+            std::shared_lock lock(mtx);
+            return stringTable.size();
+        }
+        return stringTable.size();
+    }
+
+    const T &GetStringFromStrIdx(U strIdx) const
+    {
+        if (ThreadEnv::IsMeParallel()) {
+            std::shared_lock lock(mtx);
+            DEBUG_ASSERT(strIdx < stringTable.size(), "array index out of range");
+            return *stringTable[strIdx];
+        }
+        DEBUG_ASSERT(strIdx < stringTable.size(), "array index out of range");
+        return *stringTable[strIdx];
+    }
+
+    const T &GetStringFromStrIdx(uint32 idx) const
+    {
+        DEBUG_ASSERT(idx < stringTable.size(), "array index out of range");
+        return *stringTable[idx];
+    }
+
+private:
+    std::vector<T *> stringTable; // index is uint32
+    std::unordered_map<const T *, U, StrPtrHash, StrPtrEqual> stringTableMap;
+    mutable std::shared_timed_mutex mtx;
+};
+
+class FPConstTable {
+public:
+    FPConstTable(const FPConstTable &p) = delete;
+    FPConstTable &operator=(const FPConstTable &p) = delete;
+    ~FPConstTable();
+
+    // get the const from floatConstTable or create a new one
+    MIRFloatConst *GetOrCreateFloatConst(float fval);
+    // get the const from doubleConstTable or create a new one
+    MIRDoubleConst *GetOrCreateDoubleConst(double fval);
+
+    static std::unique_ptr<FPConstTable> Create()
+    {
+        auto p = std::unique_ptr<FPConstTable>(new FPConstTable());
+        p->PostInit();
+        return p;
+    }
+
+private:
+    FPConstTable() : floatConstTable(), doubleConstTable() {};
+    void PostInit();
+    MIRFloatConst *DoGetOrCreateFloatConst(float);
+    MIRDoubleConst *DoGetOrCreateDoubleConst(double);
+    MIRFloatConst *DoGetOrCreateFloatConstThreadSafe(float);
+    MIRDoubleConst *DoGetOrCreateDoubleConstThreadSafe(double);
+    std::shared_timed_mutex floatMtx;
+    std::shared_timed_mutex doubleMtx;
+    std::unordered_map<float, MIRFloatConst *> floatConstTable;    // map float const value to the table;
+    std::unordered_map<double, MIRDoubleConst *> doubleConstTable; // map double const value to the table;
+    MIRFloatConst *nanFloatConst = nullptr;
+    MIRFloatConst *infFloatConst = nullptr;
+    MIRFloatConst *minusInfFloatConst = nullptr;
+    MIRFloatConst *minusZeroFloatConst = nullptr;
+    MIRDoubleConst *nanDoubleConst = nullptr;
+    MIRDoubleConst *infDoubleConst = nullptr;
+    MIRDoubleConst *minusInfDoubleConst = nullptr;
+    MIRDoubleConst *minusZeroDoubleConst = nullptr;
+};
+
+class IntConstTable {
+public:
+    IntConstTable(const IntConstTable &p) = delete;
+    IntConstTable &operator=(const IntConstTable &p) = delete;
+    ~IntConstTable();
+
+    MIRIntConst *GetOrCreateIntConst(const IntVal &val, MIRType &type);
+    MIRIntConst *GetOrCreateIntConst(uint64 val, MIRType &type);
+
+    static std::unique_ptr<IntConstTable> Create()
+    {
+        auto p = std::unique_ptr<IntConstTable>(new IntConstTable());
+        return p;
+    }
+
+private:
+    IntConstTable() = default;
+    MIRIntConst *DoGetOrCreateIntConst(uint64 val, MIRType &type);
+    MIRIntConst *DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type);
+    std::shared_timed_mutex mtx;
+    std::unordered_map intConstTable;
+};
+
+// STypeNameTable is only used to store class and interface types.
+// Each module maintains its own MIRTypeNameTable.
+class STypeNameTable {
+public:
+    STypeNameTable() = default;
+    virtual ~STypeNameTable() = default;
+
+    void Reset()
+    {
+        gStrIdxToTyIdxMap.clear();
+    }
+
+    const std::unordered_map<GStrIdx, TyIdx> &GetGStridxToTyidxMap() const
+    {
+        return gStrIdxToTyIdxMap;
+    }
+
+    TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const
+    {
+        const auto it = gStrIdxToTyIdxMap.find(idx);
+        if (it == gStrIdxToTyIdxMap.cend()) {
+            return TyIdx(0);
+        }
+        return it->second;
+    }
+
+    void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx)
+    {
+        gStrIdxToTyIdxMap[gStrIdx] = tyIdx;
+    }
+
+private:
+    std::unordered_map<GStrIdx, TyIdx> gStrIdxToTyIdxMap;
+};
+
+class FunctionTable {
+public:
+    FunctionTable()
+    {
+        Init();
+    }
+
+    virtual ~FunctionTable() = default;
+
+    void Init()
+    {
+        // puIdx 0 is reserved
+        funcTable.push_back(nullptr);
+    }
+
+    void Reset()
+    {
+        funcTable.clear();
+        Init();
+    }
+
+    std::vector<MIRFunction *> &GetFuncTable()
+    {
+        return funcTable;
+    }
+
+    MIRFunction *GetFunctionFromPuidx(PUIdx pIdx) const
+    {
+        CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx");
+        return funcTable.at(pIdx);
+    }
+
+    void SetFunctionItem(uint32 pIdx, MIRFunction *func)
+    {
+        CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx");
+        funcTable[pIdx] = func;
+    }
+
+private:
+    std::vector<MIRFunction *> funcTable; // index is PUIdx
+};
+
+class GSymbolTable {
+public:
+    GSymbolTable();
+    GSymbolTable(const GSymbolTable &) = delete;
+    GSymbolTable &operator=(const GSymbolTable &) = delete;
+    ~GSymbolTable();
+
+    void Init();
+    void Reset();
+    void ReleaseSymbols();
+
+    MIRModule *GetModule()
+    {
+        return module;
+    }
+
+    void SetModule(MIRModule *m)
+    {
+        module = m;
+    }
+
+    bool IsValidIdx(size_t idx) const
+    {
+        return idx < symbolTable.size();
+    }
+
+    MIRSymbol *GetSymbolFromStidx(uint32 idx, bool checkFirst = false) const
+    {
+        if (checkFirst && idx >= symbolTable.size()) {
+            return nullptr;
+        }
+        DEBUG_ASSERT(IsValidIdx(idx), "symbol table index out of range");
+        return symbolTable[idx];
+    }
+
+    void SetStrIdxStIdxMap(GStrIdx strIdx, StIdx stIdx)
+    {
+        strIdxToStIdxMap[strIdx] = stIdx;
+    }
+
+    StIdx GetStIdxFromStrIdx(GStrIdx idx) const
+    {
+        const auto it = strIdxToStIdxMap.find(idx);
+        if (it == strIdxToStIdxMap.cend()) {
+            return StIdx();
+        }
+        return it->second;
+    }
+
+    MIRSymbol *GetSymbolFromStrIdx(GStrIdx idx, bool checkFirst = false) const
+    {
+        return GetSymbolFromStidx(GetStIdxFromStrIdx(idx).Idx(), checkFirst);
+    }
+
+    auto &GetTable()
+    {
+        return symbolTable;
+    }
+
+    size_t GetSymbolTableSize() const
+    {
+        return symbolTable.size();
+    }
+
+    MIRSymbol *GetSymbol(uint32 idx) const
+    {
+        DEBUG_ASSERT(idx < symbolTable.size(), "array index out of range");
+        return symbolTable.at(idx);
+    }
+
+    MIRSymbol *CreateSymbol(uint8 scopeID);
+    bool AddToStringSymbolMap(const MIRSymbol &st);
+    bool RemoveFromStringSymbolMap(const MIRSymbol &st);
+    void Dump(bool isLocal, int32 indent = 0) const;
+
+private:
+    MIRModule *module = nullptr;
+    // hash table mapping string index to st index
+    std::unordered_map<GStrIdx, StIdx> strIdxToStIdxMap;
+    std::vector<MIRSymbol *> symbolTable; // map symbol idx to symbol node
+};
+
+class ConstPool {
+public:
+    std::unordered_map<std::u16string, MIRSymbol *> &GetConstU16StringPool()
+    {
+        return constU16StringPool;
+    }
+
+    void Reset()
+    {
+        constMap.clear();
+        importedLiteralNames.clear();
+        constU16StringPool.clear();
+    }
+
+    void InsertConstPool(GStrIdx strIdx, MIRConst *cst)
+    {
+        (void)constMap.emplace(strIdx, cst);
+    }
+
+    MIRConst *GetConstFromPool(GStrIdx strIdx)
+    {
+        return constMap[strIdx];
+    }
+
+    void PutLiteralNameAsImported(GStrIdx gIdx)
+    {
+        (void)importedLiteralNames.insert(gIdx);
+    }
+
+    bool LookUpLiteralNameFromImported(GStrIdx gIdx)
+    {
+        return importedLiteralNames.find(gIdx) != importedLiteralNames.end();
+    }
+
+protected:
+    std::unordered_map<GStrIdx, MIRConst *> constMap;
+    std::set<GStrIdx> importedLiteralNames;
+
+private:
+    std::unordered_map<std::u16string, MIRSymbol *> constU16StringPool;
+};
+
+class GlobalTables {
+public:
+    static GlobalTables &GetGlobalTables();
+
+    static StringTable<std::string, GStrIdx> &GetStrTable()
+    {
+        return globalTables.gStringTable;
+    }
+
+    static StringTable<std::string, UStrIdx> &GetUStrTable()
+    {
+        return globalTables.uStrTable;
+    }
+
+    static StringTable<std::u16string, U16StrIdx> &GetU16StrTable()
+    {
+        return globalTables.u16StringTable;
+    }
+
+    static TypeTable &GetTypeTable()
+    {
+        return globalTables.typeTable;
+    }
+
+    static FPConstTable &GetFpConstTable()
+    {
+        return *(globalTables.fpConstTablePtr);
+    }
+
+    static STypeNameTable &GetTypeNameTable()
+    {
+        return globalTables.typeNameTable;
+    }
+
+    static FunctionTable &GetFunctionTable()
+    {
+        return globalTables.functionTable;
+    }
+
+    static GSymbolTable &GetGsymTable()
+    {
+        return globalTables.gSymbolTable;
+    }
+
+    static ConstPool &GetConstPool()
+    {
+        return globalTables.constPool;
+    }
+
+    static IntConstTable &GetIntConstTable()
+    {
+        return *(globalTables.intConstTablePtr);
+    }
+
+    static void Reset()
+    {
+        globalTables.typeTable.Reset();
+        globalTables.typeNameTable.Reset();
+        globalTables.functionTable.Reset();
+        globalTables.gSymbolTable.Reset();
+        globalTables.constPool.Reset();
+        globalTables.fpConstTablePtr = FPConstTable::Create();
+        globalTables.intConstTablePtr = IntConstTable::Create();
+        globalTables.gStringTable.Reset();
+        globalTables.uStrTable.Reset();
+        globalTables.u16StringTable.Reset();
+    }
+
+    GlobalTables(const GlobalTables &globalTables) = delete;
+    GlobalTables(const GlobalTables &&globalTables) = delete;
+    GlobalTables &operator=(const GlobalTables &globalTables) = delete;
+    GlobalTables &operator=(const GlobalTables &&globalTables) = delete;
+
+private:
+    GlobalTables() : fpConstTablePtr(FPConstTable::Create()), intConstTablePtr(IntConstTable::Create())
+    {
+        gStringTable.Init();
+        uStrTable.Init();
+        u16StringTable.Init();
+    }
+    virtual ~GlobalTables() = default;
+    thread_local static GlobalTables globalTables;
+
+    TypeTable typeTable;
+    STypeNameTable typeNameTable;
+    FunctionTable functionTable;
+    GSymbolTable gSymbolTable;
+    ConstPool constPool;
+    std::unique_ptr<FPConstTable> fpConstTablePtr;
+    std::unique_ptr<IntConstTable> intConstTablePtr;
+    StringTable<std::string, GStrIdx> gStringTable;
+    StringTable<std::string, UStrIdx> uStrTable;
+    StringTable<std::u16string, U16StrIdx> u16StringTable;
+};
+
+inline MIRType &GetTypeFromTyIdx(TyIdx idx)
+{
+    return *(GlobalTables::GetTypeTable().GetTypeFromTyIdx(idx));
+}
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_GLOBAL_TABLES_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_c.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_c.def
new file mode 100644
index 0000000000000000000000000000000000000000..275420766913b2786884084fca3ca5016815c4c4
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_c.def
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5)
+
+DEF_MIR_INTRINSIC(C_strcmp,\
+                  "strcmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr)
+DEF_MIR_INTRINSIC(C_strncmp,\
+                  "strncmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C_strcpy,\
+                  "strcpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr)
+DEF_MIR_INTRINSIC(C_strncpy,\
+                  "strncpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_strlen,\
+                  "strlen", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64, kArgTyPtr)
+DEF_MIR_INTRINSIC(C_strchr,\
+                  "strchr", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C_strrchr,\
+                  "strrchr", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C_memcmp,\
+                  "memcmp", INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_memcpy,\
+                  "memcpy", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_memmove,\
+                  "memmove", 0, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_memset,\
+                  "memset", 0, kArgTyVoid, kArgTyPtr, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_acosf,\
+                  "acosf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_asinf,\
+                  "asinf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_atanf,\
+                  "atanf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_cosf,\
+                  "cosf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_coshf,\
+                  "coshf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_expf,\
+                  "expf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_logf,\
+                  "logf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_log10f,\
+                  "log10f", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_sinf,\
+                  "sinf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_sinhf,\
+                  "sinhf", INTRNISPURE, kArgTyF32, kArgTyF32)
+DEF_MIR_INTRINSIC(C_acos,\
+                  "acos", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_asin,\
+                  "asin", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_atan,\
+                  "atan", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_cos,\
+                  "cos", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_cosh,\
+                  "cosh", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_exp,\
+                  "exp", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_log,\
+                  "log", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_log10,\
+                  "log10", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_sin,\
+                  "sin", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_sinh,\
+                  "sinh", INTRNISPURE, kArgTyF64, kArgTyF64)
+DEF_MIR_INTRINSIC(C_ffs,\
+                  "ffs", INTRNISPURE, kArgTyI32, kArgTyI32)
+DEF_MIR_INTRINSIC(C_va_start,\
+                  "va_start", INTRNISPURE | INTRNISSPECIAL, kArgTyVoid, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C_constant_p,\
+                  "constant_p", 0, kArgTyI32, kArgTyDynany)
+DEF_MIR_INTRINSIC(C_clz32,\
+                  "clz32", INTRNISPURE, kArgTyI32, kArgTyU32)
+DEF_MIR_INTRINSIC(C_clz64,\
+                  "clz64", INTRNISPURE, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_ctz32,\
+                  "ctz32", INTRNISPURE, kArgTyI32, kArgTyU32)
+DEF_MIR_INTRINSIC(C_ctz64,\
+                  "ctz64", INTRNISPURE, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_popcount32,\
+                  "popcount32", INTRNISPURE, kArgTyI32, kArgTyU32)
+DEF_MIR_INTRINSIC(C_popcount64,\
+                  "popcount64", INTRNISPURE, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_parity32,\
+                  "parity32", INTRNISPURE, kArgTyI32, kArgTyU32)
+DEF_MIR_INTRINSIC(C_parity64,\
+                  "parity64", INTRNISPURE, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_clrsb32,\
+                  "clrsb32", INTRNISPURE, kArgTyI32, kArgTyU32)
+DEF_MIR_INTRINSIC(C_clrsb64,\
+                  "clrsb64", INTRNISPURE, kArgTyI32, kArgTyU64)
+DEF_MIR_INTRINSIC(C_isaligned,\
+                  "isaligned", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_alignup,\
+                  "alignup", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_aligndown,\
+                  "aligndown", INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C_rev16_2,\
+                  "rev16", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, kArgTyI16)
+DEF_MIR_INTRINSIC(C_rev_4,\
+                  "rev", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyI32)
+DEF_MIR_INTRINSIC(C_rev_8,\
+                  "rev", INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, kArgTyI64)
+DEF_MIR_INTRINSIC(C_stack_save,\
+                  "stack_save", INTRNISPURE | INTRNISSPECIAL, kArgTyPtr)
+DEF_MIR_INTRINSIC(C_stack_restore,\
+                  "stack_restore", INTRNISPURE | INTRNISSPECIAL, kArgTyPtr)
+// sync
+DEF_MIR_INTRINSIC(C___sync_add_and_fetch_1,\
+                  "__sync_add_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8)
+DEF_MIR_INTRINSIC(C___sync_add_and_fetch_2,\
+                  "__sync_add_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16)
+DEF_MIR_INTRINSIC(C___sync_add_and_fetch_4,\
+                  "__sync_add_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C___sync_add_and_fetch_8,\
+                  "__sync_add_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64)
+DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_1,\
+                  "__sync_sub_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8)
+DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_2,\
+                  "__sync_sub_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16)
+DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_4,\
+                  "__sync_sub_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_8,\
"__sync_sub_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_1,\ + "__sync_fetch_and_add_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_2,\ + "__sync_fetch_and_add_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_4,\ + "__sync_fetch_and_add_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_8,\ + "__sync_fetch_and_add_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_1,\ + "__sync_fetch_and_sub_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_2,\ + "__sync_fetch_and_sub_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_4,\ + "__sync_fetch_and_sub_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_8,\ + "__sync_fetch_and_sub_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_1,\ + "__sync_bool_compare_and_swap_1", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU8, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_2,\ + "__sync_bool_compare_and_swap_2", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_4,\ + "__sync_bool_compare_and_swap_4", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_8,\ + "__sync_bool_compare_and_swap_8", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_1,\ + "__sync_val_compare_and_swap_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_2,\ + "__sync_val_compare_and_swap_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_4,\ + "__sync_val_compare_and_swap_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_8,\ + "__sync_val_compare_and_swap_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_1,\ + "__sync_lock_test_and_set_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_2,\ + "__sync_lock_test_and_set_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_4,\ + "__sync_lock_test_and_set_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_8,\ + "__sync_lock_test_and_set_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_release_8,\ + "__sync_lock_release_8", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_4,\ + "__sync_lock_release_4", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_2,\ + "__sync_lock_release_2", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_1,\ + "__sync_lock_release_1", INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_1,\ + "__sync_fetch_and_and_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_2,\ + "__sync_fetch_and_and_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_4,\ + "__sync_fetch_and_and_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_8,\ + 
"__sync_fetch_and_and_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_1,\ + "__sync_fetch_and_or_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_2,\ + "__sync_fetch_and_or_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_4,\ + "__sync_fetch_and_or_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_8,\ + "__sync_fetch_and_or_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_1,\ + "__sync_fetch_and_xor_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_2,\ + "__sync_fetch_and_xor_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_4,\ + "__sync_fetch_and_xor_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_8,\ + "__sync_fetch_and_xor_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_1,\ + "__sync_fetch_and_nand_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_2,\ + "__sync_fetch_and_nand_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_4,\ + "__sync_fetch_and_nand_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_8,\ + "__sync_fetch_and_nand_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_1,\ + "__sync_and_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_2,\ + "__sync_and_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_4,\ + "__sync_and_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_8,\ + "__sync_and_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_1,\ + "__sync_or_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_2,\ + "__sync_or_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_4,\ + "__sync_or_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_8,\ + "__sync_or_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_1,\ + "__sync_xor_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_2,\ + "__sync_xor_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_4,\ + "__sync_xor_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_8,\ + "__sync_xor_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_1,\ + "__sync_nand_and_fetch_1", INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_2,\ + "__sync_nand_and_fetch_2", INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_4,\ + "__sync_nand_and_fetch_4", INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_8,\ + "__sync_nand_and_fetch_8", INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_synchronize,\ + "__sync_synchronize", INTRNATOMIC, kArgTyUndef) + 
+DEF_MIR_INTRINSIC(C__builtin_return_address,\
+                  "__builtin_return_address", INTRNISPURE, kArgTyPtr, kArgTyU32)
+DEF_MIR_INTRINSIC(C__builtin_extract_return_addr,\
+                  "__builtin_extract_return_addr", INTRNISPURE, kArgTyPtr, kArgTyPtr)
+DEF_MIR_INTRINSIC(C___builtin_expect,\
+                  "__builtin_expect", INTRNISPURE, kArgTyI32, kArgTyI32, kArgTyI32)
+
+// atomic
+DEF_MIR_INTRINSIC(C___atomic_load_n,\
+                  "__atomic_load_n", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_load,\
+                  "__atomic_load", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_store_n,\
+                  "__atomic_store_n", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_store,\
+                  "__atomic_store", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_exchange_n,\
+                  "__atomic_exchange_n", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_exchange,\
+                  "__atomic_exchange", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_add_fetch,\
+                  "__atomic_add_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_sub_fetch,\
+                  "__atomic_sub_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_and_fetch,\
+                  "__atomic_and_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_xor_fetch,\
+                  "__atomic_xor_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_or_fetch,\
+                  "__atomic_or_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_nand_fetch,\
+                  "__atomic_nand_fetch", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_add,\
+                  "__atomic_fetch_add", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_sub,\
+                  "__atomic_fetch_sub", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_and,\
+                  "__atomic_fetch_and", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_xor,\
+                  "__atomic_fetch_xor", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_or,\
+                  "__atomic_fetch_or", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_fetch_nand,\
+                  "__atomic_fetch_nand", INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_test_and_set,\
+                  "__atomic_test_and_set", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_clear,\
+                  "__atomic_clear", INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_thread_fence,\
+                  "__atomic_thread_fence", INTRNATOMIC, kArgTyVoid, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_signal_fence,\
+                  "__atomic_signal_fence", INTRNATOMIC, kArgTyVoid, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_always_lock_free,\
+                  "__atomic_always_lock_free", INTRNATOMIC, kArgTyU1, kArgTyU64, kArgTyPtr)
+DEF_MIR_INTRINSIC(C___atomic_is_lock_free,\
+                  "__atomic_is_lock_free", INTRNATOMIC, kArgTyU1, kArgTyU64, kArgTyPtr)
+DEF_MIR_INTRINSIC(C___atomic_compare_exchange_n,\
+                  "__atomic_compare_exchange_n", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyDynany, kArgTyU1, kArgTyI32, kArgTyI32)
+DEF_MIR_INTRINSIC(C___atomic_compare_exchange,\
"__atomic_compare_exchange", INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU1, kArgTyI32, kArgTyI32) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_dai.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_dai.def new file mode 100644 index 0000000000000000000000000000000000000000..28c97ddfb22fad1643490dc94c00bafa6c5e4a8f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_dai.def @@ -0,0 +1,20 @@ +DEF_MIR_INTRINSIC(MCC_DeferredConstClass,\ + "MCC_DeferredConstClass", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInstanceOf,\ + "MCC_DeferredInstanceOf", INTRNISJAVA, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredCheckCast,\ + "MCC_DeferredCheckCast", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewInstance,\ + "MCC_DeferredNewInstance", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewArray,\ + "MCC_DeferredNewArray", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32) +DEF_MIR_INTRINSIC(MCC_DeferredFillNewArray,\ + "MCC_DeferredFillNewArray", INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany) +DEF_MIR_INTRINSIC(MCC_DeferredLoadField,\ + "MCC_DeferredLoadField", INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredStoreField,\ + "MCC_DeferredStoreField", INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInvoke,\ + "MCC_DeferredInvoke", INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredClinitCheck,\ + "MCC_DeferredClinitCheck", INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_java.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_java.def new file mode 100644 index 0000000000000000000000000000000000000000..06d9b615e4fb74df7ed0a5c173c5aa4f36ba1e10 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_java.def @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JAVA_ARRAY_LENGTH,\ + "__java_array_length", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ARRAY_FILL,\ + "__java_array_fill", INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_FILL_NEW_ARRAY,\ + "__java_fill_new_array", INTRNISJAVA, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CHECK_CAST,\ + "__java_check_cast", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CONST_CLASS,\ + "__java_const_class", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_INSTANCE_OF,\ + "__java_instance_of", INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ISASSIGNABLEFROM,\ + "__java_isAssignableFrom", INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_MERGE,\ + "__java_merge", INTRNISJAVA, kArgTyPtr, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK,\ + "__java_clinit_check", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_POLYMORPHIC_CALL,\ + "__java_polymorphic_call", INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_THROW_ARITHMETIC,\ + "MCC_ThrowArithmeticException", INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_GET_CLASS,\ + "MCC_GetClass", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js.def new file mode 100644 index 0000000000000000000000000000000000000000..382df9fd14a4f9aeafa24c02f6a77b4d782a1438 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js.def @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5)
+DEF_MIR_INTRINSIC(JS_INIT_CONTEXT,\
+                  "__js_init_context", INTRNISJS, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_REQUIRE,\
+                  "__js_require", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_BIOBJECT,\
+                  "__jsobj_get_or_create_builtin", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_BISTRING,\
+                  "__jsstr_get_builtin", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_THIS,\
+                  "__jsop_this", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_ADD,\
+                  "__jsop_add", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(ADD_WITH_OVERFLOW,\
+                  "__add_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(SUB_WITH_OVERFLOW,\
+                  "__sub_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(MUL_WITH_OVERFLOW,\
+                  "__mul_with_overflow", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_CONCAT,\
+                  "__jsstr_concat_2", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_STRICTEQ,\
+                  "__jsop_stricteq", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSSTR_STRICTEQ,\
+                  "__jsstr_equal", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_STRICTNE,\
+                  "__jsop_strictne", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSSTR_STRICTNE,\
+                  "__jsstr_ne", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_INSTANCEOF,\
+                  "__jsop_instanceof", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_IN,\
+                  "__jsop_in", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_OR,\
+                  "__jsop_or", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_AND,\
+                  "__jsop_and", INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_TYPEOF,\
+                  "__jsop_typeof", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW,\
+                  "__js_new", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_STRING,\
+                  "__js_ToString", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSSTR_LENGTH,\
+                  "__jsstr_get_length", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_BOOLEAN,\
+                  "__js_ToBoolean", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NUMBER,\
+                  "__js_ToNumber", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_INT32,\
+                  "__js_ToInt32", INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_PRINT,\
+                  "__jsop_print", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_ERROR,\
+                  "__js_error", INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNNEVERRETURN, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_EVAL,\
+                  "__js_eval", kIntrnUndef, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_ICALL,\
+                  "__js_icall", INTRNISJS | INTRNRETURNSTRUCT, kArgTyDynany, kArgTyA32, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_CALL,
+                  "__jsop_call", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_CCALL,\
+                  "__jsop_ccall", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_NEW,
+                  "__jsop_new", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_SETTIMEOUT,
+                  "__js_setTimeout", INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_SETCYCLEHEADER,\
+                  "__js_setCycleHeader", INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW_OBJECT_0,\
+                  "__js_new_obj_obj_0", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW_OBJECT_1,\
+                  "__js_new_obj_obj_1", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_SETPROP,\
+                  "__jsop_setprop", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_GETPROP,\
+                  "__jsop_getprop", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_DELPROP,\
+                  "__jsop_delprop", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_NAME,\
+                  "__jsop_setprop_by_name", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_NAME,\
+                  "__jsop_getprop_by_name", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_INDEX,\
+                  "__jsop_setprop_by_index", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_INDEX,\
+                  "__jsop_getprop_by_index", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_INITPROP_BY_NAME,\
+                  "__jsop_initprop", INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_INITPROP_GETTER,\
+                  "__jsop_initprop_getter", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_INITPROP_SETTER,\
+                  "__jsop_initprop_setter", INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW_FUNCTION,\
+                  "__js_new_function", INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW_ARR_ELEMS,\
+                  "__js_new_arr_elems", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_NEW_ARR_LENGTH,\
+                  "__js_new_arr_length", INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_LENGTH,\
+                  "__jsop_length", INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_NEW_ITERATOR,\
+                  "__jsop_valueto_iterator", INTRNISJS, kArgTyPtr, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_NEXT_ITERATOR,\
+                  "__jsop_iterator_next", INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_MORE_ITERATOR,\
+                  "__jsop_more_iterator", INTRNISJS, kArgTyU32, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_ADDSYSEVENTLISTENER,\
+                  "__js_add_sysevent_listener", INTRNISJS, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js_eng.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js_eng.def
new file mode 100644
index 0000000000000000000000000000000000000000..21c0132e0bfe8f55a4fd48286141db5acb975fd1
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_js_eng.def
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5)
+DEF_MIR_INTRINSIC(JS_GET_ARGUMENTOBJECT,\
+                  "__jsobj_get_or_create_argument", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_ERROR_OBJECT,\
+                  "__jsobj_get_or_create_error", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_EVALERROR_OBJECT,\
+                  "__jsobj_get_or_create_evalError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_RANGEERROR_OBJECT,\
+                  "__jsobj_get_or_create_rangeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_REFERENCEERROR_OBJECT,\
+                  "__jsobj_get_or_create_referenceError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_SYNTAXERROR_OBJECT,\
+                  "__jsobj_get_or_create_syntaxError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_TYPEERROR_OBJECT,\
+                  "__jsobj_get_or_create_typeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JS_GET_URIERROR_OBJECT,\
+                  "__jsobj_get_or_create_uriError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
+DEF_MIR_INTRINSIC(JSOP_ASSERTVALUE,
+                  "__jsop_assert_value", INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef)
\ No newline at end of file
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_op.h b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..48a8f7d237aba9ed66920e4a8aa0cc6e6ae40c0f
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_op.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_INTRINSIC_OP_H
+#define MAPLE_IR_INCLUDE_INTRINSIC_OP_H
+
+namespace maple {
+enum MIRIntrinsicID {
+#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) INTRN_##STR,
+#include "intrinsics.def"
+#undef DEF_MIR_INTRINSIC
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_INTRINSIC_OP_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_vector.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_vector.def
new file mode 100644
index 0000000000000000000000000000000000000000..30fa3fb8030d9a9890e96a86ebcbd0e53b7eb80f
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsic_vector.def
@@ -0,0 +1,1227 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE,
+//                   ARG0, ARG1, ARG2, ARG3, ARG4, ARG5)
+
+// vecTy vector_abs(vecTy src)
+// Create a vector by getting the absolute value of the elements in src.
+DEF_MIR_INTRINSIC(vector_abs_v8i8, "vector_abs_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_abs_v4i16, "vector_abs_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_abs_v2i32, "vector_abs_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_abs_v1i64, "vector_abs_v1i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64,
+                  kArgTyV1I64)
+DEF_MIR_INTRINSIC(vector_abs_v2f32, "vector_abs_v2f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyV2F32)
+DEF_MIR_INTRINSIC(vector_abs_v1f64, "vector_abs_v1f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyV1F64)
+DEF_MIR_INTRINSIC(vector_abs_v16i8, "vector_abs_v16i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8,
+                  kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_abs_v8i16, "vector_abs_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_abs_v4i32, "vector_abs_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_abs_v2i64, "vector_abs_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_abs_v4f32, "vector_abs_v4f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32,
+                  kArgTyV4F32)
+DEF_MIR_INTRINSIC(vector_abs_v2f64, "vector_abs_v2f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64,
+                  kArgTyV2F64)
+
+// vecTy2 vector_addl_low(vecTy1 src1, vecTy1 src2)
+// Add each element of src1 to the corresponding element of src2 and
+// put the widened result into the destination vector.
+DEF_MIR_INTRINSIC(vector_addl_low_v8i8, "vector_addl_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I8, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_addl_low_v4i16, "vector_addl_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I16, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_addl_low_v2i32, "vector_addl_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I32, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_addl_low_v8u8, "vector_addl_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U8, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_addl_low_v4u16, "vector_addl_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U16, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_addl_low_v2u32, "vector_addl_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U32, kArgTyV2U32)
+
+// vecTy2 vector_addl_high(vecTy1 src1, vecTy1 src2)
+// Add each element of the upper half of src1 to the corresponding element of
+// the upper half of src2 and put the widened result into the destination vector.
+DEF_MIR_INTRINSIC(vector_addl_high_v8i8, "vector_addl_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV16I8, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_addl_high_v4i16, "vector_addl_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV8I16, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_addl_high_v2i32, "vector_addl_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV4I32, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_addl_high_v8u8, "vector_addl_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV16U8, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_addl_high_v4u16, "vector_addl_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV8U16, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_addl_high_v2u32, "vector_addl_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV4U32, kArgTyV4U32)
+
+// vecTy2 vector_addw_low(vecTy2 src1, vecTy1 src2)
+// Add each element of the wide src1 to the corresponding widened element of
+// the narrow src2 and put the result into the destination vector.
+DEF_MIR_INTRINSIC(vector_addw_low_v8i8, "vector_addw_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_addw_low_v4i16, "vector_addw_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_addw_low_v2i32, "vector_addw_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_addw_low_v8u8, "vector_addw_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_addw_low_v4u16, "vector_addw_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_addw_low_v2u32, "vector_addw_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV2U32)
+
+// vecTy2 vector_addw_high(vecTy2 src1, vecTy1 src2)
+// Add each element of the wide src1 to the corresponding widened element of
+// the upper half of src2 and put the result into the destination vector.
+DEF_MIR_INTRINSIC(vector_addw_high_v8i8, "vector_addw_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_addw_high_v4i16, "vector_addw_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_addw_high_v2i32, "vector_addw_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_addw_high_v8u8, "vector_addw_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_addw_high_v4u16, "vector_addw_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_addw_high_v2u32, "vector_addw_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV4U32)
+
+// vecTy vector_from_scalar(scalarTy value)
+// Create a vector by repeating the scalar value for each element in the
+// vector.
+DEF_MIR_INTRINSIC(vector_from_scalar_v2i64, "vector_from_scalar_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyI64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v4i32, "vector_from_scalar_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyI32)
+DEF_MIR_INTRINSIC(vector_from_scalar_v8i16, "vector_from_scalar_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyI16)
+DEF_MIR_INTRINSIC(vector_from_scalar_v16i8, "vector_from_scalar_v16i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8,
+                  kArgTyI8)
+DEF_MIR_INTRINSIC(vector_from_scalar_v2u64, "vector_from_scalar_v2u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyU64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v4u32, "vector_from_scalar_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyU32)
+DEF_MIR_INTRINSIC(vector_from_scalar_v8u16, "vector_from_scalar_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyU16)
+DEF_MIR_INTRINSIC(vector_from_scalar_v16u8, "vector_from_scalar_v16u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8,
+                  kArgTyU8)
+DEF_MIR_INTRINSIC(vector_from_scalar_v2f64, "vector_from_scalar_v2f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64,
+                  kArgTyF64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v4f32, "vector_from_scalar_v4f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32,
+                  kArgTyF32)
+DEF_MIR_INTRINSIC(vector_from_scalar_v1i64, "vector_from_scalar_v1i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64,
+                  kArgTyI64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v2i32, "vector_from_scalar_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyI32)
+DEF_MIR_INTRINSIC(vector_from_scalar_v4i16, "vector_from_scalar_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyI16)
+DEF_MIR_INTRINSIC(vector_from_scalar_v8i8, "vector_from_scalar_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyI8)
+DEF_MIR_INTRINSIC(vector_from_scalar_v1u64, "vector_from_scalar_v1u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64,
+                  kArgTyU64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v2u32, "vector_from_scalar_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyU32)
+DEF_MIR_INTRINSIC(vector_from_scalar_v4u16, "vector_from_scalar_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyU16)
+DEF_MIR_INTRINSIC(vector_from_scalar_v8u8, "vector_from_scalar_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyU8)
+DEF_MIR_INTRINSIC(vector_from_scalar_v1f64, "vector_from_scalar_v1f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyF64)
+DEF_MIR_INTRINSIC(vector_from_scalar_v2f32, "vector_from_scalar_v2f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyF32)
+
+// vecTy2 vector_labssub_low(vecTy1 src1, vecTy1 src2)
+// Create a widened vector by getting the abs value of subtracted arguments.
+DEF_MIR_INTRINSIC(vector_labssub_low_v8i8, "vector_labssub_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I8, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_labssub_low_v4i16, "vector_labssub_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I16, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_labssub_low_v2i32, "vector_labssub_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I32, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_labssub_low_v8u8, "vector_labssub_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U8, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_labssub_low_v4u16, "vector_labssub_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U16, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_labssub_low_v2u32, "vector_labssub_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U32, kArgTyV2U32)
+
+// vecTy2 vector_labssub_high(vecTy1 src1, vecTy1 src2)
+// Create a widened vector by getting the abs value of subtracted high args.
+DEF_MIR_INTRINSIC(vector_labssub_high_v8i8, "vector_labssub_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV16I8, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_labssub_high_v4i16, "vector_labssub_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV8I16, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_labssub_high_v2i32, "vector_labssub_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV4I32, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_labssub_high_v8u8, "vector_labssub_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV16U8, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_labssub_high_v4u16, "vector_labssub_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV8U16, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_labssub_high_v2u32, "vector_labssub_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV4U32, kArgTyV4U32)
+
+// vecTy2 vector_madd(vecTy2 accum, vecTy1 src1, vecTy1 src2)
+// Multiply the elements of src1 and src2, then accumulate into accum.
+// Elements of vecTy2 are twice as long as elements of vecTy1.
+DEF_MIR_INTRINSIC(vector_madd_v2i32, "vector_madd_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_madd_v4i16, "vector_madd_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_madd_v8i8, "vector_madd_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_madd_v2u32, "vector_madd_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_madd_v4u16, "vector_madd_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_madd_v8u8, "vector_madd_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_low(vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2. Elements of vecTy2 are twice as +// long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_mull_low_v2i32, "vector_mull_low_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_low_v4i16, "vector_mull_low_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_low_v8i8, "vector_mull_low_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_low_v2u32, "vector_mull_low_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_low_v4u16, "vector_mull_low_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_low_v8u8, "vector_mull_low_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_high(vecTy1 src1, vecTy1 src2) +// Multiply the upper elements of src1 and src2. Elements of vecTy2 are twice +// as long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_mull_high_v2i32, "vector_mull_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_high_v4i16, "vector_mull_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_high_v8i8, "vector_mull_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_high_v2u32, "vector_mull_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_high_v4u16, "vector_mull_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_high_v8u8, "vector_mull_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy vector_merge(vecTy src1, vecTy src2, int n) +// Create a vector by concatenating the high elements of src1, starting +// with the nth element, followed by the low elements of src2. 
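A minimal scalar model of the merge (extract-and-concatenate) behavior described above, assuming an 8-element vector; the helper name is illustrative, not from the patch. The definitions follow below.

    #include <cstdint>

    // Model of vector_merge_v8i8: dst = src1[n..7] followed by src2[0..n-1].
    void MergeV8I8Model(int8_t dst[8], const int8_t src1[8], const int8_t src2[8], int n)
    {
        for (int i = 0; i < 8; ++i) {
            dst[i] = (i + n < 8) ? src1[i + n] : src2[i + n - 8];
        }
    }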
+DEF_MIR_INTRINSIC(vector_merge_v2i64, "vector_merge_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i32, "vector_merge_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i16, "vector_merge_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16i8, "vector_merge_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u64, "vector_merge_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u32, "vector_merge_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u16, "vector_merge_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16u8, "vector_merge_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f64, "vector_merge_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4f32, "vector_merge_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1i64, "vector_merge_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2i32, "vector_merge_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i16, "vector_merge_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i8, "vector_merge_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1u64, "vector_merge_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u32, "vector_merge_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u16, "vector_merge_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u8, "vector_merge_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1f64, "vector_merge_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f32, "vector_merge_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_get_low(vecTy1 src) +// Create a vector from the low part of the source vector. 
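The low/high accessors described above (and defined below) amount to slicing off one half of the source; a sketch assuming a v4i32 source, with hypothetical helper names:

    #include <cstdint>

    // Models of vector_get_low_v4i32 / vector_get_high_v4i32: the destination
    // receives elements [0..1] or [2..3] of the source, respectively.
    void GetLowV4I32Model(int32_t dst[2], const int32_t src[4])
    {
        for (int i = 0; i < 2; ++i) dst[i] = src[i];      // low half
    }
    void GetHighV4I32Model(int32_t dst[2], const int32_t src[4])
    {
        for (int i = 0; i < 2; ++i) dst[i] = src[i + 2];  // high half
    }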
+DEF_MIR_INTRINSIC(vector_get_low_v2i64, "vector_get_low_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64,
+                  kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_get_low_v4i32, "vector_get_low_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_get_low_v8i16, "vector_get_low_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_get_low_v16i8, "vector_get_low_v16i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_get_low_v2u64, "vector_get_low_v2u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64,
+                  kArgTyV2U64)
+DEF_MIR_INTRINSIC(vector_get_low_v4u32, "vector_get_low_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_get_low_v8u16, "vector_get_low_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_get_low_v16u8, "vector_get_low_v16u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_get_low_v2f64, "vector_get_low_v2f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyV2F64)
+DEF_MIR_INTRINSIC(vector_get_low_v4f32, "vector_get_low_v4f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyV4F32)
+
+// vecTy2 vector_get_high(vecTy1 src)
+// Create a vector from the high part of the source vector.
+DEF_MIR_INTRINSIC(vector_get_high_v2i64, "vector_get_high_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64,
+                  kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_get_high_v4i32, "vector_get_high_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_get_high_v8i16, "vector_get_high_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_get_high_v16i8, "vector_get_high_v16i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_get_high_v2u64, "vector_get_high_v2u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64,
+                  kArgTyV2U64)
+DEF_MIR_INTRINSIC(vector_get_high_v4u32, "vector_get_high_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_get_high_v8u16, "vector_get_high_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_get_high_v16u8, "vector_get_high_v16u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_get_high_v2f64, "vector_get_high_v2f64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyV2F64)
+DEF_MIR_INTRINSIC(vector_get_high_v4f32, "vector_get_high_v4f32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyV4F32)
+
+// scalarTy vector_get_element(vecTy src, int n)
+// Get the nth element of the source vector.
+DEF_MIR_INTRINSIC(vector_get_element_v2i64, "vector_get_element_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i32, "vector_get_element_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i16, "vector_get_element_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16i8, "vector_get_element_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u64, "vector_get_element_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u32, "vector_get_element_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u16, "vector_get_element_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16u8, "vector_get_element_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f64, "vector_get_element_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4f32, "vector_get_element_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1i64, "vector_get_element_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2i32, "vector_get_element_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i16, "vector_get_element_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i8, "vector_get_element_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1u64, "vector_get_element_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u32, "vector_get_element_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u16, "vector_get_element_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u8, "vector_get_element_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1f64, "vector_get_element_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f32, "vector_get_element_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV2F32, kArgTyI32) + +// vecTy vector_set_element(ScalarTy value, VecTy vec, int n) +// Set the nth element of the source vector to value. 
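The element accessors read and write one lane; a minimal sketch for v4i32, assuming 0 <= n < 4 and hypothetical helper names (the real intrinsic yields the updated vector rather than mutating in place):

    #include <cstdint>

    // Models of vector_get_element_v4i32 / vector_set_element_v4i32.
    int32_t GetElementV4I32Model(const int32_t vec[4], int n)
    {
        return vec[n];   // read lane n
    }
    void SetElementV4I32Model(int32_t value, int32_t vec[4], int n)
    {
        vec[n] = value;  // write lane n; the intrinsic returns the new vector
    }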
+DEF_MIR_INTRINSIC(vector_set_element_v2i64, "vector_set_element_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyI64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i32, "vector_set_element_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyI32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i16, "vector_set_element_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyI16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16i8, "vector_set_element_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyI8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u64, "vector_set_element_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyU64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u32, "vector_set_element_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyU32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u16, "vector_set_element_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyU16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16u8, "vector_set_element_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyU8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f64, "vector_set_element_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyF64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4f32, "vector_set_element_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyF32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1i64, "vector_set_element_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyI64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2i32, "vector_set_element_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyI32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i16, "vector_set_element_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyI16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i8, "vector_set_element_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyI8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1u64, "vector_set_element_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyU64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u32, "vector_set_element_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyU32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u16, "vector_set_element_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyU16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u8, "vector_set_element_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyU8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1f64, "vector_set_element_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyF64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f32, "vector_set_element_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyF32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_widen_low(vecTy1 src) +// Widen each element of the 64-bit 
argument to twice its original width, producing a 128-bit
+// destination vector.
+DEF_MIR_INTRINSIC(vector_widen_low_v2i32, "vector_widen_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_widen_low_v4i16, "vector_widen_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_widen_low_v8i8, "vector_widen_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_widen_low_v2u32, "vector_widen_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U32)
+DEF_MIR_INTRINSIC(vector_widen_low_v4u16, "vector_widen_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_widen_low_v8u8, "vector_widen_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U8)
+
+// vecTy2 vector_widen_high(vecTy1 src)
+// Widen each element of the upper half of the 128-bit source vector to
+// twice its original width, producing a 128-bit destination vector.
+DEF_MIR_INTRINSIC(vector_widen_high_v2i32, "vector_widen_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_widen_high_v4i16, "vector_widen_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_widen_high_v8i8, "vector_widen_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_widen_high_v2u32, "vector_widen_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_widen_high_v4u16, "vector_widen_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_widen_high_v8u8, "vector_widen_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV16U8)
+
+// vecTy2 vector_narrow_low(vecTy1 src)
+// Narrow each element of the 128-bit source vector to half of the original
+// width, then write it to the lower half of the destination vector.
+DEF_MIR_INTRINSIC(vector_narrow_low_v2i64, "vector_narrow_low_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_narrow_low_v4i32, "vector_narrow_low_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_narrow_low_v8i16, "vector_narrow_low_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_narrow_low_v2u64, "vector_narrow_low_v2u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyV2U64)
+DEF_MIR_INTRINSIC(vector_narrow_low_v4u32, "vector_narrow_low_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_narrow_low_v8u16, "vector_narrow_low_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyV8U16)
+
+// vecTy2 vector_narrow_high(vecTy0 low, vecTy1 src)
+// Narrow each element of the 128-bit src to half of the original width, then
+// concatenate it after the 64-bit low argument to form a 128-bit destination
+// vector.
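A scalar model of the widen/narrow families (the narrow_high definitions continue below); sign extension for signed types and plain truncation when narrowing are assumptions matching the comments, and the helper names are illustrative:

    #include <cstdint>

    // Model of vector_widen_low_v8i8: each 8-bit lane is sign-extended to 16 bits.
    void WidenLowV8I8Model(int16_t dst[8], const int8_t src[8])
    {
        for (int i = 0; i < 8; ++i) dst[i] = src[i];
    }
    // Model of vector_narrow_low_v4i32: each 32-bit lane is truncated to 16 bits.
    void NarrowLowV4I32Model(int16_t dst[4], const int32_t src[4])
    {
        for (int i = 0; i < 4; ++i) dst[i] = static_cast<int16_t>(src[i]);
    }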
+DEF_MIR_INTRINSIC(vector_narrow_high_v2i64, "vector_narrow_high_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV2I32, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4i32, "vector_narrow_high_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV4I16, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8i16, "vector_narrow_high_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV8I8, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_narrow_high_v2u64, "vector_narrow_high_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV2U32, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4u32, "vector_narrow_high_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV4U16, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8u16, "vector_narrow_high_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV8U8, kArgTyV8U16) + +// vecTy vector_pairwise_adalp(vecTy src1, vecTy2 src2) +// Pairwise add of src2 then accumulate into src1 as dest +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i8, "vector_pairwise_adalp_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i16, "vector_pairwise_adalp_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2i32, "vector_pairwise_adalp_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u8, "vector_pairwise_adalp_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u16, "vector_pairwise_adalp_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2u32, "vector_pairwise_adalp_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16i8, "vector_pairwise_adalp_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i16, "vector_pairwise_adalp_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i32, "vector_pairwise_adalp_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16u8, "vector_pairwise_adalp_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u16, "vector_pairwise_adalp_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u32, "vector_pairwise_adalp_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) + +// vecTy2 vector_pairwise_add(vecTy1 src) +// Add pairs of elements from the source vector and put the result into the +// destination vector, whose element size is twice and the number of +// elements is half of the source vector type. 
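A scalar model of the pairwise add just described; vector_pairwise_adalp additionally accumulates this result into src1. The helper name and v8i8 shape are illustrative:

    #include <cstdint>

    // Model of vector_pairwise_add_v8i8: adjacent pairs are summed at twice
    // the element width, halving the element count.
    void PairwiseAddV8I8Model(int16_t dst[4], const int8_t src[8])
    {
        for (int i = 0; i < 4; ++i) {
            dst[i] = static_cast<int16_t>(int16_t(src[2 * i]) + int16_t(src[2 * i + 1]));
        }
    }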
+DEF_MIR_INTRINSIC(vector_pairwise_add_v4i32, "vector_pairwise_add_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i16, "vector_pairwise_add_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16i8, "vector_pairwise_add_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u32, "vector_pairwise_add_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u16, "vector_pairwise_add_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16u8, "vector_pairwise_add_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2i32, "vector_pairwise_add_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4i16, "vector_pairwise_add_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i8, "vector_pairwise_add_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2u32, "vector_pairwise_add_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u16, "vector_pairwise_add_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u8, "vector_pairwise_add_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U8) + +// vecTy vector_reverse(vecTy src) +// Create a vector by reversing the order of the elements in src. 
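The reverse family simply inverts element order; a one-loop sketch with an illustrative helper name:

    #include <cstdint>

    // Model of vector_reverse_v8i8: element order is inverted.
    void ReverseV8I8Model(int8_t dst[8], const int8_t src[8])
    {
        for (int i = 0; i < 8; ++i) dst[i] = src[7 - i];
    }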
+DEF_MIR_INTRINSIC(vector_reverse_v2i64, "vector_reverse_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_reverse_v4i32, "vector_reverse_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_reverse_v8i16, "vector_reverse_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_reverse_v16i8, "vector_reverse_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse_v2u64, "vector_reverse_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_reverse_v4u32, "vector_reverse_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_reverse_v8u16, "vector_reverse_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_reverse_v16u8, "vector_reverse_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse_v2f64, "vector_reverse_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_reverse_v4f32, "vector_reverse_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_reverse_v1i64, "vector_reverse_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_reverse_v2i32, "vector_reverse_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_reverse_v4i16, "vector_reverse_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_reverse_v8i8, "vector_reverse_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_reverse_v1u64, "vector_reverse_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_reverse_v2u32, "vector_reverse_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_reverse_v4u16, "vector_reverse_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_reverse_v8u8, "vector_reverse_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse_v1f64, "vector_reverse_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_reverse_v2f32, "vector_reverse_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32) + +// vector_reverse16 with 8-bit elements +DEF_MIR_INTRINSIC(vector_reverse16_v16u8, "vector_reverse16_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse16_v16i8, "vector_reverse16_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse16_v8u8, "vector_reverse16_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse16_v8i8, "vector_reverse16_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) + +// vector_reverse64 with 8-bit elements 
+DEF_MIR_INTRINSIC(vector_reverse64_v16u8, "vector_reverse64_v16u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8,
+                  kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_reverse64_v16i8, "vector_reverse64_v16i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8,
+                  kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_reverse64_v8u8, "vector_reverse64_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_reverse64_v8i8, "vector_reverse64_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV8I8)
+
+// vector_reverse64 with 16-bit elements
+DEF_MIR_INTRINSIC(vector_reverse64_v8u16, "vector_reverse64_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_reverse64_v8i16, "vector_reverse64_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_reverse64_v4u16, "vector_reverse64_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_reverse64_v4i16, "vector_reverse64_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV4I16)
+
+// vector_reverse64 with 32-bit elements
+DEF_MIR_INTRINSIC(vector_reverse64_v4u32, "vector_reverse64_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_reverse64_v4i32, "vector_reverse64_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_reverse64_v2u32, "vector_reverse64_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyV2U32)
+DEF_MIR_INTRINSIC(vector_reverse64_v2i32, "vector_reverse64_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV2I32)
+
+// vecTy2 vector_shr_narrow_low(vecTy1 src, const int n)
+// Shift each element in the vector right by n, narrow each element to half
+// of the original width (truncating), then write the result to the lower
+// half of the destination vector.
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2i64, "vector_shr_narrow_low_v2i64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32,
+                  kArgTyV2I64, kArgTyI32)
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4i32, "vector_shr_narrow_low_v4i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16,
+                  kArgTyV4I32, kArgTyI32)
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8i16, "vector_shr_narrow_low_v8i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8,
+                  kArgTyV8I16, kArgTyI32)
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2u64, "vector_shr_narrow_low_v2u64",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32,
+                  kArgTyV2U64, kArgTyI32)
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4u32, "vector_shr_narrow_low_v4u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16,
+                  kArgTyV4U32, kArgTyI32)
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8u16, "vector_shr_narrow_low_v8u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyV8U16, kArgTyI32)
+
+// scalarTy vector_sum(vecTy src)
+// Sum all of the elements in the vector into a scalar.
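A scalar model of the horizontal sum declared below; accumulating in unsigned arithmetic so that wrap-around is well defined is an assumption about the intended modular behavior:

    #include <cstdint>

    // Model of vector_sum_v4i32: horizontal add of all lanes into one scalar.
    int32_t SumV4I32Model(const int32_t src[4])
    {
        uint32_t acc = 0;  // unsigned, so overflow wraps instead of being UB
        for (int i = 0; i < 4; ++i) acc += static_cast<uint32_t>(src[i]);
        return static_cast<int32_t>(acc);
    }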
+DEF_MIR_INTRINSIC(vector_sum_v2i64, "vector_sum_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_sum_v4i32, "vector_sum_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_sum_v8i16, "vector_sum_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_sum_v16i8, "vector_sum_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_sum_v2u64, "vector_sum_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_sum_v4u32, "vector_sum_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_sum_v8u16, "vector_sum_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_sum_v16u8, "vector_sum_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_sum_v2f64, "vector_sum_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_sum_v4f32, "vector_sum_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_sum_v1i64, "vector_sum_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_sum_v2i32, "vector_sum_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_sum_v4i16, "vector_sum_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_sum_v8i8, "vector_sum_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_sum_v1u64, "vector_sum_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_sum_v2u32, "vector_sum_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_sum_v4u16, "vector_sum_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_sum_v8u8, "vector_sum_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_sum_v1f64, "vector_sum_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_sum_v2f32, "vector_sum_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV2F32) + +// vecTy table_lookup(vecTy tbl, vecTy idx) +// Performs a table vector lookup. 
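A scalar model of the table lookup defined below; treating out-of-range indices as producing zero is borrowed from AArch64 TBL and is an assumption, since the comment above does not specify it:

    #include <cstdint>

    // Model of vector_table_lookup_v8u8: idx selects bytes out of tbl.
    void TableLookupV8U8Model(uint8_t dst[8], const uint8_t tbl[8], const uint8_t idx[8])
    {
        for (int i = 0; i < 8; ++i) {
            dst[i] = (idx[i] < 8) ? tbl[idx[i]] : 0;  // zero for out-of-range (assumed)
        }
    }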
+DEF_MIR_INTRINSIC(vector_table_lookup_v2i64, "vector_table_lookup_v2i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i32, "vector_table_lookup_v4i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i16, "vector_table_lookup_v8i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16i8, "vector_table_lookup_v16i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u64, "vector_table_lookup_v2u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u32, "vector_table_lookup_v4u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u16, "vector_table_lookup_v8u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16u8, "vector_table_lookup_v16u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f64, "vector_table_lookup_v2f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4f32, "vector_table_lookup_v4f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_table_lookup_v1i64, "vector_table_lookup_v1i64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2i32, "vector_table_lookup_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i16, "vector_table_lookup_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i8, "vector_table_lookup_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1u64, "vector_table_lookup_v1u64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u32, "vector_table_lookup_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u16, "vector_table_lookup_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u8, "vector_table_lookup_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1f64, "vector_table_lookup_v1f64", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f32, "vector_table_lookup_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32) + +// vecArrTy vector_zip(vecTy a, vecTy b) +// Interleave the upper half of elements from a and b into the destination +// vector. 
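A sketch of a vzip-style interleave for vector_zip_v2i32, defined below; the exact layout of the kArgTyAgg result is an assumption, since the patch only declares it as an aggregate:

    #include <cstdint>

    // Model of vector_zip_v2i32, with the aggregate flattened to four lanes:
    // dst = { a[0], b[0], a[1], b[1] }.
    void ZipV2I32Model(int32_t dst[4], const int32_t a[2], const int32_t b[2])
    {
        for (int i = 0; i < 2; ++i) {
            dst[2 * i] = a[i];
            dst[2 * i + 1] = b[i];
        }
    }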
+DEF_MIR_INTRINSIC(vector_zip_v2i32, "vector_zip_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_zip_v4i16, "vector_zip_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_zip_v8i8, "vector_zip_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_zip_v2u32, "vector_zip_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_zip_v4u16, "vector_zip_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_zip_v8u8, "vector_zip_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_zip_v2f32, "vector_zip_v2f32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2F32, kArgTyV2F32) + +// vecTy vector_load(scalarTy *ptr) +// Load the elements pointed to by ptr into a vector. +DEF_MIR_INTRINSIC(vector_load_v2i64, "vector_load_v2i64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i32, "vector_load_v4i32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i16, "vector_load_v8i16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16i8, "vector_load_v16i8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u64, "vector_load_v2u64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u32, "vector_load_v4u32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8u16, "vector_load_v8u16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16u8, "vector_load_v16u8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2f64, "vector_load_v2f64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4f32, "vector_load_v4f32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1i64, "vector_load_v1i64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2i32, "vector_load_v2i32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i16, "vector_load_v4i16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i8, "vector_load_v8i8", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1u64, "vector_load_v1u64", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u32, "vector_load_v2u32", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u16, "vector_load_v4u16", + INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U16, + 
kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v8u8, "vector_load_v8u8",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U8,
+                  kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v1f64, "vector_load_v1f64",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1F64,
+                  kArgTyPtr)
+DEF_MIR_INTRINSIC(vector_load_v2f32, "vector_load_v2f32",
+                  INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F32,
+                  kArgTyPtr)
+
+// void vector_store(scalarTy *ptr, vecTy src)
+// Store the elements from src into the memory pointed to by ptr.
+DEF_MIR_INTRINSIC(vector_store_v2i64, "vector_store_v2i64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2I64)
+DEF_MIR_INTRINSIC(vector_store_v4i32, "vector_store_v4i32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_store_v8i16, "vector_store_v8i16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_store_v16i8, "vector_store_v16i8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_store_v2u64, "vector_store_v2u64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2U64)
+DEF_MIR_INTRINSIC(vector_store_v4u32, "vector_store_v4u32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4U32)
+DEF_MIR_INTRINSIC(vector_store_v8u16, "vector_store_v8u16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_store_v16u8, "vector_store_v16u8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_store_v2f64, "vector_store_v2f64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2F64)
+DEF_MIR_INTRINSIC(vector_store_v4f32, "vector_store_v4f32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4F32)
+DEF_MIR_INTRINSIC(vector_store_v1i64, "vector_store_v1i64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1I64)
+DEF_MIR_INTRINSIC(vector_store_v2i32, "vector_store_v2i32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_store_v4i16, "vector_store_v4i16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_store_v8i8, "vector_store_v8i8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_store_v1u64, "vector_store_v1u64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1U64)
+DEF_MIR_INTRINSIC(vector_store_v2u32, "vector_store_v2u32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2U32)
+DEF_MIR_INTRINSIC(vector_store_v4u16, "vector_store_v4u16", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_store_v8u8, "vector_store_v8u8", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_store_v1f64, "vector_store_v1f64", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV1F64)
+DEF_MIR_INTRINSIC(vector_store_v2f32, "vector_store_v2f32", INTRNISVECTOR,
+                  kArgTyVoid, kArgTyPtr, kArgTyV2F32)
+
+// vecTy2 vector_subl_low(vecTy1 src1, vecTy1 src2)
+// Subtract each element of the second source vector from the corresponding
+// element of the first, widening the result into the destination vector.
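Scalar models of the widening subtracts defined below; subl widens both narrow operands, while subw keeps the first operand at the wide width. Helper names are illustrative:

    #include <cstdint>

    // Model of vector_subl_low_v8i8: both operands are narrow and widened first.
    void SublLowV8I8Model(int16_t dst[8], const int8_t a[8], const int8_t b[8])
    {
        for (int i = 0; i < 8; ++i) dst[i] = static_cast<int16_t>(int16_t(a[i]) - int16_t(b[i]));
    }
    // Model of vector_subw_low_v8i8: the first operand is already wide.
    void SubwLowV8I8Model(int16_t dst[8], const int16_t a[8], const int8_t b[8])
    {
        for (int i = 0; i < 8; ++i) dst[i] = static_cast<int16_t>(a[i] - int16_t(b[i]));
    }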
+DEF_MIR_INTRINSIC(vector_subl_low_v8i8, "vector_subl_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I8, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_subl_low_v4i16, "vector_subl_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I16, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_subl_low_v2i32, "vector_subl_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I32, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_subl_low_v8u8, "vector_subl_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U8, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_subl_low_v4u16, "vector_subl_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U16, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_subl_low_v2u32, "vector_subl_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U32, kArgTyV2U32)
+
+// vecTy2 vector_subl_high(vecTy1 src1, vecTy1 src2)
+// Subtract the elements of the upper halves of the two source vectors,
+// widening the result into the destination vector.
+DEF_MIR_INTRINSIC(vector_subl_high_v8i8, "vector_subl_high_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV16I8, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_subl_high_v4i16, "vector_subl_high_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV8I16, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_subl_high_v2i32, "vector_subl_high_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV4I32, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_subl_high_v8u8, "vector_subl_high_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV16U8, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_subl_high_v4u16, "vector_subl_high_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV8U16, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_subl_high_v2u32, "vector_subl_high_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV4U32, kArgTyV4U32)
+
+// vecTy2 vector_subw_low(vecTy2 src1, vecTy1 src2)
+// Subtract each element of the narrow second source vector from the
+// corresponding element of the wide first source vector.
+DEF_MIR_INTRINSIC(vector_subw_low_v8i8, "vector_subw_low_v8i8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4i16, "vector_subw_low_v4i16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2i32, "vector_subw_low_v2i32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_subw_low_v8u8, "vector_subw_low_v8u8",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4u16, "vector_subw_low_v4u16",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2u32, "vector_subw_low_v2u32",
+                  INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV2U32)
+
+// vecTy2 vector_subw_high(vecTy2 src1, vecTy1 src2)
+// Subtract each element of the upper half of the narrow second source vector
+// from the corresponding element of the wide first source vector.
+DEF_MIR_INTRINSIC(vector_subw_high_v8i8, "vector_subw_high_v8i8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_subw_high_v4i16, "vector_subw_high_v4i16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_subw_high_v2i32, "vector_subw_high_v2i32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_subw_high_v8u8, "vector_subw_high_v8u8", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_subw_high_v4u16, "vector_subw_high_v4u16", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_subw_high_v2u32, "vector_subw_high_v2u32", + INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.def b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.def new file mode 100644 index 0000000000000000000000000000000000000000..43175408e2d5a12d0b6ff222134813a59d387136 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.def @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(UNDEFINED,\ + nullptr, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_INC,\ + "__dex_ainc", kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_DEC,\ + "__dex_adec", kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_ATOMIC_EXCHANGE_PTR,\ + "__mpl_atomic_exchange_ptr", kIntrnIsAtomic, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLINIT_CHECK,\ + "__mpl_clinit_check", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_PROF_COUNTER_INC,\ + "__mpl_prof_counter_inc", INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEAR_STACK,\ + "__mpl_clear_stack", kIntrnUndef, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_GET_VTAB_FUNC,\ + "MCC_getFuncPtrFromVtab", kIntrnUndef, kArgTyA64, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_STATIC_OFFSET_TAB,\ + "__mpl_read_static_offset", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY,\ + "__mpl_const_offset", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY2,\ + "__mpl_const_offset2", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_LAZY,\ + "__mpl_const_offset_lazy", INTRNNOSIDEEFFECT, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_VTAB_LAZY,\ + "__mpl_const_offset_vtab_lazy", INTRNISPURE, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_FIELD_LAZY,\ + "__mpl_const_offset_field_lazy", INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_BOUNDARY_CHECK,\ + "", INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyU1, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_ARRAYCLASS_CACHE_ENTRY,\ + "__mpl_const_arrayclass_cache", kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) + +// start of RC Intrinsics with one parameters +DEF_MIR_INTRINSIC(MCCSetPermanent,\ + "MCC_SetObjectPermanent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncRef,\ + "MCC_IncRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRef,\ + "MCC_DecRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRefReset,\ + "MCC_ClearLocalStackRef", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadRefSVol,\ + "MCC_LoadVolatileStaticField", INTRNISRC | 
INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefS,\ + "MCC_LoadRefStatic", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCSetObjectPermanent,\ + "MCC_SetObjectPermanent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of RC Intrinsics with two parameters +DEF_MIR_INTRINSIC(MCCCheck,\ + "MCC_CheckRefCount", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyU32) +DEF_MIR_INTRINSIC(MCCCheckArrayStore,\ + "MCC_Reflect_Check_Arraystore", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRef,\ + "MCC_IncDecRef_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRefReset,\ + "MCC_IncDecRefReset", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCDecRefResetPair,\ + "MCC_DecRefResetPair", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadWeakVol,\ + "MCC_LoadVolatileWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadWeak,\ + "MCC_LoadWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRef,\ + "MCC_LoadRefField_NaiveRCFast", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefVol,\ + "MCC_LoadVolatileField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteReferent,\ + "MCC_WriteReferent", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoInc,\ + "MCC_WriteVolatileStaticFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoDec,\ + "MCC_WriteVolatileStaticFieldNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoRC,\ + "MCC_WriteVolatileStaticFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVol,\ + "MCC_WriteVolatileStaticField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoInc,\ + "MCC_WriteRefFieldStaticNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoDec,\ + "MCC_WriteRefFieldStaticNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoRC,\ + "MCC_WriteRefFieldStaticNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteS,\ + "MCC_WriteRefFieldStatic", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) + +// start of RC intrinsics with three parameters +DEF_MIR_INTRINSIC(MCCWriteVolNoInc,\ + "MCC_WriteVolatileFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoDec,\ + "MCC_WriteVolatileFieldNoDec", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoRC,\ + "MCC_WriteVolatileFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVol,\ + "MCC_WriteVolatileField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoInc,\ + "MCC_WriteRefFieldNoInc", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoDec,\ + "MCC_WriteRefFieldNoDec", INTRNISRC | 
INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoRC,\ + "MCC_WriteRefFieldNoRC", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWrite,\ + "MCC_WriteRefField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolWeak,\ + "MCC_WriteVolatileWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteWeak,\ + "MCC_WriteWeakField", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) + +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS,\ + "__mpl_cleanup_localrefvars", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS_SKIP,\ + "__mpl_cleanup_localrefvars_skip", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_MEMSET_LOCALVAR,\ + "", kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyU8, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_SET_CLASS,\ + "", kIntrnUndef, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_NORETESCOBJS,\ + "__mpl_cleanup_noretescobjs", INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef,\ + kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) + +// start of GC Intrinsics +DEF_MIR_INTRINSIC(MCCGCCheck,\ + "MCC_CheckObjAllocated", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of Profile Intrinsics +DEF_MIR_INTRINSIC(MCCSaveProf,\ + "MCC_SaveProfile", INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +#include "intrinsic_java.def" +#include "simplifyintrinsics.def" +#include "intrinsic_c.def" +#include "intrinsic_js.def" +#include "intrinsic_js_eng.def" +#include "dex2mpl/dexintrinsic.def" +#include "intrinsic_dai.def" +#include "intrinsic_vector.def" +DEF_MIR_INTRINSIC(LAST,\ + nullptr, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.h b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..5268e7ba40ef76435374fdff74fa54899546342f --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/intrinsics.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MAPLE_IR_INCLUDE_INTRINSICS_H
+#define MAPLE_IR_INCLUDE_INTRINSICS_H
+#include "prim_types.h"
+#include "intrinsic_op.h"
+
+namespace maple {
+enum IntrinProperty {
+    kIntrnUndef,
+    kIntrnIsJs,
+    kIntrnIsJsUnary,
+    kIntrnIsJsBinary,
+    kIntrnIsJava,
+    kIntrnIsJavaUnary,
+    kIntrnIsJavaBinary,
+    kIntrnIsReturnStruct,
+    kIntrnNoSideEffect,
+    kIntrnIsLoadMem,
+    kIntrnIsPure,
+    kIntrnNeverReturn,
+    kIntrnIsAtomic,
+    kIntrnIsRC,
+    kIntrnIsSpecial,
+    kIntrnIsVector
+};
+
+enum IntrinArgType {
+    kArgTyUndef,
+    kArgTyVoid,
+    kArgTyI8,
+    kArgTyI16,
+    kArgTyI32,
+    kArgTyI64,
+    kArgTyU8,
+    kArgTyU16,
+    kArgTyU32,
+    kArgTyU64,
+    kArgTyU1,
+    kArgTyPtr,
+    kArgTyRef,
+    kArgTyA32,
+    kArgTyA64,
+    kArgTyF32,
+    kArgTyF64,
+    kArgTyF128,
+    kArgTyC64,
+    kArgTyC128,
+    kArgTyAgg,
+    kArgTyV2I64,
+    kArgTyV4I32,
+    kArgTyV8I16,
+    kArgTyV16I8,
+    kArgTyV2U64,
+    kArgTyV4U32,
+    kArgTyV8U16,
+    kArgTyV16U8,
+    kArgTyV2F64,
+    kArgTyV4F32,
+    kArgTyV1I64,
+    kArgTyV2I32,
+    kArgTyV4I16,
+    kArgTyV8I8,
+    kArgTyV1U64,
+    kArgTyV2U32,
+    kArgTyV4U16,
+    kArgTyV8U8,
+    kArgTyV1F64,
+    kArgTyV2F32,
+#ifdef DYNAMICLANG
+    kArgTyDynany,
+    kArgTyDynu32,
+    kArgTyDyni32,
+    kArgTyDynundef,
+    kArgTyDynnull,
+    kArgTyDynhole,
+    kArgTyDynbool,
+    kArgTyDynf64,
+    kArgTyDynf32,
+    kArgTySimplestr,
+    kArgTyDynstr,
+    kArgTySimpleobj,
+    kArgTyDynobj
+#endif
+};
+
+constexpr uint32 INTRNISJS = 1U << kIntrnIsJs;
+constexpr uint32 INTRNISJSUNARY = 1U << kIntrnIsJsUnary;
+constexpr uint32 INTRNISJSBINARY = 1U << kIntrnIsJsBinary;
+constexpr uint32 INTRNISJAVA = 1U << kIntrnIsJava;
+constexpr uint32 INTRNNOSIDEEFFECT = 1U << kIntrnNoSideEffect;
+constexpr uint32 INTRNRETURNSTRUCT = 1U << kIntrnIsReturnStruct;
+constexpr uint32 INTRNLOADMEM = 1U << kIntrnIsLoadMem;
+constexpr uint32 INTRNISPURE = 1U << kIntrnIsPure;
+constexpr uint32 INTRNNEVERRETURN = 1U << kIntrnNeverReturn;
+constexpr uint32 INTRNATOMIC = 1U << kIntrnIsAtomic;
+constexpr uint32 INTRNISRC = 1U << kIntrnIsRC;
+constexpr uint32 INTRNISSPECIAL = 1U << kIntrnIsSpecial;
+constexpr uint32 INTRNISVECTOR = 1U << kIntrnIsVector;
+class MIRType;    // circular dependency exists, no other choice
+class MIRModule;  // circular dependency exists, no other choice
+struct IntrinDesc {
+    static constexpr int kMaxArgsNum = 7;
+    const char *name;
+    uint32 properties;
+    IntrinArgType argTypes[1 + kMaxArgsNum];  // argTypes[0] is the return type
+    bool IsJS() const
+    {
+        return static_cast<bool>(properties & INTRNISJS);
+    }
+
+    bool IsJava() const
+    {
+        return static_cast<bool>(properties & INTRNISJAVA);
+    }
+
+    bool IsJsUnary() const
+    {
+        return static_cast<bool>(properties & INTRNISJSUNARY);
+    }
+
+    bool IsJsBinary() const
+    {
+        return static_cast<bool>(properties & INTRNISJSBINARY);
+    }
+
+    bool IsJsOp() const
+    {
+        return static_cast<bool>(properties & INTRNISJSUNARY) || static_cast<bool>(properties & INTRNISJSBINARY);
+    }
+
+    bool IsLoadMem() const
+    {
+        return static_cast<bool>(properties & INTRNLOADMEM);
+    }
+
+    bool IsJsReturnStruct() const
+    {
+        return static_cast<bool>(properties & INTRNRETURNSTRUCT);
+    }
+
+    bool IsPure() const
+    {
+        return static_cast<bool>(properties & INTRNISPURE);
+    }
+
+    bool IsNeverReturn() const
+    {
+        return static_cast<bool>(properties & INTRNNEVERRETURN);
+    }
+
+    bool IsAtomic() const
+    {
+        return static_cast<bool>(properties & INTRNATOMIC);
+    }
+
+    bool IsRC() const
+    {
+        return static_cast<bool>(properties & INTRNISRC);
+    }
+
+    bool IsSpecial() const
+    {
+        return static_cast<bool>(properties & INTRNISSPECIAL);
+    }
+
+    bool HasNoSideEffect() const
+    {
+        return properties & INTRNNOSIDEEFFECT;
+    }
+
+    bool IsVectorOp() const
+    {
+        return static_cast<bool>(properties & INTRNISVECTOR);
+    }
+
+    MIRType *GetReturnType() const;
+    MIRType *GetArgType(uint32 index) const;
+    MIRType *GetTypeFromArgTy(IntrinArgType argType) const;
+    static MIRType *jsValueType;
+    static MIRModule *mirModule;
+    static void InitMIRModule(MIRModule *mirModule);
+    static MIRType *GetOrCreateJSValueType();
+    static IntrinDesc intrinTable[INTRN_LAST + 1];
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_INTRINSICS_H
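(Editorial aside, not part of the patch: a minimal sketch of how the property bits above are typically consulted. Only `IntrinDesc`, `intrinTable`, and the accessors come from the header; the `CanHoistIntrinsic` helper is hypothetical.)

// Sketch only, assuming the usual INTRN_<NAME> enum generated from intrinsics.def.
#include "intrinsics.h"

bool CanHoistIntrinsic(maple::MIRIntrinsicID id)
{
    const maple::IntrinDesc &desc = maple::IntrinDesc::intrinTable[id];
    // Pure or side-effect-free intrinsics are candidates for reordering;
    // "special" ones must stay where the front end placed them.
    return (desc.IsPure() || desc.HasNoSideEffect()) && !desc.IsSpecial();
}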
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/ir_safe_cast_traits.def b/ecmascript/compiler/codegen/maple/maple_ir/include/ir_safe_cast_traits.def
new file mode 100644
index 0000000000000000000000000000000000000000..dc4714613880b712e8e5e2295f5067ea409f6e9e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/ir_safe_cast_traits.def
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include
+#include "opcode_info.h"
+
+namespace maple {
+#ifdef LOAD_SAFE_CAST_FOR_MIR_CONST
+#undef LOAD_SAFE_CAST_FOR_MIR_CONST
+REGISTER_SAFE_CAST(MIRIntConst, from.GetKind() == kConstInt);
+REGISTER_SAFE_CAST(MIRAddrofConst, from.GetKind() == kConstAddrof);
+REGISTER_SAFE_CAST(MIRAddroffuncConst, from.GetKind() == kConstAddrofFunc);
+REGISTER_SAFE_CAST(MIRLblConst, from.GetKind() == kConstLblConst);
+REGISTER_SAFE_CAST(MIRStrConst, from.GetKind() == kConstStrConst);
+REGISTER_SAFE_CAST(MIRStr16Const, from.GetKind() == kConstStr16Const);
+REGISTER_SAFE_CAST(MIRFloatConst, from.GetKind() == kConstFloatConst);
+REGISTER_SAFE_CAST(MIRDoubleConst, from.GetKind() == kConstDoubleConst);
+REGISTER_SAFE_CAST(MIRFloat128Const, from.GetKind() == kConstFloat128Const);
+REGISTER_SAFE_CAST(MIRAggConst, from.GetKind() == kConstAggConst);
+REGISTER_SAFE_CAST(MIRStConst, from.GetKind() == kConstStConst);
+#endif
+
+#ifdef LOAD_SAFE_CAST_FOR_MIR_TYPE
+#undef LOAD_SAFE_CAST_FOR_MIR_TYPE
+REGISTER_SAFE_CAST(MIRPtrType, from.GetKind() == kTypePointer);
+REGISTER_SAFE_CAST(MIRArrayType, from.GetKind() == kTypeArray);
+REGISTER_SAFE_CAST(MIRFarrayType, from.GetKind() == kTypeFArray ||
+                                  instance_of<MIRJarrayType>(from));
+REGISTER_SAFE_CAST(MIRStructType, from.GetKind() == kTypeStruct ||
+                                  from.GetKind() == kTypeStructIncomplete ||
+                                  from.GetKind() == kTypeUnion ||
+                                  instance_of<MIRClassType>(from) ||
+                                  instance_of<MIRInterfaceType>(from));
+REGISTER_SAFE_CAST(MIRJarrayType, from.GetKind() == kTypeJArray);
+REGISTER_SAFE_CAST(MIRClassType, from.GetKind() == kTypeClass ||
+                                 from.GetKind() == kTypeClassIncomplete);
+REGISTER_SAFE_CAST(MIRInterfaceType, from.GetKind() == kTypeInterface ||
+                                     from.GetKind() == kTypeInterfaceIncomplete);
+REGISTER_SAFE_CAST(MIRBitFieldType, from.GetKind() == kTypeBitField);
+REGISTER_SAFE_CAST(MIRFuncType, from.GetKind() == kTypeFunction);
+REGISTER_SAFE_CAST(MIRTypeByName, from.GetKind() == kTypeByName);
+REGISTER_SAFE_CAST(MIRTypeParam, from.GetKind() == kTypeParam);
+REGISTER_SAFE_CAST(MIRInstantVectorType, from.GetKind() == kTypeInstantVector);
+REGISTER_SAFE_CAST(MIRGenericInstantType, from.GetKind() == kTypeGenericInstant);
+#endif
+
+#ifdef LOAD_SAFE_CAST_FOR_MIR_NODE
+#undef LOAD_SAFE_CAST_FOR_MIR_NODE
+REGISTER_SAFE_CAST(UnaryNode, from.GetOpCode() == OP_abs ||
+                              from.GetOpCode() == OP_bnot ||
+                              from.GetOpCode() == OP_lnot ||
+                              from.GetOpCode() == OP_neg ||
+                              from.GetOpCode() == OP_recip ||
+                              from.GetOpCode() == OP_sqrt ||
+                              from.GetOpCode() == OP_alloca ||
+                              from.GetOpCode() == OP_malloc ||
+                              instance_of<TypeCvtNode>(from) ||
+                              instance_of<ExtractbitsNode>(from) ||
+                              instance_of<GCMallocNode>(from) ||
+                              instance_of<JarrayMallocNode>(from) ||
+                              instance_of<IreadNode>(from));
+REGISTER_SAFE_CAST(TypeCvtNode, from.GetOpCode() == OP_ceil ||
+                                from.GetOpCode() == OP_cvt ||
+                                from.GetOpCode() == OP_floor ||
+                                from.GetOpCode() == OP_round ||
+                                from.GetOpCode() == OP_trunc ||
+                                instance_of<RetypeNode>(from));
+REGISTER_SAFE_CAST(RetypeNode, from.GetOpCode() == OP_retype);
+REGISTER_SAFE_CAST(ExtractbitsNode, from.GetOpCode() == OP_extractbits ||
+                                    from.GetOpCode() == OP_sext ||
+                                    from.GetOpCode() == OP_zext);
+REGISTER_SAFE_CAST(GCMallocNode, from.GetOpCode() == OP_gcmalloc ||
+                                 from.GetOpCode() == OP_gcpermalloc);
+REGISTER_SAFE_CAST(JarrayMallocNode, from.GetOpCode() == OP_gcmallocjarray ||
+                                     from.GetOpCode() == OP_gcpermallocjarray);
+REGISTER_SAFE_CAST(IreadNode, from.GetOpCode() == OP_iread ||
+                              from.GetOpCode() == OP_iaddrof);
+REGISTER_SAFE_CAST(IreadoffNode, from.GetOpCode() == OP_ireadoff);
+REGISTER_SAFE_CAST(IreadFPoffNode, from.GetOpCode() == OP_ireadfpoff);
+REGISTER_SAFE_CAST(BinaryNode, from.GetOpCode() == OP_add ||
+                               from.GetOpCode() == OP_sub ||
+                               from.GetOpCode() == OP_mul ||
+                               from.GetOpCode() == OP_div ||
+                               from.GetOpCode() == OP_rem ||
+                               from.GetOpCode() == OP_ashr ||
+                               from.GetOpCode() == OP_lshr ||
+                               from.GetOpCode() == OP_shl ||
+                               from.GetOpCode() == OP_max ||
+                               from.GetOpCode() == OP_min ||
+                               from.GetOpCode() == OP_band ||
+                               from.GetOpCode() == OP_bior ||
+                               from.GetOpCode() == OP_bxor ||
+                               from.GetOpCode() == OP_CG_array_elem_add ||
+                               from.GetOpCode() == OP_land ||
+                               from.GetOpCode() == OP_lior ||
+                               from.GetOpCode() == OP_cand ||
+                               from.GetOpCode() == OP_cior ||
+                               instance_of<CompareNode>(from) ||
+                               instance_of<DepositbitsNode>(from) ||
+                               instance_of<ResolveFuncNode>(from));
+REGISTER_SAFE_CAST(CompareNode, from.GetOpCode() == OP_eq ||
+                                from.GetOpCode() == OP_ge ||
+                                from.GetOpCode() == OP_gt ||
+                                from.GetOpCode() == OP_le ||
+                                from.GetOpCode() == OP_lt ||
+                                from.GetOpCode() == OP_ne ||
+                                from.GetOpCode() == OP_cmp ||
+                                from.GetOpCode() == OP_cmpl ||
+                                from.GetOpCode() == OP_cmpg);
+REGISTER_SAFE_CAST(DepositbitsNode, from.GetOpCode() == OP_depositbits);
+REGISTER_SAFE_CAST(ResolveFuncNode, from.GetOpCode() == OP_resolveinterfacefunc ||
+                                    from.GetOpCode() == OP_resolvevirtualfunc);
+REGISTER_SAFE_CAST(TernaryNode, from.GetOpCode() == OP_select);
+REGISTER_SAFE_CAST(NaryNode, instance_of<IntrinsicopNode>(from) ||
+                             instance_of<ArrayNode>(from));
+REGISTER_SAFE_CAST(IntrinsicopNode, from.GetOpCode() == OP_intrinsicop ||
+                                    from.GetOpCode() == OP_intrinsicopwithtype);
+REGISTER_SAFE_CAST(ConstvalNode, from.GetOpCode() == OP_constval);
+REGISTER_SAFE_CAST(ConststrNode, from.GetOpCode() == OP_conststr);
+REGISTER_SAFE_CAST(Conststr16Node, from.GetOpCode() == OP_conststr16);
+REGISTER_SAFE_CAST(SizeoftypeNode, from.GetOpCode() == OP_sizeoftype);
+REGISTER_SAFE_CAST(FieldsDistNode, from.GetOpCode() == OP_fieldsdist);
+REGISTER_SAFE_CAST(ArrayNode, from.GetOpCode() == OP_array);
+REGISTER_SAFE_CAST(AddrofNode, from.GetOpCode() == OP_dread ||
+                               from.GetOpCode() == OP_addrof);
+REGISTER_SAFE_CAST(RegreadNode, from.GetOpCode() == OP_regread);
+REGISTER_SAFE_CAST(AddroffuncNode, from.GetOpCode() == OP_addroffunc);
+REGISTER_SAFE_CAST(AddroflabelNode, from.GetOpCode() == OP_addroflabel);
+REGISTER_SAFE_CAST(StmtNode, from.GetOpCode() == OP_finally ||
+                             from.GetOpCode() == OP_cleanuptry ||
+                             from.GetOpCode() == OP_endtry ||
+                             from.GetOpCode() == OP_retsub ||
+                             from.GetOpCode() == OP_membaracquire ||
+                             from.GetOpCode() == OP_membarrelease ||
+                             from.GetOpCode() == OP_membarstoreload ||
+                             from.GetOpCode() == OP_membarstorestore ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from) ||
+                             instance_of(from));
+REGISTER_SAFE_CAST(IassignNode, from.GetOpCode() == OP_iassign);
+REGISTER_SAFE_CAST(GotoNode, from.GetOpCode() == OP_goto ||
+                             from.GetOpCode() == OP_gosub);
+REGISTER_SAFE_CAST(JsTryNode, from.GetOpCode() == OP_jstry);
+REGISTER_SAFE_CAST(TryNode, from.GetOpCode() == OP_try);
+REGISTER_SAFE_CAST(CatchNode, from.GetOpCode() == OP_catch);
+REGISTER_SAFE_CAST(SwitchNode, from.GetOpCode() == OP_switch);
+REGISTER_SAFE_CAST(MultiwayNode, from.GetOpCode() == OP_multiway);
+REGISTER_SAFE_CAST(UnaryStmtNode, from.GetOpCode() == OP_eval ||
+                                  from.GetOpCode() == OP_throw ||
+                                  from.GetOpCode() == OP_free ||
+                                  from.GetOpCode() == OP_decref ||
+                                  from.GetOpCode() == OP_incref ||
+                                  from.GetOpCode() == OP_decrefreset ||
+                                  (kOpcodeInfo.IsAssertNonnull(from.GetOpCode()) &&
+                                   !kOpcodeInfo.IsCallAssertNonnull(from.GetOpCode())) ||
+                                  instance_of<DassignNode>(from) ||
+                                  instance_of<RegassignNode>(from) ||
+                                  instance_of<CondGotoNode>(from) ||
+                                  instance_of<RangeGotoNode>(from) ||
+                                  instance_of<IfStmtNode>(from) ||
+                                  instance_of<WhileStmtNode>(from) ||
+                                  instance_of<IassignFPoffNode>(from));
+REGISTER_SAFE_CAST(CallAssertNonnullStmtNode, from.GetOpCode() == OP_callassertnonnull);
+REGISTER_SAFE_CAST(DassignNode, from.GetOpCode() == OP_dassign ||
+                                from.GetOpCode() == OP_maydassign);
+REGISTER_SAFE_CAST(RegassignNode, from.GetOpCode() == OP_regassign);
+REGISTER_SAFE_CAST(CondGotoNode, from.GetOpCode() == OP_brtrue ||
+                                 from.GetOpCode() == OP_brfalse);
+REGISTER_SAFE_CAST(RangeGotoNode, from.GetOpCode() == OP_rangegoto);
+REGISTER_SAFE_CAST(BlockNode, from.GetOpCode() == OP_block);
+REGISTER_SAFE_CAST(IfStmtNode, from.GetOpCode() == OP_if);
+REGISTER_SAFE_CAST(WhileStmtNode, from.GetOpCode() == OP_while ||
+                                  from.GetOpCode() == OP_dowhile);
+REGISTER_SAFE_CAST(DoloopNode, from.GetOpCode() == OP_doloop);
+REGISTER_SAFE_CAST(ForeachelemNode, from.GetOpCode() == OP_foreachelem);
+REGISTER_SAFE_CAST(BinaryStmtNode, from.GetOpCode() == OP_assertge ||
+                                   from.GetOpCode() == OP_assertlt ||
+                                   instance_of<IassignoffNode>(from));
+REGISTER_SAFE_CAST(IassignoffNode, from.GetOpCode() == OP_iassignoff);
+REGISTER_SAFE_CAST(IassignFPoffNode, from.GetOpCode() == OP_iassignfpoff);
+REGISTER_SAFE_CAST(NaryStmtNode, from.GetOpCode() == OP_return ||
+                                 from.GetOpCode() == OP_syncenter ||
+                                 from.GetOpCode() == OP_syncexit ||
+                                 instance_of<CallNode>(from) ||
+                                 instance_of<IcallNode>(from) ||
+                                 instance_of<IntrinsiccallNode>(from));
+REGISTER_SAFE_CAST(CallNode, from.GetOpCode() == OP_call ||
+                             from.GetOpCode() == OP_virtualcall ||
+                             from.GetOpCode() == OP_superclasscall ||
+                             from.GetOpCode() == OP_interfacecall ||
+                             from.GetOpCode() == OP_customcall ||
+                             from.GetOpCode() == OP_polymorphiccall ||
+                             from.GetOpCode() == OP_interfaceicall ||
+                             from.GetOpCode() == OP_virtualicall ||
+                             from.GetOpCode() == OP_callassigned ||
+                             from.GetOpCode() == OP_virtualcallassigned ||
+                             from.GetOpCode() == OP_superclasscallassigned ||
+                             from.GetOpCode() == OP_interfacecallassigned ||
+                             from.GetOpCode() == OP_customcallassigned ||
+                             from.GetOpCode() == OP_polymorphiccallassigned ||
+                             from.GetOpCode() == OP_interfaceicallassigned ||
+                             from.GetOpCode() == OP_virtualicallassigned ||
+                             instance_of<CallinstantNode>(from));
+REGISTER_SAFE_CAST(IcallNode, from.GetOpCode() == OP_icall ||
+                              from.GetOpCode() == OP_icallassigned ||
+                              from.GetOpCode() == OP_icallproto ||
+                              from.GetOpCode() == OP_icallprotoassigned);
+REGISTER_SAFE_CAST(IntrinsiccallNode, from.GetOpCode() == OP_intrinsiccall ||
+                                      from.GetOpCode() == OP_intrinsiccallwithtype ||
+                                      from.GetOpCode() == OP_xintrinsiccall ||
+                                      from.GetOpCode() == OP_intrinsiccallassigned ||
+                                      from.GetOpCode() == OP_intrinsiccallwithtypeassigned ||
+                                      from.GetOpCode() == OP_xintrinsiccallassigned);
+REGISTER_SAFE_CAST(CallinstantNode, from.GetOpCode() == OP_callinstant ||
+                                    from.GetOpCode() == OP_virtualcallinstant ||
+                                    from.GetOpCode() == OP_superclasscallinstant ||
+                                    from.GetOpCode() == OP_interfacecallinstant ||
+                                    from.GetOpCode() == OP_callinstantassigned ||
+                                    from.GetOpCode() == OP_virtualcallinstantassigned ||
+                                    from.GetOpCode() == OP_superclasscallinstantassigned ||
+                                    from.GetOpCode() == OP_interfacecallinstantassigned);
+REGISTER_SAFE_CAST(LabelNode, from.GetOpCode() == OP_label);
+REGISTER_SAFE_CAST(CommentNode, from.GetOpCode() == OP_comment);
+#endif
+}
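(Editorial aside, not part of the patch: a hedged sketch of what these registrations buy. `safe_cast` and `instance_of` are the maple safe-cast utilities that consume this .def file; the visitor below is invented for illustration.)

// Sketch only: the predicates registered above let safe_cast verify the
// node's dynamic opcode before downcasting, instead of a blind static_cast.
void VisitNode(maple::BaseNode &node)
{
    if (auto *bin = maple::safe_cast<maple::BinaryNode>(node)) {
        // Reached only when node's opcode matched the BinaryNode predicate.
        HandleOperands(bin->Opnd(0), bin->Opnd(1));  // hypothetical consumer
    }
}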
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/java_eh_lower.h b/ecmascript/compiler/codegen/maple/maple_ir/include/java_eh_lower.h
new file mode 100644
index 0000000000000000000000000000000000000000..759a36564cdb334afc830322c3edcae65853f389
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/java_eh_lower.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H
+#define MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H
+#include "phase_impl.h"
+#include "class_hierarchy.h"
+#include "maple_phase_manager.h"
+
+namespace maple {
+class JavaEHLowerer : public FuncOptimizeImpl {
+public:
+    JavaEHLowerer(MIRModule &mod, KlassHierarchy *kh, bool dump) : FuncOptimizeImpl(mod, kh, dump) {}
+    ~JavaEHLowerer() = default;
+
+    FuncOptimizeImpl *Clone() override
+    {
+        return new JavaEHLowerer(*this);
+    }
+
+    void ProcessFunc(MIRFunction *func) override;
+
+private:
+    BlockNode *DoLowerBlock(BlockNode &);
+    BaseNode *DoLowerExpr(BaseNode &, BlockNode &);
+    BaseNode *DoLowerDiv(BinaryNode &, BlockNode &);
+    void DoLowerBoundaryCheck(IntrinsiccallNode &, BlockNode &);
+    BaseNode *DoLowerRem(BinaryNode &expr, BlockNode &blkNode)
+    {
+        return DoLowerDiv(expr, blkNode);
+    }
+
+    uint32 divSTIndex = 0;              // The index of divide operand and result.
+    bool useRegTmp = Options::usePreg;  // Use register to save temp variable or not. 
+}; + +MAPLE_MODULE_PHASE_DECLARE(M2MJavaEHLowerer) +} // namespace maple +#endif // MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/keywords.def b/ecmascript/compiler/codegen/maple/maple_ir/include/keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..8cfec8e416c614dcbb121f77de28c49c297966bb --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/keywords.def @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + // opcode keywords +#define OPCODE(X, Y, Z, S) KEYWORD(X) +#include "opcodes.def" +#undef OPCODE + // primitive types +#define LOAD_ALGO_PRIMARY_TYPE +#define PRIMTYPE(P) KEYWORD(P) +#include "prim_types.def" +#undef PRIMTYPE + // intrinsic names +#undef DEF_MIR_INTRINSIC +#define DEF_MIR_INTRINSIC(X, NAME, INTRN_CLASS, RETURN_TYPE, ...) KEYWORD(X) +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC + KEYWORD(else) + // declaration keywords + KEYWORD(var) + KEYWORD(tempvar) + KEYWORD(reg) + KEYWORD(type) + KEYWORD(func) + KEYWORD(struct) + KEYWORD(structincomplete) + KEYWORD(union) + KEYWORD(class) + KEYWORD(classincomplete) + KEYWORD(interfaceincomplete) + KEYWORD(javaclass) + KEYWORD(javainterface) + // type attribute keywords +#define FUNC_ATTR +#define TYPE_ATTR +#define FIELD_ATTR +#define ATTR(X) KEYWORD(X) +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +#undef TYPE_ATTR +#undef FIELD_ATTR + KEYWORD(align) + // per-function declaration keywords + KEYWORD(framesize) + KEYWORD(upformalsize) + KEYWORD(moduleid) + KEYWORD(funcsize) + KEYWORD(funcid) + KEYWORD(formalwordstypetagged) + KEYWORD(localwordstypetagged) + KEYWORD(formalwordsrefcounted) + KEYWORD(localwordsrefcounted) + // per-module declaration keywords + KEYWORD(flavor) + KEYWORD(srclang) + KEYWORD(globalmemsize) + KEYWORD(globalmemmap) + KEYWORD(globalwordstypetagged) + KEYWORD(globalwordsrefcounted) + KEYWORD(id) + KEYWORD(numfuncs) + KEYWORD(entryfunc) + // file related declaration keywords + KEYWORD(fileinfo) + KEYWORD(filedata) + KEYWORD(srcfileinfo) + KEYWORD(funcinfo) + // special float constants + KEYWORD(nanf) + KEYWORD(nan) + KEYWORD(inff) + KEYWORD(inf) + // pragma + KEYWORD(pragma) + KEYWORD(param) + KEYWORD(func_ex) + KEYWORD(func_var) + // staticvalue + KEYWORD(staticvalue) + // import + KEYWORD(import) + KEYWORD(importpath) + // source position information + KEYWORD(LOC) + // scope and source var to mpl var mapping + KEYWORD(SCOPE) + KEYWORD(ALIAS) + // storage class + KEYWORD(pstatic) + KEYWORD(fstatic) + // file-scope asm + KEYWORD(asmdecl) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/lexer.h b/ecmascript/compiler/codegen/maple/maple_ir/include/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..8560a131000cee595cad94de11bdaa523a231126 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/lexer.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2023 Huawei Device Co., 
Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_LEXER_H
+#define MAPLE_IR_INCLUDE_LEXER_H
+#include "cstdio"
+#include <fstream>
+#include "types_def.h"
+#include "tokens.h"
+#include "mempool_allocator.h"
+#include "mir_module.h"
+
+namespace maple {
+class MIRParser;  // circular dependency exists, no other choice
+class MIRLexer {
+    friend MIRParser;
+
+public:
+    explicit MIRLexer(MIRModule &mod);
+    ~MIRLexer()
+    {
+        airFile = nullptr;
+        if (airFileInternal.is_open()) {
+            airFileInternal.close();
+        }
+    }
+
+    void PrepareForFile(const std::string &filename);
+    void PrepareForString(const std::string &src);
+    TokenKind NextToken();
+    TokenKind LexToken();
+    TokenKind GetTokenKind() const
+    {
+        return kind;
+    }
+
+    uint32 GetLineNum() const
+    {
+        return lineNum;
+    }
+
+    uint32 GetCurIdx() const
+    {
+        return curIdx;
+    }
+
+    // get the identifier name after the % or $ prefix
+    const std::string &GetName() const
+    {
+        return name;
+    }
+
+    uint64 GetTheIntVal() const
+    {
+        return theIntVal;
+    }
+
+    float GetTheFloatVal() const
+    {
+        return theFloatVal;
+    }
+
+    double GetTheDoubleVal() const
+    {
+        return theDoubleVal;
+    }
+
+    std::string GetTokenString() const;  // for error reporting purpose
+
+private:
+    MIRModule &module;
+    // for storing the different types of constant values
+    uint64 theIntVal = 0;  // also indicates preg number under TK_preg
+    float theFloatVal = 0.0;
+    double theDoubleVal = 0.0;
+    MapleVector<std::string> seenComments;
+    std::ifstream *airFile = nullptr;
+    std::ifstream airFileInternal;
+    std::string line;
+    size_t lineBufSize = 0;  // the allocated size of line(buffer).
+    uint32 currentLineSize = 0;
+    uint32 curIdx = 0;
+    uint32 lineNum = 0;
+    TokenKind kind = TK_invalid;
+    std::string name = "";  // store the name token without the % or $ prefix
+    MapleUnorderedMap<std::string, TokenKind> keywordMap;
+    std::queue<std::string> mirQueue;
+    bool needFile = true;
+    void RemoveReturnInline(std::string &line)
+    {
+        if (line.empty()) {
+            return;
+        }
+        if (line.back() == '\n') {
+            line.pop_back();
+        }
+        if (line.back() == '\r') {
+            line.pop_back();
+        }
+    }
+
+    int ReadALine();             // read a line from MIR (text) file.
+    int ReadALineByMirQueue();   // read a line from MIR Queue.
+    void GenName();
+    TokenKind GetConstVal();
+    TokenKind GetSpecialFloatConst();
+    TokenKind GetHexConst(uint32 valStart, bool negative);
+    TokenKind GetIntConst(uint32 valStart, bool negative);
+    TokenKind GetFloatConst(uint32 valStart, uint32 startIdx, bool negative);
+    TokenKind GetSpecialTokenUsingOneCharacter(char c);
+    TokenKind GetTokenWithPrefixDollar();
+    TokenKind GetTokenWithPrefixPercent();
+    TokenKind GetTokenWithPrefixAmpersand();
+    TokenKind GetTokenWithPrefixAtOrCircumflex(char prefix);
+    TokenKind GetTokenWithPrefixExclamation();
+    TokenKind GetTokenWithPrefixQuotation();
+    TokenKind GetTokenWithPrefixDoubleQuotation();
+    TokenKind GetTokenSpecial();
+
+    char GetCharAt(uint32 idx) const
+    {
+        return line[idx];
+    }
+
+    char GetCharAtWithUpperCheck(uint32 idx) const
+    {
+        return idx < currentLineSize ? 
line[idx] : 0; + } + + char GetCharAtWithLowerCheck(uint32 idx) const + { + return idx >= 0 ? line[idx] : 0; + } + + char GetCurrentCharWithUpperCheck() + { + return curIdx < currentLineSize ? line[curIdx] : 0; + } + + char GetNextCurrentCharWithUpperCheck() + { + ++curIdx; + return curIdx < currentLineSize ? line[curIdx] : 0; + } + + void SetFile(std::ifstream &file) + { + airFile = &file; + } + + std::ifstream *GetFile() const + { + return airFile; + } + + void SetMirQueue(const std::string &fileText) + { + StringUtils::Split(fileText, mirQueue, '\n'); + needFile = false; + } +}; + +inline bool IsPrimitiveType(TokenKind tk) +{ + return (tk >= TK_void) && (tk < TK_unknown); +} + +inline bool IsVarName(TokenKind tk) +{ + return (tk == TK_lname) || (tk == TK_gname); +} + +inline bool IsExprBinary(TokenKind tk) +{ + return (tk >= TK_add) && (tk <= TK_sub); +} + +inline bool IsConstValue(TokenKind tk) +{ + return (tk >= TK_intconst) && (tk <= TK_doubleconst); +} + +inline bool IsConstAddrExpr(TokenKind tk) +{ + return (tk == TK_addrof) || (tk == TK_addroffunc) || (tk == TK_addroflabel) || (tk == TK_conststr) || + (tk == TK_conststr16); +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_LEXER_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/memory_order_attrs.def b/ecmascript/compiler/codegen/maple/maple_ir/include/memory_order_attrs.def new file mode 100644 index 0000000000000000000000000000000000000000..5575bb0ec9115e699877b03dd90449ba867b9c9a --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/memory_order_attrs.def @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + ATTR(memory_order_relaxed) + ATTR(memory_order_consume) + ATTR(memory_order_acquire) + ATTR(memory_order_release) + ATTR(memory_order_acq_rel) + ATTR(memory_order_seq_cst) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/metadata_layout.h b/ecmascript/compiler/codegen/maple/maple_ir/include/metadata_layout.h new file mode 100644 index 0000000000000000000000000000000000000000..9e546054c550e3141838c7b078705fe484065e7a --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/metadata_layout.h @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef METADATA_LAYOUT_H
+#define METADATA_LAYOUT_H
+#include <cstdint>
+
+// metadata layout is shared between maple compiler and runtime, thus not in namespace maplert
+// some of the reference field of metadata is stored as relative offset
+// for example, declaring class of Fields/Methods
+// which can be negative
+#ifdef USE_32BIT_REF
+using MetaRef = uint32_t;   // consistent with reffield_t in address.h
+#else
+using MetaRef = uintptr_t;  // consistent with reffield_t in address.h
+#endif  // USE_32BIT_REF
+
+// DataRefOffset aims to represent a reference to data in maple file, which is already an offset.
+// DataRefOffset is meant to have pointer size.
+// All Xx32 data types defined in this file aim to use 32 bits to save 64-bit address, and thus are
+// specific for 64-bit platforms.
+struct DataRefOffset32 {
+    int32_t refOffset;
+    template <typename T>
+    inline void SetDataRef(T ref);
+    template <typename T>
+    inline T GetDataRef() const;
+    inline int32_t GetRawValue() const;
+    inline void SetRawValue(int32_t value);
+};
+
+struct DataRefOffsetPtr {
+    intptr_t refOffset;
+    template <typename T>
+    inline void SetDataRef(T ref);
+    template <typename T>
+    inline T GetDataRef() const;
+    inline intptr_t GetRawValue() const;
+    inline void SetRawValue(intptr_t value);
+};
+
+struct DataRefOffset {
+#ifdef USE_32BIT_REF
+    DataRefOffset32 refOffset;
+#else
+    DataRefOffsetPtr refOffset;
+#endif
+    template <typename T>
+    inline void SetDataRef(T ref);
+    template <typename T>
+    inline T GetDataRef() const;
+    inline intptr_t GetRawValue() const;
+    inline void SetRawValue(intptr_t value);
+};
+
+struct MethodFieldRef {
+    // MethodFieldRef aims to represent a reference to fields/methods in maple file, which is already an offset.
+    // also, offset LSB may set 1, to indicate that it is compact fields/methods.
+    enum MethodFieldRefFormat {
+        kMethodFieldRefIsCompact = 1,
+    };
+    DataRefOffsetPtr refOffset;
+    template <typename T>
+    inline void SetDataRef(T ref);
+    template <typename T>
+    inline T GetDataRef() const;
+    inline bool IsCompact() const;
+    template <typename T>
+    inline T GetCompactData() const;
+    inline intptr_t GetRawValue() const;
+    inline void SetRawValue(intptr_t value);
+};
+
+// DataRef aims for reference to data in maple file (generated by maple compiler) and is aligned to at least 4 bytes.
+// Perhaps MDataRef is more fit, still DataRef is chosen to make it common.
+// DataRef allows 4 formats of value:
+//   0. "label_name" for direct reference
+//   1. "label_name - . + 1" for padding unused
+//   2. "label_name - . + 2" for reference in offset format
+//   3. "indirect.label_name - . + 3" for indirect reference
+//      this format aims to support lld which does not support expression "global_symbol - ."
+// DataRef is self-decoded by also encoding the format and is defined for binary compatibility.
+// If no compatibility problem is involved, DataRefOffsetPtr is preferred.
+enum DataRefFormat {
+    kDataRefIsDirect = 0,    // must be 0
+    kDataRefPadding = 1,     // unused
+    kDataRefIsOffset = 2,
+    kDataRefIsIndirect = 3,  // read-only
+    kDataRefBitMask = 3,
+};
+
+struct DataRef32 {
+    // be careful when *refVal* is treated as an offset which is a signed integer actually.
+    uint32_t refVal;
+    template <typename T>
+    inline T GetDataRef() const;
+    template <typename T>
+    inline void SetDataRef(T ref, DataRefFormat format = kDataRefIsDirect);
+    template <typename T>
+    inline T GetRawValue() const;
+};
+
+struct DataRef {
+    uintptr_t refVal;
+    template <typename T>
+    inline T GetDataRef() const;
+    template <typename T>
+    inline void SetDataRef(const T ref, const DataRefFormat format = kDataRefIsDirect);
+    template <typename T>
+    inline T GetRawValue() const;
+};
+// GctibRef aims to represent a reference to gctib in maple file, which is an offset by default.
+// GctibRef is meant to have pointer size and aligned to at least 4 bytes.
+// GctibRef allows 2 formats of value:
+//   0. "label_name - . + 0" for reference in offset format
+//   1. "indirect.label_name - . + 1" for indirect reference
+//      this format aims to support lld which does not support expression "global_symbol - ."
+// GctibRef is self-decoded by also encoding the format and is defined for binary compatibility.
+// If no compatibility problem is involved, DataRef is preferred.
+enum GctibRefFormat {
+    kGctibRefIsOffset = 0,  // default
+    kGctibRefIsIndirect = 1,
+    kGctibRefBitMask = 3
+};
+
+struct GctibRef32 {
+    // be careful when *refVal* is treated as an offset which is a signed integer actually.
+    uint32_t refVal;
+    template <typename T>
+    inline T GetGctibRef() const;
+    template <typename T>
+    inline void SetGctibRef(T ref, GctibRefFormat format = kGctibRefIsOffset);
+};
+
+struct GctibRef {
+    uintptr_t refVal;
+    template <typename T>
+    inline T GetGctibRef() const;
+    template <typename T>
+    inline void SetGctibRef(const T ref, const GctibRefFormat format = kGctibRefIsOffset);
+};
+
+// MByteRef is meant to represent a reference to data defined in maple file. It is a direct reference or an offset.
+// MByteRef is self-encoded/decoded and aligned to 1 byte.
+// Unlike DataRef, the format of MByteRef is determined by its value.
+struct MByteRef {
+    uintptr_t refVal;  // initializer prefers this field to be a pointer
+
+#if defined(__arm__) || defined(USE_ARM32_MACRO)
+    // assume address range 0 ~ 256MB is unused in arm runtime
+    // kEncodedOffsetMin ~ kEncodedOffsetMax is the value range of encoded offset
+    static constexpr intptr_t kOffsetBound = 128 * 1024 * 1024;
+    static constexpr intptr_t kOffsetMin = -kOffsetBound;
+    static constexpr intptr_t kOffsetMax = kOffsetBound;
+
+    static constexpr intptr_t kPositiveOffsetBias = 128 * 1024 * 1024;
+    static constexpr intptr_t kEncodedOffsetMin = kPositiveOffsetBias + kOffsetMin;
+    static constexpr intptr_t kEncodedOffsetMax = kPositiveOffsetBias + kOffsetMax;
+#else
+    enum {
+        kBiasBitPosition = sizeof(refVal) * 8 - 4,  // the most significant 4 bits
+    };
+
+    static constexpr uintptr_t kOffsetBound = 256 * 1024 * 1024;  // according to kDsoLoadedAddessEnd = 0xF0000000
+    static constexpr uintptr_t kPositiveOffsetMin = 0;
+    static constexpr uintptr_t kPositiveOffsetMax = kOffsetBound;
+
+    static constexpr uintptr_t kPositiveOffsetBias = static_cast<uintptr_t>(6) << kBiasBitPosition;
+    static constexpr uintptr_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias;
+    static constexpr uintptr_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias;
+#endif
+
+    template <typename T>
+    inline T GetRef() const;
+    template <typename T>
+    inline void SetRef(const T ref);
+    inline bool IsOffset() const;
+};
+
+struct MByteRef32 {
+    uint32_t refVal;
+    static constexpr uint32_t kOffsetBound = 256 * 1024 * 1024;  // according to kDsoLoadedAddessEnd = 0xF0000000
+    static constexpr uint32_t kPositiveOffsetMin = 0;
+    static constexpr uint32_t kPositiveOffsetMax = kOffsetBound;
+
+    static constexpr uint32_t kPositiveOffsetBias = 0x60000000;  // the most significant 4 bits 0110
+    static constexpr uint32_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias;
+    static constexpr uint32_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias;
+
+    static constexpr uint32_t kDirectRefMin = 0xC0000000;  // according to kDsoLoadedAddessStart = 0xC0000000
+    static constexpr uint32_t kDirectRefMax = 0xF0000000;  // according to kDsoLoadedAddessEnd = 0xF0000000
+
+    static constexpr int32_t kNegativeOffsetMin = -(256 * 1024 * 1024);  // -kOffsetBound
+    static constexpr int32_t kNegativeOffsetMax = 0;
+
+    template <typename T>
+    inline T GetRef() const;
+    template <typename T>
+    inline void SetRef(T ref);
+    inline bool IsOffset() const;
+    inline bool IsPositiveOffset() const;
+    inline bool IsNegativeOffset() const;
+};
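(Editorial aside, not part of the patch: the bias encoding above can be read as follows. Only the `MByteRef32` constants come from the header; the two helpers are hypothetical.)

// Sketch only: distinguishing an encoded positive offset from a direct
// reference, using the constants defined in MByteRef32 above.
inline bool LooksLikeEncodedOffset(uint32_t refVal)
{
    // Encoded positive offsets carry the 0110 tag in the top four bits.
    return refVal >= MByteRef32::kEncodedPosOffsetMin && refVal < MByteRef32::kEncodedPosOffsetMax;
}

inline uint32_t DecodePositiveOffset(uint32_t refVal)
{
    return refVal - MByteRef32::kPositiveOffsetBias;  // strip the bias tag
}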
+
+// MethodMeta defined in methodmeta.h
+// FieldMeta defined in fieldmeta.h
+// MethodDesc contains MethodMetadata and stack map
+struct MethodDesc {
+    // relative offset for method metadata relative to current PC.
+    // method metadata is in compact format if this offset is odd.
+    uint32_t metadataOffset;
+
+    int16_t localRefOffset;
+    uint16_t localRefNumber;
+
+    // stack map for a method might be placed here
+};
+
+// Note: class init in maplebe and cg is highly dependent on this type.
+// update aarch64rtsupport.h if you modify this definition.
+struct ClassMetadataRO {
+    MByteRef className;
+    MethodFieldRef fields;   // point to info of fields
+    MethodFieldRef methods;  // point to info of methods
+    union {  // Element classinfo of array, others parent classinfo
+        DataRef superclass;
+        DataRef componentClass;
+    };
+
+    uint16_t numOfFields;
+    uint16_t numOfMethods;
+
+#ifndef USE_32BIT_REF
+    uint16_t flag;
+    uint16_t numOfSuperclasses;
+    uint32_t padding;
+#endif  // !USE_32BIT_REF
+
+    uint32_t mod;
+    DataRefOffset32 annotation;
+    DataRefOffset32 clinitAddr;
+};
+
+static constexpr size_t kPageSize = 4096;
+static constexpr size_t kCacheLine = 64;
+
+// According to kSpaceAnchor and kFireBreak defined in bp_allocator.cpp,
+// the address of this readable page is set as kProtectedMemoryStart for java classes.
+static constexpr uintptr_t kClInitStateAddrBase = 0xc0000000 - (1u << 20) * 2;
+
+// On Kirin 980, two mmapped memory addresses that are an odd number of pages apart can suffer severe
+// L1/L2 cache conflicts. kClassInitializedState is used as the init state for a class that has no method;
+// it will be loaded in many places for a Decouple-build app. If we set the value to
+// kClInitStateAddrBase (0xbfe00000), it may conflict with the yieldpoint test address globalPollingPage,
+// which is defined in yieldpoint.cpp.
+// Hence we add one cache line (64 bytes) of offset here to avoid such conflicts.
+static constexpr uintptr_t kClassInitializedState = kClInitStateAddrBase + kCacheLine;
+
+extern "C" uint8_t classInitProtectRegion[];
+
+// Note there is no state to indicate a class is already initialized.
+// Any state beyond listed below is treated as initialized.
+enum ClassInitState {
+    kClassInitStateMin = 0,
+    kClassUninitialized = 1,
+    kClassInitializing = 2,
+    kClassInitFailed = 3,
+    kClassInitialized = 4,
+    kClassInitStateMax = 4,
+};
+
+enum SEGVAddr {
+    kSEGVAddrRangeStart = kPageSize + 0,
+
+    // Note any readable address is treated as Initialized.
+    kSEGVAddrForClassInitStateMin = kSEGVAddrRangeStart + kClassInitStateMin,
+    kSEGVAddrForClassUninitialized = kSEGVAddrForClassInitStateMin + kClassUninitialized,
+    kSEGVAddrForClassInitializing = kSEGVAddrForClassInitStateMin + kClassInitializing,
+    kSEGVAddrForClassInitFailed = kSEGVAddrForClassInitStateMin + kClassInitFailed,
+    kSEGVAddrFoClassInitStateMax = kSEGVAddrForClassInitStateMin + kClassInitStateMax,
+
+    kSEGVAddrRangeEnd,
+};
+
+struct ClassMetadata {
+    // object common fields
+    MetaRef shadow;  // point to classinfo of java/lang/Class
+    int32_t monitor;
+
+    // other fields
+    uint16_t clIndex;  // 8bit ClassLoader index, used for querying the address of related ClassLoader instance.
+    union {
+        uint16_t objSize;
+        uint16_t componentSize;
+    } sizeInfo;
+
+#ifdef USE_32BIT_REF  // for alignment purpose
+    uint16_t flag;
+    uint16_t numOfSuperclasses;
+#endif  // USE_32BIT_REF
+
+    DataRef iTable;  // iTable of current class, used for interface call, will insert the content into classinfo
+    DataRef vTable;  // vTable of current class, used for virtual call, will insert the content into classinfo
+    GctibRef gctib;  // for rc
+
+#ifdef USE_32BIT_REF
+    DataRef32 classInfoRo;
+    DataRef32 cacheFalseClass;
+#else
+    DataRef classInfoRo;
+#endif
+
+    union {
+        uintptr_t initState;  // a readable address for initState means initialized
+        DataRef cacheTrueClass;
+    };
+
+public:
+    static inline intptr_t OffsetOfInitState()
+    {
+        ClassMetadata *base = nullptr;
+        return reinterpret_cast<intptr_t>(&(base->initState));
+    }
+
+    uintptr_t GetInitStateRawValue() const
+    {
+        return __atomic_load_n(&initState, __ATOMIC_ACQUIRE);
+    }
+
+    template <typename T>
+    void SetInitStateRawValue(T val)
+    {
+        __atomic_store_n(&initState, reinterpret_cast<uintptr_t>(val), __ATOMIC_RELEASE);
+    }
+};
+
+// function to set Class/Field/Method metadata's shadow field to avoid type conversion
+// Note 1: here we don't do NULL-check and type-compatibility check
+// Note 2: C should be of jclass/ClassMetadata* type
+template <typename M, typename C>
+static inline void MRTSetMetadataShadow(M *meta, C cls)
+{
+    meta->shadow = static_cast<MetaRef>(reinterpret_cast<uintptr_t>(cls));
+}
+
+#endif  // METADATA_LAYOUT_H
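(Editorial aside, not part of the patch: a hedged sketch of how a runtime might read the init-state word, following the note above that any value outside the SEGV sentinel range counts as initialized. `ClassNeedsInit` is invented; the accessors and enum values are from the header.)

// Sketch only: sentinel values live in the unreadable kSEGVAddr... range;
// any other value is a readable address and means "already initialized".
inline bool ClassNeedsInit(const ClassMetadata &cls)
{
    uintptr_t state = cls.GetInitStateRawValue();  // acquire load, as defined above
    return state >= kSEGVAddrForClassInitStateMin && state < kSEGVAddrRangeEnd;
}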
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_builder.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_builder.h
new file mode 100755
index 0000000000000000000000000000000000000000..48aebc5bd78ddf1bf1da8f954a9561dba24f8d9e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_builder.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_MIR_BUILDER_H
+#define MAPLE_IR_INCLUDE_MIR_BUILDER_H
+#include <string>
+#include <utility>
+#include <vector>
+#include <map>
+#ifdef _WIN32
+#include <pthread.h>
+#endif
+#include "opcodes.h"
+#include "prim_types.h"
+#include "mir_type.h"
+#include "mir_const.h"
+#include "mir_symbol.h"
+#include "mir_nodes.h"
+#include "mir_module.h"
+#include "mir_preg.h"
+#include "mir_function.h"
+#include "printing.h"
+#include "intrinsic_op.h"
+#include "opcode_info.h"
+#include "global_tables.h"
+
+namespace maple {
+using ArgPair = std::pair<GStrIdx, MIRType *>;
+using ArgVector = MapleVector<ArgPair>;
+class MIRBuilder {
+public:
+    enum MatchStyle {
+        kUpdateFieldID = 0,  // do not match but traverse to update fieldID
+        kMatchTopField = 1,  // match top level field only
+        kMatchAnyField = 2,  // match any field
+        kParentFirst = 4,    // traverse parent first
+        kFoundInChild = 8,   // found in child
+    };
+
+    explicit MIRBuilder(MIRModule *module)
+        : mirModule(module), incompleteTypeRefedSet(mirModule->GetMPAllocator().Adapter())
+    {
+    }
+
+    virtual ~MIRBuilder() = default;
+
+    virtual void SetCurrentFunction(MIRFunction &fun)
+    {
+        mirModule->SetCurFunction(&fun);
+    }
+
+    virtual MIRFunction *GetCurrentFunction() const
+    {
+        return mirModule->CurFunction();
+    }
+    MIRFunction *GetCurrentFunctionNotNull() const
+    {
+        MIRFunction *func = GetCurrentFunction();
+        CHECK_FATAL(func != nullptr, "nullptr check");
+        return func;
+    }
+
+    MIRModule &GetMirModule()
+    {
+        return *mirModule;
+    }
+
+    const MapleSet<TyIdx> &GetIncompleteTypeRefedSet() const
+    {
+        return incompleteTypeRefedSet;
+    }
+
+    std::vector> &GetExtraFieldsTuples()
+    {
+        return extraFieldsTuples;
+    }
+
+    unsigned int GetLineNum() const
+    {
+        return lineNum;
+    }
+    void SetLineNum(unsigned int num)
+    {
+        lineNum = num;
+    }
+
+    GStrIdx GetOrCreateStringIndex(const std::string &str) const
+    {
+        return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str);
+    }
+
+    GStrIdx GetOrCreateStringIndex(GStrIdx strIdx, const std::string &str) const
+    {
+        std::string firstString(GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx));
+        firstString.append(str);
+        return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(firstString);
+    }
+
+    GStrIdx GetStringIndex(const std::string &str) const
+    {
+        return GlobalTables::GetStrTable().GetStrIdxFromName(str);
+    }
+
+    MIRFunction *GetOrCreateFunction(const std::string &, TyIdx);
+    MIRFunction *GetFunctionFromSymbol(const MIRSymbol &funcst);
+    MIRFunction *GetFunctionFromStidx(StIdx stIdx);
+    MIRFunction *GetFunctionFromName(const std::string &);
+    // For compiler-generated metadata struct
+    void AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue);
+    void AddAddrofFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID,
+                             const MIRSymbol &fieldSt);
+    void AddAddroffuncFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID,
+                                 const MIRSymbol &funcSt);
+
+    bool TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID);
+    bool TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx,
+                                                   uint32 &fieldID, unsigned int matchStyle);
+    void TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, uint32 &fieldID,
+                                      uint32 &idx);
+
+    FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx, unsigned int matchStyle);
+    FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx);
+    FieldID 
GetStructFieldIDFromNameAndTypeParentFirst(MIRType &type, const std::string &name, TyIdx idx); + FieldID GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name, TyIdx idx); + + FieldID GetStructFieldIDFromFieldName(MIRType &type, const std::string &name); + FieldID GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name); + + void SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx, + const MIRType &newFieldType); + // for creating Function. + MIRSymbol *GetFunctionArgument(MIRFunction &fun, uint32 index) const + { + CHECK(index < fun.GetFormalCount(), "index out of range in GetFunctionArgument"); + return fun.GetFormal(index); + } + + MIRFunction *CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments, + bool isVarg = false, bool createBody = true) const; + MIRFunction *CreateFunction(StIdx stIdx, bool addToTable = true) const; + virtual void UpdateFunction(MIRFunction &, const MIRType *, const ArgVector &) {} + + MIRSymbol *GetSymbolFromEnclosingScope(StIdx stIdx) const; + virtual MIRSymbol *GetOrCreateLocalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetLocalDecl(const std::string &str); + MIRSymbol *CreateLocalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetGlobalDecl(const std::string &str); + MIRSymbol *GetDecl(const std::string &str); + MIRSymbol *CreateGlobalDecl(const std::string &str, const MIRType &type, MIRStorageClass sc = kScGlobal); + MIRSymbol *GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func); + // for creating Expression + ConstvalNode *CreateConstval(MIRConst *constVal); + ConstvalNode *CreateIntConst(uint64, PrimType); + ConstvalNode *CreateFloatConst(float val); + ConstvalNode *CreateDoubleConst(double val); + ConstvalNode *CreateFloat128Const(const uint64 *val); + ConstvalNode *GetConstInt(MemPool &memPool, int val); + ConstvalNode *GetConstInt(int val) + { + return CreateIntConst(val, PTY_i32); + } + + ConstvalNode *GetConstUInt1(bool val) + { + return CreateIntConst(val, PTY_u1); + } + + ConstvalNode *GetConstUInt8(uint8 val) + { + return CreateIntConst(val, PTY_u8); + } + + ConstvalNode *GetConstUInt16(uint16 val) + { + return CreateIntConst(val, PTY_u16); + } + + ConstvalNode *GetConstUInt32(uint32 val) + { + return CreateIntConst(val, PTY_u32); + } + + ConstvalNode *GetConstUInt64(uint64 val) + { + return CreateIntConst(val, PTY_u64); + } + + ConstvalNode *CreateAddrofConst(BaseNode &); + ConstvalNode *CreateAddroffuncConst(const BaseNode &); + ConstvalNode *CreateStrConst(const BaseNode &); + ConstvalNode *CreateStr16Const(const BaseNode &); + SizeoftypeNode *CreateExprSizeoftype(const MIRType &type); + FieldsDistNode *CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2); + AddrofNode *CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool = nullptr); + AddrofNode *CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool = nullptr); + AddroffuncNode *CreateExprAddroffunc(PUIdx, MemPool *memPool = nullptr); + AddrofNode *CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol); + AddrofNode *CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol); + virtual AddrofNode *CreateExprDread(MIRType &type, MIRSymbol &symbol); + virtual AddrofNode *CreateExprDread(MIRSymbol 
&symbol);
+    AddrofNode *CreateExprDread(PregIdx pregID, PrimType pty);
+    AddrofNode *CreateExprDread(MIRSymbol &symbol, uint16 fieldID);
+    DreadoffNode *CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset);
+    RegreadNode *CreateExprRegread(PrimType pty, PregIdx regIdx);
+    IreadNode *CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr);
+    IreadoffNode *CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0);
+    IreadFPoffNode *CreateExprIreadFPoff(PrimType pty, int32 offset);
+    IaddrofNode *CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr);
+    IaddrofNode *CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr);
+    BinaryNode *CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1);
+    BinaryNode *CreateExprBinary(Opcode opcode, PrimType pty, BaseNode *opnd0, BaseNode *opnd1)
+    {
+        MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(pty));
+        return CreateExprBinary(opcode, *ty, opnd0, opnd1);
+    }
+    TernaryNode *CreateExprTernary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1,
+                                   BaseNode *opnd2);
+    CompareNode *CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0,
+                                   BaseNode *opnd1);
+    UnaryNode *CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd);
+    GCMallocNode *CreateExprGCMalloc(Opcode opcode, const MIRType &ptype, const MIRType &type);
+    JarrayMallocNode *CreateExprJarrayMalloc(Opcode opcode, const MIRType &ptype, const MIRType &type, BaseNode *opnd);
+    TypeCvtNode *CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd);
+    TypeCvtNode *CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromtype, BaseNode *opnd);
+    ExtractbitsNode *CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize, BaseNode *opnd);
+    ExtractbitsNode *CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, BaseNode *opnd);
+    DepositbitsNode *CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, BaseNode *leftOpnd,
+                                           BaseNode *rightOpnd);
+    RetypeNode *CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd);
+    RetypeNode *CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd);
+    ArrayNode *CreateExprArray(const MIRType &arrayType);
+    ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op);
+    ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2);
+    ArrayNode *CreateExprArray(const MIRType &arrayType, std::vector<BaseNode *> ops);
+    IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, TyIdx tyIdx,
+                                           const MapleVector<BaseNode *> &ops);
+    IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opcode, const MIRType &type,
+                                           const MapleVector<BaseNode *> &ops);
+    // for creating Statement. 
+    NaryStmtNode *CreateStmtReturn(BaseNode *rVal);
+    NaryStmtNode *CreateStmtNary(Opcode op, BaseNode *rVal);
+    NaryStmtNode *CreateStmtNary(Opcode op, const MapleVector<BaseNode *> &rVals);
+    AssertNonnullStmtNode *CreateStmtAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx funcNameIdx);
+    CallAssertNonnullStmtNode *CreateStmtCallAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx callFuncNameIdx,
+                                                           size_t index, GStrIdx stmtFuncNameIdx);
+    CallAssertBoundaryStmtNode *CreateStmtCallAssertBoundary(Opcode op, const MapleVector<BaseNode *> &rVals,
+                                                             GStrIdx funcNameIdx, size_t index,
+                                                             GStrIdx stmtFuncNameIdx);
+    AssertBoundaryStmtNode *CreateStmtAssertBoundary(Opcode op, const MapleVector<BaseNode *> &rVals,
+                                                     GStrIdx funcNameIdx);
+    UnaryStmtNode *CreateStmtUnary(Opcode op, BaseNode *rVal);
+    UnaryStmtNode *CreateStmtThrow(BaseNode *rVal);
+    DassignNode *CreateStmtDassign(const MIRSymbol &var, FieldID fieldID, BaseNode *src);
+    DassignNode *CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src);
+    RegassignNode *CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src);
+    IassignNode *CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src);
+    IassignoffNode *CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *opnd0, BaseNode *src);
+    IassignFPoffNode *CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src);
+    CallNode *CreateStmtCall(PUIdx puIdx, const MapleVector<BaseNode *> &args, Opcode opcode = OP_call);
+    CallNode *CreateStmtCall(const std::string &name, const MapleVector<BaseNode *> &args);
+    CallNode *CreateStmtVirtualCall(PUIdx puIdx, const MapleVector<BaseNode *> &args)
+    {
+        return CreateStmtCall(puIdx, args, OP_virtualcall);
+    }
+
+    CallNode *CreateStmtSuperclassCall(PUIdx puIdx, const MapleVector<BaseNode *> &args)
+    {
+        return CreateStmtCall(puIdx, args, OP_superclasscall);
+    }
+
+    CallNode *CreateStmtInterfaceCall(PUIdx puIdx, const MapleVector<BaseNode *> &args)
+    {
+        return CreateStmtCall(puIdx, args, OP_interfacecall);
+    }
+
+    IcallNode *CreateStmtIcall(const MapleVector<BaseNode *> &args);
+    IcallNode *CreateStmtIcallAssigned(const MapleVector<BaseNode *> &args, const MIRSymbol &ret);
+    IcallNode *CreateStmtIcallAssigned(const MapleVector<BaseNode *> &args, PregIdx pregIdx);
+    IcallNode *CreateStmtIcallproto(const MapleVector<BaseNode *> &args);
+    IcallNode *CreateStmtIcallprotoAssigned(const MapleVector<BaseNode *> &args, const MIRSymbol &ret);
+    // For Call, VirtualCall, SuperclassCall, InterfaceCall
+    IntrinsiccallNode *CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments,
+                                               TyIdx tyIdx = TyIdx());
+    IntrinsiccallNode *CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments);
+    CallNode *CreateStmtCallAssigned(PUIdx puidx, const MIRSymbol *ret, Opcode op = OP_callassigned);
+    CallNode *CreateStmtCallAssigned(PUIdx puidx, const MapleVector<BaseNode *> &args, const MIRSymbol *ret,
+                                     Opcode op = OP_callassigned, TyIdx tyIdx = TyIdx());
+    CallNode *CreateStmtCallRegassigned(PUIdx, PregIdx, Opcode);
+    CallNode *CreateStmtCallRegassigned(PUIdx, PregIdx, Opcode, BaseNode *opnd);
+    CallNode *CreateStmtCallRegassigned(PUIdx, const MapleVector<BaseNode *> &, PregIdx, Opcode);
+    IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments,
+                                                       PregIdx retPregIdx);
+    IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments,
+                                                       const MIRSymbol *ret, TyIdx tyIdx = TyIdx());
+    IntrinsiccallNode *CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &args,
+                                                        const MIRSymbol *ret);
+    IfStmtNode *CreateStmtIf(BaseNode *cond);
*CreateStmtIfThenElse(BaseNode *cond); + DoloopNode *CreateStmtDoloop(StIdx, bool, BaseNode *, BaseNode *, BaseNode *); + SwitchNode *CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable); + GotoNode *CreateStmtGoto(Opcode o, LabelIdx labIdx); + JsTryNode *CreateStmtJsTry(Opcode o, LabelIdx cLabIdx, LabelIdx fLabIdx); + TryNode *CreateStmtTry(const MapleVector &cLabIdxs); + CatchNode *CreateStmtCatch(const MapleVector &tyIdxVec); + LabelIdx GetOrCreateMIRLabel(const std::string &name); + LabelIdx CreateLabIdx(MIRFunction &mirFunc); + LabelNode *CreateStmtLabel(LabelIdx labIdx); + StmtNode *CreateStmtComment(const std::string &comment); + CondGotoNode *CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx); + void AddStmtInCurrentFunctionBody(StmtNode &stmt); + MIRSymbol *GetSymbol(TyIdx, const std::string &, MIRSymKind, MIRStorageClass, uint8, bool) const; + MIRSymbol *GetSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, uint8, bool) const; + MIRSymbol *GetOrCreateSymbol(TyIdx, const std::string &, MIRSymKind, MIRStorageClass, MIRFunction *, uint8, + bool) const; + MIRSymbol *GetOrCreateSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, MIRFunction *, uint8, bool) const; + MIRSymbol *CreatePregFormalSymbol(TyIdx, PregIdx, MIRFunction &) const; + // for creating symbol + MIRSymbol *CreateSymbol(TyIdx, const std::string &, MIRSymKind, MIRStorageClass, MIRFunction *, uint8) const; + MIRSymbol *CreateSymbol(TyIdx, GStrIdx, MIRSymKind, MIRStorageClass, MIRFunction *, uint8) const; + MIRSymbol *CreateConstStringSymbol(const std::string &symbolName, const std::string &content); + // for creating nodes + AddrofNode *CreateAddrof(const MIRSymbol &st, PrimType pty = PTY_ptr); + AddrofNode *CreateDread(const MIRSymbol &st, PrimType pty); + virtual MemPool *GetCurrentFuncCodeMp(); + virtual MapleAllocator *GetCurrentFuncCodeMpAllocator(); + virtual MemPool *GetCurrentFuncDataMp(); + + virtual void GlobalLock() {} + virtual void GlobalUnlock() {} + +private: + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const; + MIRSymbol *GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable, + bool &created) const; + + MIRModule *mirModule; + MapleSet incompleteTypeRefedSet; + // + std::vector> extraFieldsTuples; + unsigned int lineNum = 0; +}; + +class MIRBuilderExt : public MIRBuilder { +public: + explicit MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex = nullptr); + virtual ~MIRBuilderExt() = default; + + void SetCurrentFunction(MIRFunction &func) override + { + curFunction = &func; + } + + MIRFunction *GetCurrentFunction() const override + { + return curFunction; + } + + MemPool *GetCurrentFuncCodeMp() override; + MapleAllocator *GetCurrentFuncCodeMpAllocator() override; + void GlobalLock() override; + void GlobalUnlock() override; + +private: + MIRFunction *curFunction = nullptr; + pthread_mutex_t *mutex = nullptr; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_BUILDER_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_config.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_config.h new file mode 100644 index 0000000000000000000000000000000000000000..33ab18bf54a4439b19dba3cd44688885b048eb1e --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_config.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// configuration definition for code in maple_ir namespace
+#ifndef MAPLE_IR_INCLUDE_MIR_CONFIG_H
+#define MAPLE_IR_INCLUDE_MIR_CONFIG_H
+
+// MIR_FEATURE_FULL = 1 : for host/server side building, by default.
+// MIR_FEATURE_FULL = 0 : for resource-constrained devices, optimized for memory size
+#if !defined(MIR_FEATURE_FULL)
+#define MIR_FEATURE_FULL 1 // default to full feature building, for debugging
+#endif // MIR_FEATURE_FULL define
+
+// MIR_DEBUG = 0 : for release building.
+// MIR_DEBUG = 1 : for debug building.
+#ifndef MIR_DEBUG
+#define MIR_DEBUG 0 // currently default to none. turn it on explicitly
+#endif // MIR_DEBUG
+
+// MIR_DEBUG_LEVEL = 0: no debugging information at all.
+//                   1: with error information.
+//                   2: with severe warning information
+//                   3: with normal warning information
+//                   4: with normal information
+//                   5: with everything
+//
+#ifndef MIR_DEBUG_LEVEL
+#define MIR_DEBUG_LEVEL 0
+#endif // MIR_DEBUG_LEVEL
+// assertion
+#if !MIR_FEATURE_FULL
+#define MIR_ASSERT(...) \
+    do { \
+    } while (0)
+#define MIR_PRINTF(...) \
+    do { \
+    } while (0)
+#define MIR_INFO(...) \
+    do { \
+    } while (0)
+#define MIR_ERROR(...) \
+    do { \
+    } while (0)
+#define MIR_WARNING(...) \
+    do { \
+    } while (0)
+#define MIR_CAST_TO(var, totype) ((totype)(var))
+#include <cstdlib>
+#if DEBUG
+#include <cstdio>
+#define MIR_FATAL(...) \
+    do { \
+        printf("FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \
+        printf(__VA_ARGS__); \
+        exit(1); \
+    } while (0)
+#else
+#define MIR_FATAL(...) \
+    do { \
+        exit(1); \
+    } while (0)
+#endif // DEBUG
+#else // MIR_FEATURE_FULL
+#include <cassert>
+#include <cstdio>
+#include <cstdlib>
+
+namespace maple {
+#define MIR_ASSERT(...) assert(__VA_ARGS__)
+#define MIR_FATAL(...) \
+    do { \
+        fprintf(stderr, "FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \
+        fprintf(stderr, __VA_ARGS__); \
+        exit(EXIT_FAILURE); \
+    } while (0)
+#define MIR_ERROR(...) \
+    do { \
+        fprintf(stderr, "ERROR: (%s:%d) ", __FILE__, __LINE__); \
+        fprintf(stderr, __VA_ARGS__); \
+    } while (0)
+#define MIR_WARNING(...) \
+    do { \
+        fprintf(stderr, "WARNING: (%s:%d) ", __FILE__, __LINE__); \
+        fprintf(stderr, __VA_ARGS__); \
+    } while (0)
+#define MIR_PRINTF(...) printf(__VA_ARGS__)
+#define MIR_INFO(...) printf(__VA_ARGS__)
+#define MIR_CAST_TO(var, totype) static_cast<totype>(var)
+#endif // !MIR_FEATURE_FULL
+#if MIR_DEBUG
+#else
+#endif // MIR_DEBUG
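To make the two build modes concrete, here is a hedged usage sketch (editorial, not part of the patch; `table`, `tableSize` and `idx` are hypothetical):

```cpp
// Under MIR_FEATURE_FULL == 1, MIR_ASSERT maps to assert() and MIR_FATAL
// prints "FATAL ERROR: (file:line) ..." to stderr before exiting; under
// MIR_FEATURE_FULL == 0 the same calls compile down to empty statements
// (or a bare exit(1) for MIR_FATAL when DEBUG is off).
MIR_ASSERT(table != nullptr && "table must be allocated");
if (idx >= tableSize) {
    MIR_FATAL("index %u out of range\n", idx);
}
```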
+
+// MIR specific configurations.
+// Note: a fixed-size definition cannot handle arbitrarily long MIR lines, such
+// as those array initialization lines.
+constexpr int kMirMaxLineSize = 3072; // a max of 3K characters per line initially
+// LIBRARY API availability
+#if MIR_FEATURE_FULL
+#define HAVE_STRTOD 1 // strtod
+#define HAVE_MALLOC 1 // malloc/free
+#else // compact VM
+#define HAVE_STRTOD 1 // strtod in current libc
+#define HAVE_MALLOC 0 // no malloc/free in current libc
+#endif // MIR_FEATURE_FULL
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_MIR_CONFIG_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_const.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_const.h
new file mode 100644
index 0000000000000000000000000000000000000000..61783ac096b6f4b22b138d0ab97eb5690e1f8711
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_const.h
@@ -0,0 +1,735 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_MIR_CONST_H
+#define MAPLE_IR_INCLUDE_MIR_CONST_H
+#include <math.h>
+#include "mir_type.h"
+#include "mpl_int_val.h"
+
+namespace maple {
+class MIRConst; // circular dependency exists, no other choice
+using MIRConstPtr = MIRConst *;
+#if MIR_FEATURE_FULL
+class MIRSymbol; // circular dependency exists, no other choice
+enum MIRConstKind {
+    kConstInvalid,
+    kConstInt,
+    kConstAddrof,
+    kConstAddrofFunc,
+    kConstLblConst,
+    kConstStrConst,
+    kConstStr16Const,
+    kConstFloatConst,
+    kConstDoubleConst,
+    kConstFloat128Const,
+    kConstAggConst,
+    kConstStConst
+};
+
+class MIRConst {
+public:
+    explicit MIRConst(MIRType &type, MIRConstKind constKind = kConstInvalid) : type(&type), kind(constKind) {}
+
+    virtual ~MIRConst() = default;
+
+    virtual void Dump(const MIRSymbolTable *localSymTab = nullptr) const
+    {
+        (void)localSymTab;
+    }
+
+    uint32 GetFieldId() const
+    {
+        return fieldID;
+    }
+
+    void SetFieldId(uint32 fieldIdx)
+    {
+        DoSetFieldId(fieldIdx);
+    }
+
+    virtual bool IsZero() const
+    {
+        return false;
+    }
+
+    virtual bool IsOne() const
+    {
+        return false;
+    }
+
+    virtual bool IsMagicNum() const
+    {
+        return false;
+    }
+
+    // NO OP
+    virtual void Neg() {}
+
+    virtual bool operator==(const MIRConst &rhs) const
+    {
+        return &rhs == this;
+    }
+
+    virtual MIRConst *Clone(MemPool &memPool) const = 0;
+
+    MIRConstKind GetKind() const
+    {
+        return kind;
+    }
+
+    MIRType &GetType()
+    {
+        return *type;
+    }
+
+    const MIRType &GetType() const
+    {
+        return *type;
+    }
+
+    void SetType(MIRType &t)
+    {
+        type = &t;
+    }
+
+protected:
+    uint32 fieldID = 0;
+
+private:
+    MIRType *type;
+    MIRConstKind kind;
+    virtual void DoSetFieldId(uint32 fieldIdx)
+    {
+        DEBUG_ASSERT(kind != kConstInt, "must be");
+        fieldID = fieldIdx;
+    }
+};
+
+class MIRIntConst : public MIRConst {
+public:
+    MIRIntConst(uint64 val, MIRType &type) : MIRConst(type, kConstInt), value(val, type.GetPrimType()) {}
+
+    MIRIntConst(const IntVal &val, MIRType &type) : MIRConst(type, kConstInt), value(val)
+    {
+        [[maybe_unused]] PrimType pType = type.GetPrimType();
+        DEBUG_ASSERT(
+            IsPrimitiveInteger(pType) && GetPrimTypeActualBitSize(pType) <=
value.GetBitWidth(), + "Constant is tried to be constructed with non-integral type or bit-width is not appropriate for it"); + } + + /// @return number of used bits in the value + uint8 GetActualBitWidth() const; + + void Trunc(uint8 width) + { + value.TruncInPlace(width); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool IsNegative() const + { + return value.IsSigned() && value.GetSignBit(); + } + + bool IsPositive() const + { + return !IsNegative() && value != 0; + } + + bool IsZero() const override + { + return value == 0; + } + + bool IsOne() const override + { + return value == 1; + } + + void Neg() override + { + value = -value; + } + + const IntVal &GetValue() const + { + return value; + } + + int64 GetExtValue(uint8 size = 0) const + { + return value.GetExtValue(size); + } + + int64 GetSXTValue(uint8 size = 0) const + { + return value.GetSXTValue(size); + } + + uint64 GetZXTValue(uint8 size = 0) const + { + return value.GetZXTValue(size); + } + + void SetValue(int64 val) const + { + (void)val; + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + + bool operator==(const MIRConst &rhs) const override; + + MIRIntConst *Clone(MemPool &memPool) const override + { + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + +private: + IntVal value; + + void DoSetFieldId(uint32 fieldIdx) override + { + DEBUG_ASSERT(false, "Can't Use This Interface in This Object"); + (void)fieldIdx; + } +}; + +class MIRAddrofConst : public MIRConst { +public: + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty) : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(0) {} + + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty, int32 ofst) + : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(ofst) + { + } + + ~MIRAddrofConst() = default; + + StIdx GetSymbolIndex() const + { + return stIdx; + } + + void SetSymbolIndex(StIdx idx) + { + stIdx = idx; + } + + FieldID GetFieldID() const + { + return fldID; + } + + int32 GetOffset() const + { + return offset; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddrofConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + +private: + StIdx stIdx; + FieldID fldID; + int32 offset; +}; + +class MIRAddroffuncConst : public MIRConst { +public: + MIRAddroffuncConst(PUIdx idx, MIRType &ty) : MIRConst(ty, kConstAddrofFunc), puIdx(idx) {} + + ~MIRAddroffuncConst() = default; + + PUIdx GetValue() const + { + return puIdx; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddroffuncConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + +private: + PUIdx puIdx; +}; + +class MIRLblConst : public MIRConst { +public: + MIRLblConst(LabelIdx val, PUIdx pidx, MIRType &type) : MIRConst(type, kConstLblConst), value(val), puIdx(pidx) {} + + ~MIRLblConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRLblConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + + LabelIdx GetValue() const + { + return value; + } + + PUIdx GetPUIdx() const + { + return puIdx; + } + +private: + LabelIdx value; + PUIdx puIdx; +}; + +class MIRStrConst : public MIRConst { +public: + MIRStrConst(UStrIdx val, MIRType &type) : MIRConst(type, kConstStrConst), value(val) {} + + MIRStrConst(const std::string &str, 
MIRType &type); + + ~MIRStrConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRStrConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + + UStrIdx GetValue() const + { + return value; + } + + static PrimType GetPrimType() + { + return kPrimType; + } + +private: + UStrIdx value; + static const PrimType kPrimType = PTY_ptr; +}; + +class MIRStr16Const : public MIRConst { +public: + MIRStr16Const(const U16StrIdx &val, MIRType &type) : MIRConst(type, kConstStr16Const), value(val) {} + + MIRStr16Const(const std::u16string &str, MIRType &type); + ~MIRStr16Const() = default; + + static PrimType GetPrimType() + { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRStr16Const *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + + U16StrIdx GetValue() const + { + return value; + } + +private: + static const PrimType kPrimType = PTY_ptr; + U16StrIdx value; +}; + +class MIRFloatConst : public MIRConst { +public: + using value_type = float; + MIRFloatConst(float val, MIRType &type) : MIRConst(type, kConstFloatConst) + { + value.floatValue = val; + } + + ~MIRFloatConst() = default; + + void SetFloatValue(float fvalue) + { + value.floatValue = fvalue; + } + + value_type GetFloatValue() const + { + return value.floatValue; + } + + static PrimType GetPrimType() + { + return kPrimType; + } + + int32 GetIntValue() const + { + return value.intValue; + } + + value_type GetValue() const + { + return GetFloatValue(); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override + { + return fabs(value.floatValue) <= 1e-6; + } + + bool IsGeZero() const + { + return value.floatValue >= 0; + } + + bool IsNeg() const + { + return ((static_cast(value.intValue) & 0x80000000) == 0x80000000); + } + + bool IsOne() const override + { + return fabs(value.floatValue - 1) <= 1e-6; + }; + bool IsAllBitsOne() const + { + return fabs(value.floatValue + 1) <= 1e-6; + }; + void Neg() override + { + value.floatValue = -value.floatValue; + } + + bool operator==(const MIRConst &rhs) const override; + + MIRFloatConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + +private: + static const PrimType kPrimType = PTY_f32; + union { + value_type floatValue; + int32 intValue; + } value; +}; + +class MIRDoubleConst : public MIRConst { +public: + using value_type = double; + MIRDoubleConst(double val, MIRType &type) : MIRConst(type, kConstDoubleConst) + { + value.dValue = val; + } + + ~MIRDoubleConst() = default; + + uint32 GetIntLow32() const + { + auto unsignVal = static_cast(value.intValue); + return static_cast(unsignVal & 0xffffffff); + } + + uint32 GetIntHigh32() const + { + auto unsignVal = static_cast(value.intValue); + return static_cast((unsignVal & 0xffffffff00000000) >> 32); + } + + int64 GetIntValue() const + { + return value.intValue; + } + + value_type GetValue() const + { + return value.dValue; + } + + static PrimType GetPrimType() + { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override + { + return fabs(value.dValue) <= 1e-15; + } + + bool IsGeZero() const + { + return value.dValue >= 0; + } + + bool IsNeg() const + { + return ((static_cast(value.intValue) & 0x8000000000000000LL) == 0x8000000000000000LL); + } + + bool IsOne() const override + { + 
return fabs(value.dValue - 1) <= 1e-15; + }; + bool IsAllBitsOne() const + { + return fabs(value.dValue + 1) <= 1e-15; + }; + void Neg() override + { + value.dValue = -value.dValue; + } + + bool operator==(const MIRConst &rhs) const override; + + MIRDoubleConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } + +private: + static const PrimType kPrimType = PTY_f64; + union { + value_type dValue; + int64 intValue; + } value; +}; + +class MIRFloat128Const : public MIRConst { +public: + MIRFloat128Const(const uint64 &val, MIRType &type) : MIRConst(type, kConstFloat128Const) + { + value = &val; + } + + ~MIRFloat128Const() = default; + + const uint64 *GetIntValue() const + { + return value; + } + + static PrimType GetPrimType() + { + return kPrimType; + } + + bool IsZero() const override + { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0; + } + + bool IsOne() const override + { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0x3FFF000000000000; + }; + bool IsAllBitsOne() const + { + MIR_ASSERT(value && "value must not be nullptr!"); + return (value[0] == 0xffffffffffffffff && value[1] == 0xffffffffffffffff); + }; + bool operator==(const MIRConst &rhs) const override; + + MIRFloat128Const *Clone(MemPool &memPool) const override + { + auto *res = memPool.New(*this); + return res; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + +private: + static const PrimType kPrimType = PTY_f128; + // value[0]: Low 64 bits; value[1]: High 64 bits. + const uint64 *value; +}; + +class MIRAggConst : public MIRConst { +public: + MIRAggConst(MIRModule &mod, MIRType &type) + : MIRConst(type, kConstAggConst), + constVec(mod.GetMPAllocator().Adapter()), + fieldIdVec(mod.GetMPAllocator().Adapter()) + { + } + + ~MIRAggConst() = default; + + MIRConst *GetAggConstElement(unsigned int fieldId) + { + for (size_t i = 0; i < fieldIdVec.size(); ++i) { + if (fieldId == fieldIdVec[i]) { + return constVec[i]; + } + } + return nullptr; + } + + void SetFieldIdOfElement(uint32 index, uint32 fieldId) + { + DEBUG_ASSERT(index < fieldIdVec.size(), "index out of range"); + fieldIdVec[index] = fieldId; + } + + const MapleVector &GetConstVec() const + { + return constVec; + } + + MapleVector &GetConstVec() + { + return constVec; + } + + const MIRConstPtr &GetConstVecItem(size_t index) const + { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + MIRConstPtr &GetConstVecItem(size_t index) + { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + void SetConstVecItem(size_t index, MIRConst &st) + { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = &st; + } + + uint32 GetFieldIdItem(size_t index) const + { + DEBUG_ASSERT(index < fieldIdVec.size(), "index out of range"); + return fieldIdVec[index]; + } + + void SetItem(uint32 index, MIRConst *mirConst, uint32 fieldId) + { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = mirConst; + fieldIdVec[index] = fieldId; + } + + void AddItem(MIRConst *mirConst, uint32 fieldId) + { + constVec.push_back(mirConst); + fieldIdVec.push_back(fieldId); + } + + void PushBack(MIRConst *elem) + { + AddItem(elem, 0); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRAggConst *Clone(MemPool &memPool) const override + { + return memPool.New(*this); + } 
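As a quick illustration of the aggregate-constant API above, here is an editorial usage sketch (not from the patch); `mod`, `structTy` and `i32Ty` are assumed to be an existing MIRModule and MIRTypes, and the templated `MemPool::New<T>` allocator is the one used throughout these headers:

```cpp
// Build an aggregate with {field 2 = 1} and read it back. AddItem records the
// constant and its field id in two parallel vectors; GetAggConstElement does
// a linear scan over fieldIdVec to find the matching constant.
MIRAggConst *agg = mod.GetMemPool()->New<MIRAggConst>(mod, structTy);
MIRIntConst *one = mod.GetMemPool()->New<MIRIntConst>(1, i32Ty);
agg->AddItem(one, 2);                         // constVec[0] = one, fieldIdVec[0] = 2
MIRConst *elem = agg->GetAggConstElement(2);  // returns `one`, or nullptr if absent
```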
+
+private:
+    MapleVector<MIRConst *> constVec;
+    MapleVector<uint32> fieldIdVec;
+};
+
+// the const has one or more symbols
+class MIRStConst : public MIRConst {
+public:
+    MIRStConst(MIRModule &mod, MIRType &type)
+        : MIRConst(type, kConstStConst),
+          stVec(mod.GetMPAllocator().Adapter()),
+          stOffsetVec(mod.GetMPAllocator().Adapter())
+    {
+    }
+
+    const MapleVector<MIRSymbol *> &GetStVec() const
+    {
+        return stVec;
+    }
+    void PushbackSymbolToSt(MIRSymbol *sym)
+    {
+        stVec.push_back(sym);
+    }
+
+    MIRSymbol *GetStVecItem(size_t index)
+    {
+        CHECK_FATAL(index < stVec.size(), "array index out of range");
+        return stVec[index];
+    }
+
+    const MapleVector<uint32> &GetStOffsetVec() const
+    {
+        return stOffsetVec;
+    }
+    void PushbackOffsetToSt(uint32 offset)
+    {
+        stOffsetVec.push_back(offset);
+    }
+
+    uint32 GetStOffsetVecItem(size_t index) const
+    {
+        CHECK_FATAL(index < stOffsetVec.size(), "array index out of range");
+        return stOffsetVec[index];
+    }
+
+    MIRStConst *Clone(MemPool &memPool) const override
+    {
+        auto *res = memPool.New<MIRStConst>(*this);
+        return res;
+    }
+
+    ~MIRStConst() = default;
+
+private:
+    MapleVector<MIRSymbol *> stVec;  // symbols in the st const
+    MapleVector<uint32> stOffsetVec; // symbol offsets
+};
+#endif // MIR_FEATURE_FULL
+
+bool IsDivSafe(const MIRIntConst &dividend, const MIRIntConst &divisor, PrimType pType);
+
+} // namespace maple
+
+#define LOAD_SAFE_CAST_FOR_MIR_CONST
+#include "ir_safe_cast_traits.def"
+
+#endif // MAPLE_IR_INCLUDE_MIR_CONST_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_function.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_function.h
new file mode 100644
index 0000000000000000000000000000000000000000..d2de4b084a74b2bc1379abbc443abc31dabc7352
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_function.h
@@ -0,0 +1,1642 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#define MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#include +#include "mir_module.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_preg.h" +#include "intrinsics.h" +#include "file_layout.h" +#include "mir_nodes.h" +#include "mir_type.h" +#include "mir_scope.h" +#include "profile.h" +#include "func_desc.h" + +#define DEBUGME true + +namespace maple { +enum PointerAttr : uint32_t { kPointerUndeiced = 0x1, kPointerNull = 0x2, kPointerNoNull = 0x3 }; + +enum FuncAttrProp : uint32_t { + kNoThrowException = 0x1, + kNoRetNewlyAllocObj = 0x2, + kNoDefEffect = 0x4, + kNoDefArgEffect = 0x8, + kPureFunc = 0x10, + kIpaSeen = 0x20, + kUseEffect = 0x40, + kDefEffect = 0x80 +}; + +// describe a formal definition in a function declaration +class FormalDef { +public: + GStrIdx formalStrIdx = GStrIdx(0); // used when processing the prototype + MIRSymbol *formalSym = nullptr; // used in the function definition + TyIdx formalTyIdx = TyIdx(); + TypeAttrs formalAttrs = TypeAttrs(); // the formal's type attributes + + FormalDef() {}; + virtual ~FormalDef() {} + FormalDef(MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) : formalSym(s), formalTyIdx(tidx), formalAttrs(at) + { + } + FormalDef(const GStrIdx &sidx, MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) + : formalStrIdx(sidx), formalSym(s), formalTyIdx(tidx), formalAttrs(at) + { + } +}; + +class MeFunction; // circular dependency exists, no other choice +class EAConnectionGraph; // circular dependency exists, no other choice +class MIRFunction { +public: + MIRFunction(MIRModule *mod, StIdx idx) : module(mod), symbolTableIdx(idx) + { + scope = module->GetMemPool()->New(mod); + } + + ~MIRFunction() = default; + + void Dump(bool withoutBody = false); + void DumpUpFormal(int32 indent) const; + void DumpFrame(int32 indent) const; + void DumpFuncBody(int32 indent); + void DumpScope(); + const MIRSymbol *GetFuncSymbol() const; + MIRSymbol *GetFuncSymbol(); + + void SetBaseClassFuncNames(GStrIdx strIdx); + void SetMemPool(MemPool *memPool) + { + SetCodeMemPool(memPool); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + + /// update signature_strIdx, basefunc_strIdx, baseclass_strIdx, basefunc_withtype_strIdx + /// without considering baseclass_strIdx, basefunc_strIdx's original non-zero values + /// \param strIdx full_name strIdx of the new function name + void OverrideBaseClassFuncNames(GStrIdx strIdx); + const std::string &GetName() const; + + GStrIdx GetNameStrIdx() const; + + const std::string &GetBaseClassName() const; + + const std::string &GetBaseFuncName() const; + + const std::string &GetBaseFuncNameWithType() const; + + const std::string &GetBaseFuncSig() const; + + const std::string &GetSignature() const; + + GStrIdx GetBaseClassNameStrIdx() const + { + return baseClassStrIdx; + } + + GStrIdx GetBaseFuncNameStrIdx() const + { + return baseFuncStrIdx; + } + + GStrIdx GetBaseFuncNameWithTypeStrIdx() const + { + return baseFuncWithTypeStrIdx; + } + + GStrIdx GetBaseFuncSigStrIdx() const + { + return baseFuncSigStrIdx; + } + + void SetBaseClassNameStrIdx(GStrIdx id) + { + baseClassStrIdx = id; + } + + void SetBaseFuncNameStrIdx(GStrIdx id) + { + baseFuncStrIdx = id; + } + + void SetBaseFuncNameWithTypeStrIdx(GStrIdx id) + { + baseFuncWithTypeStrIdx = id; + } + + const MIRType *GetReturnType() const; + MIRType *GetReturnType(); + bool IsReturnVoid() const + { + return GetReturnType()->GetPrimType() == PTY_void; + } + TyIdx GetReturnTyIdx() const + { + CHECK_FATAL(funcType != nullptr, 
"funcType is nullptr"); + return funcType->GetRetTyIdx(); + } + void SetReturnTyIdx(TyIdx tyidx) + { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + funcType->SetRetTyIdx(tyidx); + } + + const MIRType *GetClassType() const; + TyIdx GetClassTyIdx() const + { + return classTyIdx; + } + void SetClassTyIdx(TyIdx tyIdx) + { + classTyIdx = tyIdx; + } + void SetClassTyIdx(uint32 idx) + { + classTyIdx.reset(idx); + } + + void AddArgument(MIRSymbol *st) + { + DEBUG_ASSERT(st != nullptr, "null ptr check"); + FormalDef formalDef(st->GetNameStrIdx(), st, st->GetTyIdx(), st->GetAttrs()); + formalDefVec.push_back(formalDef); + } + + void AddFormalDef(const FormalDef &formalDef) + { + formalDefVec.push_back(formalDef); + } + + size_t GetParamSize() const + { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList().size(); + } + + auto &GetParamTypes() const + { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList(); + } + + TyIdx GetNthParamTyIdx(size_t i) const + { + DEBUG_ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return funcType->GetParamTypeList()[i]; + } + + const MIRType *GetNthParamType(size_t i) const; + MIRType *GetNthParamType(size_t i); + + const TypeAttrs &GetNthParamAttr(size_t i) const + { + DEBUG_ASSERT(i < formalDefVec.size(), "array index out of range"); + DEBUG_ASSERT(formalDefVec[i].formalSym != nullptr, "null ptr check"); + return formalDefVec[i].formalSym->GetAttrs(); + } + + void UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs = false); + void UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs = false); + LabelIdx GetOrCreateLableIdxFromName(const std::string &name); + GStrIdx GetLabelStringIndex(LabelIdx labelIdx) const + { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + DEBUG_ASSERT(labelIdx < labelTab->Size(), "index out of range in GetLabelStringIndex"); + return labelTab->GetSymbolFromStIdx(labelIdx); + } + const std::string &GetLabelName(LabelIdx labelIdx) const + { + GStrIdx strIdx = GetLabelStringIndex(labelIdx); + return GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + } + + const MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false) const; + MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false); + + void SetAttrsFromSe(uint8 specialEffect); + + const FuncAttrs &GetAttrs() const + { + return funcAttrs; + } + + void SetAttrs(FuncAttrs attr) + { + funcAttrs = attr; + } + + bool GetAttr(FuncAttrKind attrKind) const + { + return funcAttrs.GetAttr(attrKind); + } + + void SetAttr(FuncAttrKind attrKind) + { + funcAttrs.SetAttr(attrKind); + } + + void UnSetAttr(FuncAttrKind attrKind) + { + funcAttrs.SetAttr(attrKind, true); + } + + bool IsVarargs() const + { + return funcAttrs.GetAttr(FUNCATTR_varargs); + } + + bool IsWeak() const + { + return funcAttrs.GetAttr(FUNCATTR_weak); + } + + bool IsStatic() const + { + return funcAttrs.GetAttr(FUNCATTR_static); + } + + bool IsInline() const + { + return funcAttrs.GetAttr(FUNCATTR_inline); + } + + bool IsExtern() const + { + return funcAttrs.GetAttr(FUNCATTR_extern); + } + + bool IsNative() const + { + return funcAttrs.GetAttr(FUNCATTR_native); + } + + bool IsFinal() const + { + return funcAttrs.GetAttr(FUNCATTR_final); + } + + bool IsAbstract() const + { + return funcAttrs.GetAttr(FUNCATTR_abstract); + } + + bool IsPublic() const + { + return 
funcAttrs.GetAttr(FUNCATTR_public); + } + + bool IsPrivate() const + { + return funcAttrs.GetAttr(FUNCATTR_private); + } + + bool IsProtected() const + { + return funcAttrs.GetAttr(FUNCATTR_protected); + } + + bool IsConstructor() const + { + return funcAttrs.GetAttr(FUNCATTR_constructor); + } + + bool IsLocal() const + { + return funcAttrs.GetAttr(FUNCATTR_local); + } + + bool IsNoDefArgEffect() const + { + return funcAttrs.GetAttr(FUNCATTR_nodefargeffect); + } + + bool IsNoDefEffect() const + { + return funcAttrs.GetAttr(FUNCATTR_nodefeffect); + } + + bool IsNoRetGlobal() const + { + return funcAttrs.GetAttr(FUNCATTR_noretglobal); + } + + bool IsNoThrowException() const + { + return funcAttrs.GetAttr(FUNCATTR_nothrow_exception); + } + + bool IsNoRetArg() const + { + return funcAttrs.GetAttr(FUNCATTR_noretarg); + } + + bool IsNoPrivateDefEffect() const + { + return funcAttrs.GetAttr(FUNCATTR_noprivate_defeffect); + } + + bool IsIpaSeen() const + { + return funcAttrs.GetAttr(FUNCATTR_ipaseen); + } + + bool IsPure() const + { + return funcAttrs.GetAttr(FUNCATTR_pure); + } + + bool IsFirstArgReturn() const + { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + bool IsUnSafe() const + { + return !funcAttrs.GetAttr(FUNCATTR_safed) || funcAttrs.GetAttr(FUNCATTR_unsafed); + } + + bool IsSafe() const + { + return funcAttrs.GetAttr(FUNCATTR_safed); + } + + void SetVarArgs() + { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + void SetNoDefArgEffect() + { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + + void SetNoDefEffect() + { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + + void SetNoRetGlobal() + { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + + void SetNoThrowException() + { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } + + void SetNoRetArg() + { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + + void SetNoPrivateDefEffect() + { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + + void SetIpaSeen() + { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + + void SetPure() + { + funcAttrs.SetAttr(FUNCATTR_pure); + } + + void SetFirstArgReturn() + { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + + void UnsetNoDefArgEffect() + { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect, true); + } + + void UnsetNoDefEffect() + { + funcAttrs.SetAttr(FUNCATTR_nodefeffect, true); + } + + void UnsetNoRetGlobal() + { + funcAttrs.SetAttr(FUNCATTR_noretglobal, true); + } + + void UnsetNoThrowException() + { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception, true); + } + + void UnsetPure() + { + funcAttrs.SetAttr(FUNCATTR_pure, true); + } + + void UnsetNoRetArg() + { + funcAttrs.SetAttr(FUNCATTR_noretarg, true); + } + + void UnsetNoPrivateDefEffect() + { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + + bool HasCall() const; + void SetHasCall(); + + bool IsReturnStruct() const; + void SetReturnStruct(); + void SetReturnStruct(const MIRType &retType); + + bool IsUserFunc() const; + void SetUserFunc(); + + bool IsInfoPrinted() const; + void SetInfoPrinted(); + void ResetInfoPrinted(); + + void SetNoReturn(); + bool NeverReturns() const; + + void SetHasSetjmp(); + bool HasSetjmp() const; + + void SetHasAsm(); + bool HasAsm() const; + + void SetStructReturnedInRegs(); + bool StructReturnedInRegs() const; + + void SetReturnStruct(const MIRType *retType); + + bool IsEmpty() const; + bool IsClinit() const; + uint32 GetInfo(GStrIdx strIdx) const; + uint32 GetInfo(const std::string &str) const; + bool IsAFormal(const MIRSymbol *st) const + { + for (const auto &formalDef : formalDefVec) { + 
if (st == formalDef.formalSym) { + return true; + } + } + return false; + } + + uint32 GetFormalIndex(const MIRSymbol *symbol) const + { + for (size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalSym == symbol) { + return i; + } + } + return 0xffffffff; + } + + FormalDef &GetFormalDefFromMIRSymbol(const MIRSymbol *symbol) + { + for (auto &formalDef : formalDefVec) { + if (formalDef.formalSym == symbol) { + return formalDef; + } + } + CHECK_FATAL(false, "Impossible."); + } + + bool IsAFormalName(const GStrIdx idx) const + { + for (const auto &formalDef : formalDefVec) { + if (idx == formalDef.formalStrIdx) { + return true; + } + } + return false; + } + + const FormalDef GetFormalFromName(const GStrIdx idx) const + { + for (size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalStrIdx == idx) { + return formalDefVec[i]; + } + } + return FormalDef(); + } + + // tell whether this function is a Java method + bool IsJava() const + { + return classTyIdx != 0u; + } + + const MIRType *GetNodeType(const BaseNode &node) const; + +#ifdef DEBUGME + void SetUpGDBEnv(); + void ResetGDBEnv(); +#endif + void ReleaseMemory() + { + if (codeMemPoolTmp != nullptr) { + delete codeMemPoolTmp; + codeMemPoolTmp = nullptr; + } + } + + void ReleaseCodeMemory() + { + if (codeMemPool != nullptr) { + codeMemPoolAllocator.SetMemPool(nullptr); + delete codeMemPool; + SetMemPool(nullptr); + } + } + + MemPool *GetCodeMempool() + { + if (useTmpMemPool) { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + MapleAllocator &GetCodeMemPoolAllocator() + { + GetCodeMempool(); + if (useTmpMemPool) { + return codeMemPoolTmpAllocator; + } + return codeMemPoolAllocator; + } + + MapleAllocator &GetCodeMempoolAllocator() + { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPoolAllocator; + } + + TyIdx GetFuncRetStructTyIdx() + { + TyIdx tyIdx = GetFormalDefAt(0).formalTyIdx; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty->GetKind() == kTypePointer, "Fake param not a pointer"); + MIRPtrType *pType = static_cast(ty); + tyIdx = pType->GetPointedTyIdx(); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->IsStructType(), "Must be struct return type"); + return tyIdx; + } + + void EnterFormals(); + void NewBody(); + + MIRModule *GetModule() + { + return module; + } + + PUIdx GetPuidx() const + { + return puIdx; + } + void SetPuidx(PUIdx idx) + { + puIdx = idx; + } + + PUIdx GetPuidxOrigin() const + { + return puIdxOrigin; + } + void SetPuidxOrigin(PUIdx idx) + { + puIdxOrigin = idx; + } + + StIdx GetStIdx() const + { + return symbolTableIdx; + } + void SetStIdx(StIdx stIdx) + { + symbolTableIdx = stIdx; + } + + int32 GetSCCId() const + { + return sccID; + } + void SetSCCId(int32 id) + { + sccID = id; + } + + MIRFuncType *GetMIRFuncType() + { + return funcType; + } + void SetMIRFuncType(MIRFuncType *type) + { + funcType = type; + } + + TyIdx GetInferredReturnTyIdx() const + { + return inferredReturnTyIdx; + } + + void SetInferredReturnTyIdx(TyIdx tyIdx) + { + inferredReturnTyIdx = tyIdx; + 
} + + MIRTypeNameTable *GetTypeNameTab() const + { + return typeNameTab; + } + + void AllocTypeNameTab() + { + if (typeNameTab == nullptr) { + typeNameTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + bool HaveTypeNameTab() const + { + return typeNameTab != nullptr; + } + const MapleMap &GetGStrIdxToTyIdxMap() const + { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetGStrIdxToTyIdxMap(); + } + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const + { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetTyIdxFromGStrIdx(idx); + } + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) + { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + typeNameTab->SetGStrIdxToTyIdx(gStrIdx, tyIdx); + } + + const std::string &GetLabelTabItem(LabelIdx labelIdx) const + { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + return labelTab->GetName(labelIdx); + } + + void AllocLabelTab() + { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + + MIRPregTable *GetPregTab() const + { + return pregTab; + } + + void SetPregTab(MIRPregTable *tab) + { + pregTab = tab; + } + void AllocPregTab() + { + if (pregTab == nullptr) { + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + } + } + MIRPreg *GetPregItem(PregIdx idx) + { + return const_cast(const_cast(this)->GetPregItem(idx)); + } + const MIRPreg *GetPregItem(PregIdx idx) const + { + return pregTab->PregFromPregIdx(idx); + } + + BlockNode *GetBody() + { + return body; + } + const BlockNode *GetBody() const + { + return body; + } + void SetBody(BlockNode *node) + { + body = node; + } + + BlockNode *GetLastPosBody() + { + return bodyLast; + } + const BlockNode *GetLastPosBody() const + { + return bodyLast; + + } + void SetLastPosBody(BlockNode *node) + { + bodyLast = node; + } + + SrcPosition &GetSrcPosition() + { + DEBUG_ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + return GetFuncSymbol()->GetSrcPosition(); + } + + void SetSrcPosition(const SrcPosition &position) + { + DEBUG_ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + GetFuncSymbol()->SetSrcPosition(position); + } + + const FuncAttrs &GetFuncAttrs() const + { + return funcAttrs; + } + FuncAttrs &GetFuncAttrs() + { + return funcAttrs; + } + + void SetFuncAttrs(const FuncAttrs &attrs) + { + funcAttrs = attrs; + } + void SetFuncAttrs(uint64 attrFlag) + { + funcAttrs.SetAttrFlag(attrFlag); + } + + uint32 GetFlag() const + { + return flag; + } + void SetFlag(uint32 newFlag) + { + flag = newFlag; + } + + uint16 GetHashCode() const + { + return hashCode; + } + void SetHashCode(uint16 newHashCode) + { + hashCode = newHashCode; + } + + void SetFileIndex(uint32 newFileIndex) + { + fileIndex = newFileIndex; + } + + MIRInfoVector &GetInfoVector() + { + return info; + } + + const MIRInfoPair &GetInfoPair(size_t i) const + { + return info.at(i); + } + + void PushbackMIRInfo(const MIRInfoPair &pair) + { + info.push_back(pair); + } + + void SetMIRInfoNum(size_t idx, uint32 num) + { + info[idx].second = num; + } + + MapleVector &InfoIsString() + { + return infoIsString; + } + + void PushbackIsString(bool isString) + { + infoIsString.push_back(isString); + } + + MIRScope *GetScope() + { + return scope; + } + + bool NeedEmitAliasInfo() const + { + return scope->NeedEmitAliasInfo(); + } + + MapleMap &GetAliasVarMap() + { + return scope->GetAliasVarMap(); + } + + void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) + { + 
scope->SetAliasVarMap(idx, vars); + } + + void AddAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) + { + scope->AddAliasVarMap(idx, vars); + } + + bool HasVlaOrAlloca() const + { + return hasVlaOrAlloca; + } + void SetVlaOrAlloca(bool has) + { + hasVlaOrAlloca = has; + } + + // Default freq is the lastStmtFreq + bool HasFreqMap() const + { + return freqLastMap != nullptr; + } + + bool HasFirstFreqMap() const + { + return freqFirstMap != nullptr; + } + + const MapleMap &GetFirstFreqMap() const + { + return *freqFirstMap; + } + + void SetFirstFreqMap(uint32 stmtID, uint32 freq) + { + if (freqFirstMap == nullptr) { + freqFirstMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqFirstMap)[stmtID] = freq; + } + + const MapleMap &GetLastFreqMap() const + { + return *freqLastMap; + } + + int32 GetFreqFromLastStmt(uint32 stmtId) + { + if (freqLastMap == nullptr) { + return -1; + } + if ((*freqLastMap).find(stmtId) == (*freqLastMap).end()) { + return -1; + } + return static_cast((*freqLastMap)[stmtId]); + } + + int32 GetFreqFromFirstStmt(uint32 stmtId) + { + if (freqFirstMap == nullptr) { + return -1; + } + if ((*freqFirstMap).find(stmtId) == (*freqFirstMap).end()) { + return -1; + } + return static_cast((*freqFirstMap)[stmtId]); + } + + void SetLastFreqMap(uint32 stmtID, uint32 freq) + { + if (freqLastMap == nullptr) { + freqLastMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqLastMap)[stmtID] = freq; + } + + bool WithLocInfo() const + { + return withLocInfo; + } + void SetWithLocInfo(bool withInfo) + { + withLocInfo = withInfo; + } + + bool IsDirty() const + { + return isDirty; + } + void SetDirty(bool dirty) + { + isDirty = dirty; + } + + bool IsFromMpltInline() const + { + return fromMpltInline; + } + void SetFromMpltInline(bool isInline) + { + fromMpltInline = isInline; + } + + uint8 GetLayoutType() const + { + return layoutType; + } + void SetLayoutType(uint8 type) + { + layoutType = type; + } + + uint32 GetCallTimes() const + { + return callTimes; + } + void SetCallTimes(uint32 times) + { + callTimes = times; + } + + uint32 GetFrameSize() const + { + return frameSize; + } + void SetFrameSize(uint32 size) + { + frameSize = size; + } + + uint32 GetUpFormalSize() const + { + return upFormalSize; + } + void SetUpFormalSize(uint32 size) + { + upFormalSize = size; + } + + uint32 GetOutParmSize() const + { + return outParmSize; + } + void SetOutParmSize(uint32 size) + { + outParmSize = size; + } + + uint16 GetModuleId() const + { + return moduleID; + } + void SetModuleID(uint16 id) + { + moduleID = id; + } + + uint32 GetFuncSize() const + { + return funcSize; + } + void SetFuncSize(uint32 size) + { + funcSize = size; + } + + uint32 GetTempCount() const + { + return tempCount; + } + void IncTempCount() + { + ++tempCount; + } + + uint8 *GetFormalWordsTypeTagged() const + { + return formalWordsTypeTagged; + } + void SetFormalWordsTypeTagged(uint8 *tagged) + { + formalWordsTypeTagged = tagged; + } + uint8 **GetFwtAddress() + { + return &formalWordsTypeTagged; + } + + uint8 *GetLocalWordsTypeTagged() const + { + return localWordsTypeTagged; + } + void SetLocalWordsTypeTagged(uint8 *tagged) + { + localWordsTypeTagged = tagged; + } + uint8 **GetLwtAddress() + { + return &localWordsTypeTagged; + } + + uint8 *GetFormalWordsRefCounted() const + { + return formalWordsRefCounted; + } + void SetFormalWordsRefCounted(uint8 *counted) + { + formalWordsRefCounted = counted; + } + uint8 **GetFwrAddress() + { + return &formalWordsRefCounted; + } + + 
uint8 *GetLocalWordsRefCounted() const + { + return localWordsRefCounted; + } + void SetLocalWordsRefCounted(uint8 *counted) + { + localWordsRefCounted = counted; + } + + MeFunction *GetMeFunc() + { + return meFunc; + } + + void SetMeFunc(MeFunction *func) + { + meFunc = func; + } + + EAConnectionGraph *GetEACG() + { + return eacg; + } + void SetEACG(EAConnectionGraph *eacgVal) + { + eacg = eacgVal; + } + + void SetFormalDefVec(const MapleVector &currFormals) + { + formalDefVec = currFormals; + } + + MapleVector &GetFormalDefVec() + { + return formalDefVec; + } + + const FormalDef &GetFormalDefAt(size_t i) const + { + return formalDefVec[i]; + } + + FormalDef &GetFormalDefAt(size_t i) + { + return formalDefVec[i]; + } + + const MIRSymbol *GetFormal(size_t i) const + { + return formalDefVec[i].formalSym; + } + + MIRSymbol *GetFormal(size_t i) + { + return formalDefVec[i].formalSym; + } + + const std::string &GetFormalName(size_t i) const + { + auto *formal = formalDefVec[i].formalSym; + if (formal != nullptr) { + return formal->GetName(); + } + return GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx); + } + + size_t GetFormalCount() const + { + return formalDefVec.size(); + } + + void ClearFormals() + { + formalDefVec.clear(); + } + + void ClearArguments() + { + formalDefVec.clear(); + funcType->GetParamTypeList().clear(); + funcType->GetParamAttrsList().clear(); + } + + size_t GetSymbolTabSize() const + { + DEBUG_ASSERT(symTab != nullptr, "symTab is nullptr"); + return symTab->GetSymbolTableSize(); + } + MIRSymbol *GetSymbolTabItem(uint32 idx, bool checkFirst = false) const + { + return symTab->GetSymbolFromStIdx(idx, checkFirst); + } + const MIRSymbolTable *GetSymTab() const + { + return symTab; + } + MIRSymbolTable *GetSymTab() + { + return symTab; + } + void AllocSymTab() + { + if (symTab == nullptr) { + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + MIRLabelTable *GetLabelTab() const + { + CHECK_FATAL(labelTab != nullptr, "must be"); + return labelTab; + } + MIRLabelTable *GetLabelTab() + { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + return labelTab; + } + void SetLabelTab(MIRLabelTable *currLabelTab) + { + labelTab = currLabelTab; + } + + const MapleSet &GetRetRefSym() const + { + return retRefSym; + } + void InsertMIRSymbol(MIRSymbol *sym) + { + (void)retRefSym.insert(sym); + } + + MemPool *GetDataMemPool() const + { + return module->GetMemPool(); + } + + MemPool *GetCodeMemPool() + { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + void SetCodeMemPool(MemPool *currCodeMemPool) + { + codeMemPool = currCodeMemPool; + } + + MapleAllocator &GetCodeMPAllocator() + { + GetCodeMemPool(); + return codeMemPoolAllocator; + } + + void AddFuncGenericDeclare(GenericDeclare *g) + { + genericDeclare.push_back(g); + } + + void AddFuncGenericArg(AnnotationType *a) + { + genericArg.push_back(a); + } + + void AddFuncGenericRet(AnnotationType *r) + { + genericRet = r; + } + + void AddFuncLocalGenericVar(const GStrIdx &str, AnnotationType *at) + { + genericLocalVar[str] = at; + } + + MapleVector &GetFuncGenericDeclare() + { + return genericDeclare; + } + + MapleVector &GetFuncGenericArg() + { + return genericArg; + } + + void SetRetrunAttrKind(const PointerAttr kind) + { + returnKind = kind; + } + + PointerAttr GetRetrunAttrKind() const + { + return 
returnKind; + } + + AnnotationType *GetFuncGenericRet() + { + return genericRet; + } + + AnnotationType *GetFuncLocalGenericVar(const GStrIdx &str) + { + if (genericLocalVar.find(str) == genericLocalVar.end()) { + return nullptr; + } + return genericLocalVar[str]; + } + + StmtNode *FindStmtWithId(StmtNode *stmt, uint32 stmtId) + { + while (stmt != nullptr) { + StmtNode *next = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_dowhile: + case OP_while: { + WhileStmtNode *wnode = static_cast(stmt); + if (wnode->GetBody() != nullptr && wnode->GetBody()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(wnode->GetBody()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_if: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + IfStmtNode *inode = static_cast(stmt); + if (inode->GetThenPart() != nullptr && inode->GetThenPart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetThenPart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + if (inode->GetElsePart() != nullptr && inode->GetElsePart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetElsePart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_callassigned: + case OP_call: + case OP_brtrue: + case OP_brfalse: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + break; + } + default: { + break; + } + } + stmt = next; + } + return nullptr; + } + + StmtNode *GetStmtNodeFromMeId(uint32 stmtId) + { + if (GetBody() == nullptr) { + return nullptr; + } + StmtNode *stmt = GetBody()->GetFirst(); + return FindStmtWithId(stmt, stmtId); + } + + MemPool *GetCodeMemPoolTmp() + { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + + bool CheckParamNullType(MIRSymbol *sym) + { + return paramNonullTypeMap.find(sym) != paramNonullTypeMap.end(); + } + + PointerAttr GetParamNonull(MIRSymbol *sym) + { + return paramNonullTypeMap[sym]; + } + + void SetParamNonull(MIRSymbol *sym, PointerAttr type) + { + paramNonullTypeMap[sym] = type; + } + + void CopyReferedRegs(std::set regs) + { + for (auto reg : regs) { + referedPregs.insert(reg); + } + } + + MapleSet GetReferedRegs() const + { + return referedPregs; + } + + void SetDerived2BaseRef(PregIdx deriveRef, PregIdx baseRef) + { + CHECK_FATAL(derived2BaseRef.find(deriveRef) == derived2BaseRef.end(), "derived2BaseRef double set"); + derived2BaseRef[deriveRef] = baseRef; + } + + const MapleUnorderedMap &GetDerived2BaseRef() const + { + return derived2BaseRef; + } + + bool IsReferedRegsValid() const + { + return referedRegsValid; + } + + void SetReferedRegsValid(bool val) + { + referedRegsValid = val; + } + + FuncDesc &GetFuncDesc() + { + return funcDesc; + } + + void SetFuncDesc(const FuncDesc &value) + { + funcDesc = value; + } + + void SetProfCtrTbl(MIRSymbol *pct) + { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + profCtrTbl = pct; + } + + MIRSymbol *GetProfCtrTbl() + { + return profCtrTbl; + } + + void SetNumCtrs(uint32 num) + { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + nCtrs = num; + } + + uint32 GetNumCtrs() const + { + return nCtrs; + } + + void SetFileLineNoChksum(uint64 chksum) + { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + fileLinenoChksum = chksum; + } + + uint64 GetFileLineNoChksum() const + { + return 
fileLinenoChksum; + } + + void SetCFGChksum(uint64 chksum) + { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + cfgChksum = chksum; + } + + uint64 GetCFGChksum() const + { + return cfgChksum; + } + + void InitFuncDescToBest() + { + funcDesc.InitToBest(); + } + + const FuncDesc &GetFuncDesc() const + { + return funcDesc; + } + + void AddProfileDesc(uint64 hash, uint32 start, uint32 end) + { + profileDesc = module->GetMemPool()->New(hash, start, end); + } + + const IRProfileDesc *GetProfInf() + { + if (profileDesc == nullptr) { + // return profileDesc with default value + profileDesc = module->GetMemPool()->New(); + } + return profileDesc; + } + + bool IsVisited() const + { + return isVisited; + } + void SetIsVisited() + { + isVisited = true; + } + + void SetFuncProfData(GcovFuncInfo *data) + { + funcProfData = data; + } + GcovFuncInfo *GetFuncProfData() + { + return funcProfData; + } + GcovFuncInfo *GetFuncProfData() const + { + return funcProfData; + } + void SetStmtFreq(uint32_t stmtID, uint64_t freq) + { + DEBUG_ASSERT((funcProfData != nullptr && freq > 0), "nullptr check"); + funcProfData->SetStmtFreq(stmtID, static_cast(freq)); + } + + uint8 GetFrameReseverdSlot() + { + return funcAttrs.GetFrameResverdSlot(); + } + +private: + MIRModule *module; // the module that owns this function + PUIdx puIdx = 0; // the PU index of this function + PUIdx puIdxOrigin = 0; // the original puIdx when initial generation + StIdx symbolTableIdx; // the symbol table index of this function + int32 sccID = -1; // the scc id of this function, for mplipa + MIRFuncType *funcType = nullptr; + TyIdx inferredReturnTyIdx {0}; // the actual return type of of this function (may be a + // subclass of the above). 0 means can not be inferred. + TyIdx classTyIdx {0}; // class/interface type this function belongs to + MapleVector formalDefVec {module->GetMPAllocator().Adapter()}; // the formals in function definition + MapleSet retRefSym {module->GetMPAllocator().Adapter()}; + + MapleVector genericDeclare {module->GetMPAllocator().Adapter()}; + MapleVector genericArg {module->GetMPAllocator().Adapter()}; + MapleMap genericLocalVar {module->GetMPAllocator().Adapter()}; + AnnotationType *genericRet = nullptr; + + MIRSymbolTable *symTab = nullptr; + MIRTypeNameTable *typeNameTab = nullptr; + MIRLabelTable *labelTab = nullptr; + MIRPregTable *pregTab = nullptr; + MemPool *codeMemPool = nullptr; + MapleAllocator codeMemPoolAllocator {nullptr}; + uint32 callTimes = 0; + BlockNode *body = nullptr; + BlockNode *bodyLast = nullptr; + FuncAttrs funcAttrs {}; + uint32 flag = 0; + uint16 hashCode = 0; // for methodmetadata order + uint32 fileIndex = 0; // this function belongs to which file, used by VM for plugin manager + MIRInfoVector info {module->GetMPAllocator().Adapter()}; + MapleVector infoIsString {module->GetMPAllocator().Adapter()}; // tells if an entry has string value + MIRScope *scope = nullptr; + MapleMap *freqFirstMap = nullptr; // save bb frequency in its first_stmt, key is stmtId + MapleMap *freqLastMap = nullptr; // save bb frequency in its last_stmt, key is stmtId + MapleSet referedPregs {module->GetMPAllocator().Adapter()}; + MapleUnorderedMap derived2BaseRef {module->GetMPAllocator().Adapter()}; + bool referedRegsValid = false; + bool hasVlaOrAlloca = false; + bool withLocInfo = true; + bool isVisited = false; // only used in inline phase. + bool isDirty = false; + bool fromMpltInline = false; // Whether this function is imported from mplt_inline file or not. 
+ uint8_t layoutType = kLayoutUnused; + uint32 frameSize = 0; + uint32 upFormalSize = 0; + uint32 outParmSize = 0; + uint16 moduleID = 0; + uint32 funcSize = 0; // size of code in words + uint32 tempCount = 0; + uint8 *formalWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP + N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP - N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(frameSize) + uint8 *formalWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(frameSize) + // uint16 numlabels; // removed. label table size + // StmtNode **lbl2stmt; // lbl2stmt table, removed; + // to hold unmangled class and function names + MeFunction *meFunc = nullptr; + EAConnectionGraph *eacg = nullptr; + IRProfileDesc *profileDesc = nullptr; + GStrIdx baseClassStrIdx {0}; // the string table index of base class name + GStrIdx baseFuncStrIdx {0}; // the string table index of base function name + // the string table index of base function name mangled with type info + GStrIdx baseFuncWithTypeStrIdx {0}; + // funcname + types of args, no type of retv + GStrIdx baseFuncSigStrIdx {0}; + GStrIdx signatureStrIdx {0}; + MemPool *codeMemPoolTmp {nullptr}; + MapleAllocator codeMemPoolTmpAllocator {nullptr}; + bool useTmpMemPool = false; + PointerAttr returnKind = PointerAttr::kPointerUndeiced; + MapleMap paramNonullTypeMap {module->GetMPAllocator().Adapter()}; + FuncDesc funcDesc {}; + MIRSymbol *profCtrTbl = nullptr; + uint32 nCtrs = 0; // number of counters + uint64 fileLinenoChksum = 0; + uint64 cfgChksum = 0; + GcovFuncInfo *funcProfData = nullptr; + void DumpFlavorLoweredThanMmpl() const; + MIRFuncType *ReconstructFormals(const std::vector &symbols, bool clearOldArgs); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_FUNCTION_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_lower.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..5ee14f941b84d112f638f9b1abca8eec6aa64ca8 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_lower.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_MIR_LOWER_H +#define MAPLE_IR_INCLUDE_MIR_LOWER_H +#include +#include "mir_builder.h" +#include "opcodes.h" + +namespace maple { +// The base value for branch probability notes and edge probabilities. +static constexpr int32 kProbAll = 10000; +static constexpr int32 kProbLikely = 9000; +static constexpr int32 kProbUnlikely = kProbAll - kProbLikely; +constexpr uint32 kNodeFirstOpnd = 0; +constexpr uint32 kNodeSecondOpnd = 1; +constexpr uint32 kNodeThirdOpnd = 2; +enum MirLowerPhase : uint8 { kLowerUnder, kLowerMe, kLowerExpandArray, kLowerBe, kLowerCG, kLowerLNO }; + +constexpr uint32 kShiftLowerMe = 1U << kLowerMe; +constexpr uint32 kShiftLowerExpandArray = 1U << kLowerExpandArray; +constexpr uint32 kShiftLowerBe = 1U << kLowerBe; +constexpr uint32 kShiftLowerCG = 1U << kLowerCG; +constexpr uint32 kShiftLowerLNO = 1U << kLowerLNO; +// check if a block node ends with an unconditional jump +inline bool OpCodeNoFallThrough(Opcode opCode) +{ + return opCode == OP_goto || opCode == OP_return || opCode == OP_switch || opCode == OP_throw || + opCode == OP_gosub || opCode == OP_retsub; +} + +inline bool IfStmtNoFallThrough(const IfStmtNode &ifStmt) +{ + return OpCodeNoFallThrough(ifStmt.GetThenPart()->GetLast()->GetOpCode()); +} + +class MIRLower { +public: + static const std::set kSetArrayHotFunc; + + MIRLower(MIRModule &mod, MIRFunction *f) : mirModule(mod), mirFunc(f) {} + + virtual ~MIRLower() = default; + + const MIRFunction *GetMirFunc() const + { + return mirFunc; + } + + void SetMirFunc(MIRFunction *f) + { + mirFunc = f; + } + + void Init() + { + mirBuilder = mirModule.GetMemPool()->New(&mirModule); + } + + virtual BlockNode *LowerIfStmt(IfStmtNode &ifStmt, bool recursive); + BlockNode *LowerSwitchStmt(SwitchNode *switchNode); + virtual BlockNode *LowerWhileStmt(WhileStmtNode &); + BlockNode *LowerDowhileStmt(WhileStmtNode &); + BlockNode *LowerDoloopStmt(DoloopNode &); + BlockNode *LowerBlock(BlockNode &); + BaseNode *LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *block); + void LowerCandCior(BlockNode &block); + void LowerBuiltinExpect(BlockNode &block); + void LowerFunc(MIRFunction &func); + BaseNode *LowerFarray(ArrayNode *array); + BaseNode *LowerCArray(ArrayNode *array); + void ExpandArrayMrt(MIRFunction &func); + IfStmtNode *ExpandArrayMrtIfBlock(IfStmtNode &node); + WhileStmtNode *ExpandArrayMrtWhileBlock(WhileStmtNode &node); + DoloopNode *ExpandArrayMrtDoloopBlock(DoloopNode &node); + ForeachelemNode *ExpandArrayMrtForeachelemBlock(ForeachelemNode &node); + BlockNode *ExpandArrayMrtBlock(BlockNode &block); + void AddArrayMrtMpl(BaseNode &exp, BlockNode &newblk); + MIRFuncType *FuncTypeFromFuncPtrExpr(BaseNode *x); + void SetLowerME() + { + lowerPhase |= kShiftLowerMe; + } + + void SetLowerLNO() + { + lowerPhase |= kShiftLowerLNO; + } + + void SetLowerExpandArray() + { + lowerPhase |= kShiftLowerExpandArray; + } + + void SetLowerBE() + { + lowerPhase |= kShiftLowerBe; + } + + void SetLowerCG() + { + lowerPhase |= kShiftLowerCG; + } + + uint8 GetOptLevel() const + { + return optLevel; + } + + void SetOptLevel(uint8 
optlvl) + { + optLevel = optlvl; + } + + bool IsLowerME() const + { + return lowerPhase & kShiftLowerMe; + } + + bool IsLowerLNO() const + { + return lowerPhase & kShiftLowerLNO; + } + + bool IsLowerExpandArray() const + { + return lowerPhase & kShiftLowerExpandArray; + } + + bool IsLowerBE() const + { + return lowerPhase & kShiftLowerBe; + } + + bool IsLowerCG() const + { + return lowerPhase & kShiftLowerCG; + } + + static bool ShouldOptArrayMrt(const MIRFunction &func); + + virtual bool InLFO() const + { + return false; + } + + GcovFuncInfo *GetFuncProfData() + { + return mirFunc->GetFuncProfData(); + } + void CopyStmtFrequency(StmtNode *newStmt, StmtNode *oldStmt) + { + DEBUG_ASSERT(GetFuncProfData() != nullptr, "nullptr check"); + if (newStmt == oldStmt) + return; + int64_t freq = GetFuncProfData()->GetStmtFreq(oldStmt->GetStmtID()); + GetFuncProfData()->SetStmtFreq(newStmt->GetStmtID(), freq); + } + +protected: + MIRModule &mirModule; + +private: + MIRFunction *mirFunc; + MIRBuilder *mirBuilder = nullptr; + uint32 lowerPhase = 0; + uint8 optLevel = 0; + LabelIdx CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_LOWER_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_module.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_module.h new file mode 100644 index 0000000000000000000000000000000000000000..dc5a78059ca63f0b195a8c5fbae4f81c06036c67 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_module.h @@ -0,0 +1,937 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_MODULE_H +#define MAPLE_IR_INCLUDE_MIR_MODULE_H +#include "types_def.h" +#include "prim_types.h" +#include "intrinsics.h" +#include "opcodes.h" +#include "mpl_logging.h" +#include "muid.h" +#include "profile.h" +#include "namemangler.h" +#include "gcov_profile.h" +#include "string_utils.h" +#if MIR_FEATURE_FULL +#include +#include +#include +#include +#include +#include +#include "thread_env.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "maple_string.h" +#endif // MIR_FEATURE_FULL + +namespace maple { +class CallInfo; // circular dependency exists, no other choice +class MIRModule; // circular dependency exists, no other choice +class MIRBuilder; // circular dependency exists, no other choice +using MIRModulePtr = MIRModule *; +using MIRBuilderPtr = MIRBuilder *; + +enum MIRFlavor { + kFlavorUnknown, + kFeProduced, + kMeProduced, + kBeLowered, + kFlavorMbc, + kMmpl, + kCmplV1, + kCmpl, // == CMPLv2 + kFlavorLmbc, +}; + +enum MIRSrcLang { + kSrcLangUnknown, + kSrcLangC, + kSrcLangJs, + kSrcLangCPlusPlus, + kSrcLangJava, + kSrcLangChar, + // SrcLangSwift : when clang adds support for Swift. +}; + +class CalleePair { +public: + CalleePair(PUIdx id, int32_t index) : id(id), index(index) {} + bool operator<(const CalleePair &func) const + { + if (id < func.id) { + return true; + } else if (id == func.id && index < func.index) { + return true; + } else { + return false; + } + } + +private: + PUIdx id; + int32_t index; +}; + +class CallerSummary { +public: + CallerSummary(PUIdx id, uint32 stmtId) : id(id), stmtId(stmtId) {} + PUIdx GetPuidx() const + { + return id; + } + uint32 GetStmtId() const + { + return stmtId; + } + +private: + PUIdx id; + uint32 stmtId; +}; + +// This data structure is for ipa-cp. The important expressions are those used in condition statements.
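+// For example (an illustrative scenario, not taken from this patch): for a callee
+// containing "if (param1 > 0) {...}", ipa-cp can record ImpExpr{stmtId-of-the-if, 1}
+// so that the branch can be pre-evaluated whenever a caller passes a constant argument.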
+class ImpExpr { +public: + ImpExpr(uint32 stmtId, uint32 paramIndex) : stmtId(stmtId), paramIndex(paramIndex) {} + uint32 GetStmtId() const + { + return stmtId; + } + uint32 GetParamIndex() const + { + return paramIndex; + } + +private: + uint32 stmtId; + uint32 paramIndex; +}; + +// blksize gives the size of the memory block in bytes; there are (blksize+3)/4 +// words; 1 bit for each word, so the bit vector's length in bytes is +// ((blksize+3)/4+7)/8 +static inline uint32 BlockSize2BitVectorSize(uint32 blkSize) +{ + uint32 bitVectorLen = ((blkSize + 3) / 4 + 7) / 8; + return ((bitVectorLen + 3) >> 2) << 2; // round up to word boundary +} + +#if MIR_FEATURE_FULL +class MIRType; // circular dependency exists, no other choice +class MIRFunction; // circular dependency exists, no other choice +class MIRSymbol; // circular dependency exists, no other choice +class MIRSymbolTable; // circular dependency exists, no other choice +class MIRFloatConst; // circular dependency exists, no other choice +class MIRDoubleConst; // circular dependency exists, no other choice +class MIRBuilder; // circular dependency exists, no other choice +class DebugInfo; // circular dependency exists, no other choice +class BinaryMplt; // circular dependency exists, no other choice +class EAConnectionGraph; // circular dependency exists, no other choice +using MIRInfoPair = std::pair; +using MIRInfoVector = MapleVector; +using MIRDataPair = std::pair>; +using MIRDataVector = MapleVector; +constexpr int kMaxEncodedValueLen = 10; +struct EncodedValue { + uint8 encodedValue[kMaxEncodedValueLen] = {0}; +}; + +class MIRTypeNameTable { +public: + explicit MIRTypeNameTable(MapleAllocator &allocator) : gStrIdxToTyIdxMap(std::less(), allocator.Adapter()) + { + } + + ~MIRTypeNameTable() = default; + + const MapleMap &GetGStrIdxToTyIdxMap() const + { + return gStrIdxToTyIdxMap; + } + + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const + { + auto it = gStrIdxToTyIdxMap.find(idx); + if (it == gStrIdxToTyIdxMap.end()) { + return TyIdx(0); + } + return it->second; + } + + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) + { + gStrIdxToTyIdxMap[gStrIdx] = tyIdx; + } + + size_t Size() const + { + return gStrIdxToTyIdxMap.size(); + } + +private: + MapleMap gStrIdxToTyIdxMap; +}; + +class MIRModule { +public: + bool firstInline = true; + using CallSite = std::pair; + + explicit MIRModule(const std::string &fn = ""); + MIRModule(MIRModule &p) = delete; + MIRModule &operator=(const MIRModule &module) = delete; + ~MIRModule(); + + MemPool *GetMemPool() const + { + return memPool; + } + MemPool *GetPragmaMemPool() + { + return pragmaMemPool; + } + MapleAllocator &GetPragmaMPAllocator() + { + return pragmaMemPoolAllocator; + } + const MapleAllocator &GetMPAllocator() const + { + return memPoolAllocator; + } + + void ReleasePragmaMemPool() + { + if (pragmaMemPool) { + memPoolCtrler.DeleteMemPool(pragmaMemPool); + } + pragmaMemPool = nullptr; + } + + MapleAllocator &GetMPAllocator() + { + return memPoolAllocator; + } + + const auto &GetFunctionList() const + { + return functionList; + } + auto &GetFunctionList() + { + return functionList; + } + + const MapleVector &GetImportedMplt() const + { + return importedMplt; + } + void PushbackImportedMplt(const std::string &importFileName) + { + importedMplt.push_back(importFileName); + } + + MIRTypeNameTable *GetTypeNameTab() + { + return typeNameTab; + } + + const MapleVector &GetTypeDefOrder() const + { + return typeDefOrder; + } + void PushbackTypeDefOrder(GStrIdx gstrIdx) + { + 
typeDefOrder.push_back(gstrIdx); + } + + void AddClass(TyIdx tyIdx); + void RemoveClass(TyIdx tyIdx); + + void SetCurFunction(MIRFunction *f) + { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + curFunctionMap[tid] = f; + return; // DO NOT delete the return statement + } + curFunction = f; + } + + MIRSrcLang GetSrcLang() const + { + return srcLang; + } + + const MapleSet &GetSymbolSet() const + { + return symbolSet; + } + + const MapleVector &GetSymbolDefOrder() const + { + return symbolDefOrder; + } + + Profile &GetProfile() + { + return profile; + } + + GcovProfileData *GetGcovProfile() + { + return gcovProfile; + } + void SetGcovProfile(GcovProfileData *info) + { + gcovProfile = info; + } + + void SetSomeSymbolNeedForDecl(bool s) + { + someSymbolNeedForwDecl = s; + } + + MIRFunction *CurFunction() const + { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + auto pair = curFunctionMap.find(tid); + return pair->second; + } + return curFunction; + } + + MemPool *CurFuncCodeMemPool() const; + MapleAllocator *CurFuncCodeMemPoolAllocator() const; + MapleAllocator &GetCurFuncCodeMPAllocator() const; + void AddExternStructType(TyIdx tyIdx); + void AddExternStructType(const MIRType *t); + void AddSymbol(StIdx stIdx); + void AddSymbol(const MIRSymbol *s); + void AddFunction(MIRFunction *pf) + { + functionList.push_back(pf); + } + + void DumpGlobals(bool emitStructureType = true) const; + void Dump(bool emitStructureType = true, const std::unordered_set *dumpFuncSet = nullptr) const; + void DumpToFile(const std::string &fileNameStr, bool emitStructureType = true) const; + void DumpInlineCandidateToFile(const std::string &fileNameStr); + void DumpDefType(); + const std::string &GetFileNameFromFileNum(uint32 fileNum) const; + + void DumpToHeaderFile(bool binaryMplt, const std::string &outputName = ""); + void DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const; + void DumpClassToFile(const std::string &path) const; + void DumpFunctionList(const std::unordered_set *dumpFuncSet) const; + void DumpGlobalArraySymbol() const; + void Emit(const std::string &outFileName) const; + uint32 GetAndIncFloatNum() + { + return floatNum++; + } + + void SetEntryFunction(MIRFunction *f) + { + entryFunc = f; + } + + MIRFunction *GetEntryFunction() const + { + return entryFunc; + } + + MIRFunction *FindEntryFunction(); + uint32 GetFileinfo(GStrIdx strIdx) const; + void OutputAsciiMpl(const char *phaseName, const char *suffix, + const std::unordered_set *dumpFuncSet = nullptr, bool emitStructureType = true, + bool binaryform = false); + void OutputFunctionListAsciiMpl(const std::string &phaseName); + const std::string &GetFileName() const + { + return fileName; + } + + const std::string &GetFileText() const + { + return fileText; + } + + bool IsNeedFile() const + { + return needFile; + } + + std::string GetFileNameAsPostfix() const; + void SetFileName(const std::string &name) + { + fileName = name; + } + + std::string GetProfileDataFileName() const + { + std::string profileDataFileName = fileName.substr(0, fileName.find_last_of(".")); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '.', '_'); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '-', '_'); + std::replace(profileDataFileName.begin(), profileDataFileName.end(), '/', '_'); + profileDataFileName = profileDataFileName + namemangler::kProfFileNameExt; + 
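// e.g. for a hypothetical fileName "out/hello.world.mpl": substr() drops ".mpl" and + // the std::replace calls yield "out_hello_world", to which kProfFileNameExt is appended +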
return profileDataFileName; + } + + bool IsJavaModule() const + { + return srcLang == kSrcLangJava; + } + + bool IsCModule() const + { + return srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus; + } + + bool IsCPlusPlusModule() const + { + return srcLang == kSrcLangCPlusPlus; + } + + bool IsCharModule() const + { + return srcLang == kSrcLangChar; + } + + void addSuperCall(const std::string &func) + { + (void)superCallSet.insert(func); + } + + bool findSuperCall(const std::string &func) const + { + return superCallSet.find(func) != superCallSet.end(); + } + + void ReleaseCurFuncMemPoolTmp(); + void SetUseFuncCodeMemPoolTmp() + { + useFuncCodeMemPoolTmp = true; + } + + void ResetUseFuncCodeMemPoolTmp() + { + useFuncCodeMemPoolTmp = false; + } + + void SetFuncInfoPrinted() const; + size_t GetOptFuncsSize() const + { + return optimizedFuncs.size(); + } + + void AddOptFuncs(MIRFunction *func) + { + optimizedFuncs.emplace(func); + } + + const MapleSet &GetOptFuncs() const + { + return optimizedFuncs; + } + + bool IsOptFunc(MIRFunction *func) const + { + if (std::find(optimizedFuncs.begin(), optimizedFuncs.end(), func) != optimizedFuncs.end()) { + return true; + } + return false; + } + + void AddOptFuncsType(MIRType *type) + { + optimizedFuncsType.emplace(type); + } + + const MapleMap *> &GetPuIdxFieldInitializedMap() const + { + std::shared_lock lock(fieldMapMutex); + return puIdxFieldInitializedMap; + } + void SetPuIdxFieldSet(PUIdx puIdx, MapleSet *fieldIDSet) + { + std::unique_lock lock(fieldMapMutex); + puIdxFieldInitializedMap[puIdx] = fieldIDSet; + } + + std::map>> &GetCalleeParamAboutInt() + { + return calleeParamAboutInt; + } + + std::map>> &GetCalleeParamAboutFloat() + { + return calleeParamAboutFloat; + } + + std::map>> &GetCalleeParamAboutDouble() + { + return calleeParamAboutDouble; + } + + std::map> &GetFuncImportantExpr() + { + return funcImportantExpr; + } + + const auto &GetRealCaller() const + { + return realCaller; + } + + auto &GetRealCaller() + { + return realCaller; + } + + const MapleSet &GetInlineGlobals() const + { + return inliningGlobals; + } + void InsertInlineGlobal(uint32_t global) + { + (void)inliningGlobals.insert(global); + } + + const MapleSet *GetPUIdxFieldInitializedMapItem(PUIdx key) const + { + std::shared_lock lock(fieldMapMutex); + auto it = puIdxFieldInitializedMap.find(key); + if (it != puIdxFieldInitializedMap.end()) { + return it->second; + } + return nullptr; + } + + std::ostream &GetOut() const + { + return out; + } + + const MIRBuilderPtr &GetMIRBuilder() const + { + return mirBuilder; + } + + const std::string &GetEntryFuncName() const + { + return entryFuncName; + } + void SetEntryFuncName(const std::string &entryFunctionName) + { + entryFuncName = entryFunctionName; + } + + TyIdx GetThrowableTyIdx() const + { + return throwableTyIdx; + } + void SetThrowableTyIdx(TyIdx throwableTypeIndex) + { + throwableTyIdx = throwableTypeIndex; + } + + bool GetWithProfileInfo() const + { + return withProfileInfo; + } + void SetWithProfileInfo(bool withProfInfo) + { + withProfileInfo = withProfInfo; + } + + BinaryMplt *GetBinMplt() + { + return binMplt; + } + void SetBinMplt(BinaryMplt *binaryMplt) + { + binMplt = binaryMplt; + } + + bool IsInIPA() const + { + return inIPA; + } + bool IsWithMe() const + { + return withMe; + } + void SetWithMe(bool isWithMe) + { + withMe = isWithMe; + } + void SetInIPA(bool isInIPA) + { + inIPA = isInIPA; + } + + void SetFileText(const std::string &inText) + { + fileText = inText; + needFile = false; + } + + MIRInfoVector 
&GetFileInfo() + { + return fileInfo; + } + void PushFileInfoPair(MIRInfoPair pair) + { + fileInfo.push_back(pair); + } + void SetFileInfo(const MIRInfoVector &fileInf) + { + fileInfo = fileInf; + } + + MapleVector &GetFileInfoIsString() + { + return fileInfoIsString; + } + void SetFileInfoIsString(const MapleVector &fileInfoIsStr) + { + fileInfoIsString = fileInfoIsStr; + } + void PushFileInfoIsString(bool isString) + { + fileInfoIsString.push_back(isString); + } + + const MIRDataVector &GetFileData() const + { + return fileData; + } + void PushbackFileData(const MIRDataPair &pair) + { + fileData.push_back(pair); + } + + const MIRInfoVector &GetSrcFileInfo() const + { + return srcFileInfo; + } + void PushbackFileInfo(const MIRInfoPair &pair) + { + srcFileInfo.push_back(pair); + } + + const MIRFlavor &GetFlavor() const + { + return flavor; + } + void SetFlavor(MIRFlavor flv) + { + flavor = flv; + } + + void SetSrcLang(MIRSrcLang sourceLanguage) + { + srcLang = sourceLanguage; + } + + uint16 GetID() const + { + return id; + } + + void SetID(uint16 num) + { + id = num; + } + + uint32 GetGlobalMemSize() const + { + return globalMemSize; + } + void SetGlobalMemSize(uint32 globalMemberSize) + { + globalMemSize = globalMemberSize; + } + + uint8 *GetGlobalBlockMap() + { + return globalBlkMap; + } + void SetGlobalBlockMap(uint8 *globalBlockMap) + { + globalBlkMap = globalBlockMap; + } + + uint8 *GetGlobalWordsTypeTagged() + { + return globalWordsTypeTagged; + } + void SetGlobalWordsTypeTagged(uint8 *globalWordsTyTagged) + { + globalWordsTypeTagged = globalWordsTyTagged; + } + + uint8 *GetGlobalWordsRefCounted() + { + return globalWordsRefCounted; + } + void SetGlobalWordsRefCounted(uint8 *counted) + { + globalWordsRefCounted = counted; + } + + uint32 GetNumFuncs() const + { + return numFuncs; + } + + void SetNumFuncs(uint32 numFunc) + { + numFuncs = numFunc; + } + + MapleVector &GetImportFiles() + { + return importFiles; + } + + void PushbackImportPath(GStrIdx path) + { + importPaths.push_back(path); + } + + MapleVector &GetAsmDecls() + { + return asmDecls; + } + + const MapleSet &GetClassList() const + { + return classList; + } + + const std::map> &GetMethod2TargetMap() const + { + return method2TargetMap; + } + + std::vector &GetMemFromMethod2TargetMap(PUIdx methodPuIdx) + { + return method2TargetMap[methodPuIdx]; + } + + void SetMethod2TargetMap(const std::map> &map) + { + method2TargetMap = map; + } + + void AddMemToMethod2TargetMap(PUIdx idx, const std::vector &callSite) + { + method2TargetMap[idx] = callSite; + } + + bool HasTargetHash(PUIdx idx, uint32 key) const + { + auto it = method2TargetHash.find(idx); + if (it == method2TargetHash.end()) { + return false; + } + return it->second.find(key) != it->second.end(); + } + void InsertTargetHash(PUIdx idx, uint32 key) + { + (void)method2TargetHash[idx].insert(key); + } + void AddValueToMethod2TargetHash(PUIdx idx, const std::unordered_set &value) + { + method2TargetHash[idx] = value; + } + + const std::map &GetEASummary() const + { + return eaSummary; + } + void SetEAConnectionGraph(GStrIdx funcNameIdx, EAConnectionGraph *eaCg) + { + eaSummary[funcNameIdx] = eaCg; + } + + DebugInfo *GetDbgInfo() const + { + return dbgInfo; + } + + void SetWithDbgInfo(bool v) + { + withDbgInfo = v; + } + + bool IsWithDbgInfo() const + { + return withDbgInfo; + } + + bool HasPartO2List() const + { + return hasPartO2List; + } + + void SetHasPartO2List(bool value) + { + hasPartO2List = value; + } + + void InitPartO2List(const std::string &list); + bool 
IsInPartO2List(const GStrIdx &idx) const + { + return partO2FuncList.count(idx) > 0; + } + + void SetBaseName(const std::string &curbaseName) + { + baseName = curbaseName; + } + const std::string &GetBaseName() const + { + return baseName; + } + void SetOutputFileName(const std::string &curOFileName) + { + outputFileName = curOFileName; + } + const std::string &GetOutputFileName() const + { + return outputFileName; + } + void SetInputFileName(const std::string &curInFileName) + { + inputFileName = curInFileName; + } + const std::string &GetInputFileName() const + { + return inputFileName; + } + + uint32 GetUniqueID() const + { + return UINT_MAX; + } + + bool HasNotWarned(uint32 position, uint32 stmtOriginalID); + +private: + void DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const; + + MemPool *memPool; + MemPool *pragmaMemPool; + MapleAllocator memPoolAllocator; + MapleAllocator pragmaMemPoolAllocator; + MapleList functionList; // function table in the order of the appearance of function bodies; it + // excludes prototype-only functions + MapleVector importedMplt; + MIRTypeNameTable *typeNameTab; + MapleVector typeDefOrder; + + MapleSet externStructTypeSet; + MapleSet symbolSet; + MapleVector symbolDefOrder; + Profile profile; + GcovProfileData *gcovProfile; + bool someSymbolNeedForwDecl = false; // some symbols' addresses used in initialization + + std::ostream &out; + MIRBuilder *mirBuilder; + std::string entryFuncName = ""; // name of the entry function + std::string fileName; + std::string fileText; + bool needFile = true; + TyIdx throwableTyIdx {0}; // a special type that is the base of the java exception type; only used for java + bool withProfileInfo = false; + + DebugInfo *dbgInfo = nullptr; + bool withDbgInfo = false; + + // for cg in mplt + BinaryMplt *binMplt = nullptr; + bool inIPA = false; + bool withMe = true; + MIRInfoVector fileInfo; // store info provided under fileInfo keyword + MapleVector fileInfoIsString; // tells if an entry has string value + MIRDataVector fileData; + MIRInfoVector srcFileInfo; // store info provided under srcFileInfo keyword + MIRFlavor flavor = kFlavorUnknown; + MIRSrcLang srcLang = kSrcLangUnknown; // the source language + uint16 id = 0xffff; + uint32 globalMemSize = 0; // size of storage space for all global variables + uint8 *globalBlkMap = nullptr; // the memory map of the block containing all the + // globals, for specifying static initializations + uint8 *globalWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in globalBlkMap has typetag; + // if yes, the typetag is the N+1th word; the + // bitvector's size is given by + // BlockSize2BitvectorSize(globalMemSize) + uint8 *globalWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word points to a reference-counted + // dynamic memory block; the bitvector's size + // is given by BlockSize2BitvectorSize(globalMemSize) + uint32 numFuncs = 0; // because puIdx 0 is reserved, numFuncs is also the highest puIdx + MapleVector importFiles; + MapleVector importPaths; + MapleVector asmDecls; + MapleSet classList; + + std::map> method2TargetMap; + std::map> method2TargetHash; + std::map eaSummary; + + bool useFuncCodeMemPoolTmp = false; + MIRFunction *entryFunc = nullptr; + uint32 floatNum = 0; + // curFunction for single thread, curFunctionMap for multiple threads + std::map curFunctionMap; + mutable std::mutex curFunctionMutex; + MIRFunction *curFunction; + MapleSet optimizedFuncs; + MapleSet
optimizedFuncsType; + // Add the field for decouple optimization + std::unordered_set superCallSet; + // records, at module scope, all the fields that are initialized in the constructor: + // if a puIdx doesn't appear in this map, the function writes to all field ids; + // if a puIdx appears in the map but its corresponding MapleSet is nullptr, it writes to no field id; + // if a puIdx appears in the map and the first value of the corresponding MapleSet is 0, the puIdx appears in + // this module and writes to all field ids; otherwise, it writes exactly the field ids in the MapleSet + MapleMap *> puIdxFieldInitializedMap; + mutable std::shared_timed_mutex fieldMapMutex; + std::map, GStrIdx> realCaller; + MapleSet inliningGlobals; // global symbols accessed, used for inlining + bool hasPartO2List = false; + MapleSet partO2FuncList; + std::string inputFileName = ""; + std::string baseName = ""; + std::string outputFileName = ""; + MapleMap> safetyWarningMap; // indexed map for large module. + std::map>> calleeParamAboutInt; + std::map>> calleeParamAboutDouble; + std::map>> calleeParamAboutFloat; + std::map> funcImportantExpr; +}; +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_MODULE_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_nodes.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_nodes.h new file mode 100755 index 0000000000000000000000000000000000000000..ce318e59958bd3d2f1df392dca8d12ba454028b9 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_nodes.h @@ -0,0 +1,4376 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_NODES_H +#define MAPLE_IR_INCLUDE_MIR_NODES_H +#include +#include +#include +#include "opcodes.h" +#include "opcode_info.h" +#include "mir_type.h" +#include "cmpl.h" +#include "mir_module.h" +#include "mir_const.h" +#include "maple_string.h" +#include "src_position.h" +#include "ptr_list_ref.h" + +namespace maple { +constexpr size_t kFirstOpnd = 0; +constexpr size_t kSecondOpnd = 1; +constexpr size_t kThirdOpnd = 2; + +extern MIRModule *theMIRModule; +extern void EmitStr(const MapleString &mplStr); + +class MIRPregTable; // circular dependency exists, no other choice +class TypeTable; // circular dependency exists, no other choice +class VerifyResult; // circular dependency exists, no other choice + +struct RegFieldPair { +public: + RegFieldPair() = default; + + RegFieldPair(FieldID fidx, PregIdx pidx) : fieldID(fidx), pregIdx(pidx) {} + + bool IsReg() const + { + return pregIdx > 0; + } + + FieldID GetFieldID() const + { + return fieldID; + } + + PregIdx GetPregIdx() const + { + return pregIdx; + } + + void SetFieldID(FieldID fld) + { + fieldID = fld; + } + + void SetPregIdx(PregIdx idx) + { + pregIdx = idx; + } + +private: + FieldID fieldID = 0; + PregIdx pregIdx = 0; +}; + +using CallReturnPair = std::pair; +using CallReturnVector = MapleVector; +// Made public so that other modules (such as maplebe) can print intrinsic names +// in debug information or comments in assembly files. +const char *GetIntrinsicName(MIRIntrinsicID intrn); +class BaseNode : public BaseNodeT { +public: + explicit BaseNode(Opcode o) + { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = 0; + } + + BaseNode(Opcode o, uint8 numOpr) + { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = numOpr; + } + + BaseNode(const Opcode o, const PrimType typ, uint8 numOpr) + { + op = o; + ptyp = typ; + typeFlag = 0; + numOpnds = numOpr; + } + + virtual ~BaseNode() = default; + + virtual BaseNode *CloneTree(MapleAllocator &allocator) const + { + return allocator.GetMemPool()->New(*this); + } + + virtual void DumpBase(int32 indent) const; + + virtual void Dump(int32 indent) const + { + DumpBase(indent); + } + + void Dump() const + { + Dump(0); + LogInfo::MapleLogger() << '\n'; + } + + virtual uint8 SizeOfInstr() const + { + return kOpcodeInfo.GetTableItemAt(GetOpCode()).instrucSize; + } + + const char *GetOpName() const; + bool MayThrowException(); + virtual size_t NumOpnds() const + { + return numOpnds; + } + + virtual BaseNode *Opnd(size_t) const + { + DEBUG_ASSERT(0, "override needed"); + return nullptr; + } + + virtual void SetOpnd(BaseNode *, size_t) + { + DEBUG_ASSERT(0, "This should not happen"); + } + + virtual bool IsLeaf() const + { + return true; + } + + virtual CallReturnVector *GetCallReturnVector() + { + return nullptr; + } + + virtual MIRType *GetCallReturnType() + { + return nullptr; + } + + virtual bool IsUnaryNode() const + { + return false; + } + + virtual bool IsBinaryNode() const + { + return false; + } + + virtual bool IsTernaryNode() const + { + return false; + } + + virtual bool IsNaryNode() const + { + return false; + } + + bool IsCondBr() const + { + return kOpcodeInfo.IsCondBr(GetOpCode()); + } + + bool IsConstval() const + { + return op == OP_constval; + } + + virtual bool Verify() const + { + return true; + } + + virtual bool Verify(VerifyResult &) const + { + return Verify(); + } + + virtual bool IsSSANode() const + { + return false; + } + + virtual bool IsSameContent(const BaseNode *node) const + { + return false; + } +}; + +class UnaryNode : 
public BaseNode { +public: + explicit UnaryNode(Opcode o) : BaseNode(o, 1) {} + + UnaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, 1) {} + + UnaryNode(Opcode o, PrimType typ, BaseNode *expr) : BaseNode(o, typ, 1), uOpnd(expr) {} + + virtual ~UnaryNode() override = default; + + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + bool Verify(VerifyResult &) const override + { + return Verify(); + } + + UnaryNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + BaseNode *Opnd(size_t) const override + { + return uOpnd; + } + + size_t NumOpnds() const override + { + return 1; + } + + void SetOpnd(BaseNode *node, size_t) override + { + uOpnd = node; + } + + bool IsLeaf() const override + { + return false; + } + + bool IsUnaryNode() const override + { + return true; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + BaseNode *uOpnd = nullptr; +}; + +class TypeCvtNode : public UnaryNode { +public: + explicit TypeCvtNode(Opcode o) : UnaryNode(o) {} + + TypeCvtNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + TypeCvtNode(Opcode o, PrimType typ, PrimType fromtyp, BaseNode *expr) + : UnaryNode(o, typ, expr), fromPrimType(fromtyp) + { + } + + virtual ~TypeCvtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + bool Verify(VerifyResult &) const override + { + return Verify(); + } + + TypeCvtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + PrimType FromType() const + { + return fromPrimType; + } + + void SetFromType(PrimType from) + { + fromPrimType = from; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + PrimType fromPrimType = kPtyInvalid; +}; + +// used for retype +class RetypeNode : public TypeCvtNode { +public: + RetypeNode() : TypeCvtNode(OP_retype) {} + + explicit RetypeNode(PrimType typ) : TypeCvtNode(OP_retype, typ) {} + + RetypeNode(PrimType typ, PrimType fromtyp, TyIdx idx, BaseNode *expr) + : TypeCvtNode(OP_retype, typ, fromtyp, expr), tyIdx(idx) + { + } + + virtual ~RetypeNode() = default; + void Dump(int32 indent) const override; + bool Verify(VerifyResult &verifyResult) const override; + + RetypeNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) + { + tyIdx = tyIdxVal; + } + +private: + bool VerifyPrimTypesAndOpnd() const; + bool CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + bool VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const; + bool VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, VerifyResult &verifyResult) const; + bool IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + + bool BothPointerOrJarray(const MIRType &from, const MIRType &to) const + { + if (from.GetKind() != to.GetKind()) { + return false; + } + return from.IsMIRPtrType() || from.IsMIRJarrayType(); + } + + bool 
IsInterfaceOrClass(const MIRType &mirType) const + { + return mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + bool IsJavaRefType(const MIRType &mirType) const + { + return mirType.IsMIRJarrayType() || mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + TyIdx tyIdx = TyIdx(0); +}; + +// used for extractbits, sext, zext +class ExtractbitsNode : public UnaryNode { +public: + explicit ExtractbitsNode(Opcode o) : UnaryNode(o) {} + + ExtractbitsNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size) + : UnaryNode(o, typ), bitsOffset(offset), bitsSize(size) + { + } + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *expr) + : UnaryNode(o, typ, expr), bitsOffset(offset), bitsSize(size) + { + } + + virtual ~ExtractbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + ExtractbitsNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + uint8 GetBitsOffset() const + { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) + { + bitsOffset = offset; + } + + uint8 GetBitsSize() const + { + return bitsSize; + } + + void SetBitsSize(uint8 size) + { + bitsSize = size; + } + +private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +class GCMallocNode : public BaseNode { +public: + explicit GCMallocNode(Opcode o) : BaseNode(o) {} + + GCMallocNode(Opcode o, PrimType typ, TyIdx tIdx) : BaseNode(o, typ, 0), tyIdx(tIdx) {} + + virtual ~GCMallocNode() = default; + + void Dump(int32 indent) const override; + + GCMallocNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + void SetOrigPType(PrimType type) + { + origPrimType = type; + } + +private: + TyIdx tyIdx = TyIdx(0); + PrimType origPrimType = kPtyInvalid; +}; + +class JarrayMallocNode : public UnaryNode { +public: + explicit JarrayMallocNode(Opcode o) : UnaryNode(o) {} + + JarrayMallocNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx) : UnaryNode(o, typ), tyIdx(typeIdx) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx, BaseNode *opnd) : UnaryNode(o, typ, opnd), tyIdx(typeIdx) {} + + virtual ~JarrayMallocNode() = default; + + void Dump(int32 indent) const override; + + JarrayMallocNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + +private: + TyIdx tyIdx = TyIdx(0); +}; + +// iaddrof also uses this node +class IreadNode : public UnaryNode { +public: + explicit IreadNode(Opcode o) : UnaryNode(o) {} + + IreadNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid) : UnaryNode(o, typ), tyIdx(typeIdx), fieldID(fid) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid, BaseNode *expr) + : UnaryNode(o, typ, expr), tyIdx(typeIdx), fieldID(fid) + { + } + + virtual ~IreadNode() = default; + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadNode *CloneTree(MapleAllocator
&allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) + { + tyIdx = tyIdxVal; + } + + FieldID GetFieldID() const + { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) + { + fieldID = fieldIDVal; + } + + bool IsSameContent(const BaseNode *node) const override; + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const + { + BaseNode *base = Opnd(0); + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + bool IsVolatile() const; + + MIRType *GetType() const; + +protected: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID = 0; +}; + +// IaddrofNode has the same member fields and member methods as IreadNode +using IaddrofNode = IreadNode; + +class IreadoffNode : public UnaryNode { +public: + IreadoffNode() : UnaryNode(OP_ireadoff) {} + + IreadoffNode(PrimType ptyp, int32 ofst) : UnaryNode(OP_ireadoff, ptyp), offset(ofst) {} + + IreadoffNode(PrimType ptyp, BaseNode *opnd, int32 ofst) : UnaryNode(OP_ireadoff, ptyp, opnd), offset(ofst) {} + + virtual ~IreadoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadoffNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + int32 GetOffset() const + { + return offset; + } + + void SetOffset(int32 offsetValue) + { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + int32 offset = 0; +}; + +class IreadFPoffNode : public BaseNode { +public: + IreadFPoffNode() : BaseNode(OP_ireadfpoff) {} + + IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} + + virtual ~IreadFPoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadFPoffNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + int32 GetOffset() const + { + return offset; + } + + void SetOffset(int32 offsetValue) + { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + int32 offset = 0; +}; + +class IreadPCoffNode : public IreadFPoffNode { +public: + IreadPCoffNode(Opcode o, PrimType typ, uint8 numopns) + { + op = o; + ptyp = typ; + numOpnds = numopns; + } + virtual ~IreadPCoffNode() {} +}; + +typedef IreadPCoffNode AddroffPCNode; + +class BinaryOpnds { +public: + virtual ~BinaryOpnds() = default; + + virtual void Dump(int32 indent) const; + + BaseNode *GetBOpnd(size_t i) const + { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + return bOpnd[i]; + } + + void SetBOpnd(BaseNode *node, size_t i) + { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + bOpnd[i] = node; + } + + virtual bool IsSameContent(const BaseNode *node) const; + +private: + BaseNode *bOpnd[kOperandNumBinary]; +}; + +class BinaryNode : public BaseNode, public BinaryOpnds { +public: + explicit BinaryNode(Opcode o) : BaseNode(o, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ, BaseNode *l, BaseNode *r) : BaseNode(o, typ, kOperandNumBinary) + { + 
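// the left operand is stored in bOpnd[0] and the right operand in bOpnd[1] +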
SetBOpnd(l, 0); + SetBOpnd(r, 1); + } + + virtual ~BinaryNode() = default; + + using BaseNode::Dump; + void Dump(int32 indent) const override; + bool Verify() const override; + + BinaryNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + bool IsCommutative() const + { + switch (GetOpCode()) { + case OP_add: + case OP_mul: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_land: + case OP_lior: + return true; + default: + return false; + } + } + + BaseNode *Opnd(size_t i) const override + { + DEBUG_ASSERT(i < kOperandNumBinary, "invalid operand idx in BinaryNode"); + DEBUG_ASSERT(i >= 0, "invalid operand idx in BinaryNode"); + return GetBOpnd(i); + } + + size_t NumOpnds() const override + { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override + { + SetBOpnd(node, i); + } + + bool IsLeaf() const override + { + return false; + } + + bool IsBinaryNode() const override + { + return true; + } + bool IsSameContent(const BaseNode *node) const override; +}; + +class CompareNode : public BinaryNode { +public: + explicit CompareNode(Opcode o) : BinaryNode(o) {} + + CompareNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + CompareNode(Opcode o, PrimType typ, PrimType otype, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), opndType(otype) + { + } + + virtual ~CompareNode() = default; + + using BinaryNode::Dump; + void Dump(int32 indent) const override; + bool Verify() const override; + + CompareNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + PrimType GetOpndType() const + { + return opndType; + } + + void SetOpndType(PrimType type) + { + opndType = type; + } + +private: + PrimType opndType = kPtyInvalid; // type of operands. 
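+ // (the node's own primType is the result type of the comparison;
+ // e.g. "eq u1 i64" compares two i64 operands and yields a u1 result)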
+}; + +class DepositbitsNode : public BinaryNode { +public: + DepositbitsNode() : BinaryNode(OP_depositbits) {} + + DepositbitsNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + DepositbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), bitsOffset(offset), bitsSize(size) + { + } + + virtual ~DepositbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DepositbitsNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint8 GetBitsOffset() const + { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) + { + bitsOffset = offset; + } + + uint8 GetBitsSize() const + { + return bitsSize; + } + + void SetBitsSize(uint8 size) + { + bitsSize = size; + } + +private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +// used for resolveinterfacefunc, resolvevirtualfunc +// bOpnd[0] stores base vtab/itab address +// bOpnd[1] stores offset +class ResolveFuncNode : public BinaryNode { +public: + explicit ResolveFuncNode(Opcode o) : BinaryNode(o) {} + + ResolveFuncNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx idx) : BinaryNode(o, typ), puIdx(idx) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx pIdx, BaseNode *opnd0, BaseNode *opnd1) + : BinaryNode(o, typ, opnd0, opnd1), puIdx(pIdx) + { + } + + virtual ~ResolveFuncNode() = default; + + void Dump(int32 indent) const override; + + ResolveFuncNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + BaseNode *GetTabBaseAddr() const + { + return GetBOpnd(0); + } + + BaseNode *GetOffset() const + { + return GetBOpnd(1); + } + + PUIdx GetPuIdx() const + { + return puIdx; + } + + void SetPUIdx(PUIdx idx) + { + puIdx = idx; + } + +private: + PUIdx puIdx = 0; +}; + +class TernaryNode : public BaseNode { +public: + explicit TernaryNode(Opcode o) : BaseNode(o, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ, BaseNode *e0, BaseNode *e1, BaseNode *e2) : BaseNode(o, typ, kOperandNumTernary) + { + topnd[0] = e0; + topnd[1] = e1; + topnd[2] = e2; + } + + virtual ~TernaryNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + TernaryNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->topnd[0] = topnd[0]->CloneTree(allocator); + node->topnd[1] = topnd[1]->CloneTree(allocator); + node->topnd[2] = topnd[2]->CloneTree(allocator); + return node; + } + + BaseNode *Opnd(size_t i) const override + { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + return topnd[i]; + } + + size_t NumOpnds() const override + { + return kOperandNumTernary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override + { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + topnd[i] = node; + } + + bool IsLeaf() const override + { + return false; + } + + bool IsTernaryNode() const override + { + return true; + } + +private: + BaseNode *topnd[kOperandNumTernary] = {nullptr, nullptr, nullptr}; +}; + 
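+// Illustrative sketch (assumptions, not code from this patch): how the node
+// classes above compose and deep-copy. "alloc" is assumed to be the current
+// function's code MapleAllocator; lhs/rhs/tVal/fVal are pre-existing expressions:
+//
+//   MemPool *mp = alloc.GetMemPool();
+//   BaseNode *cond = mp->New<CompareNode>(OP_ge, PTY_u1, PTY_i32, lhs, rhs);
+//   TernaryNode *sel = mp->New<TernaryNode>(OP_select, PTY_i32, cond, tVal, fVal);
+//   BaseNode *copy = sel->CloneTree(alloc); // recursively clones all three operands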
+class NaryOpnds { +public: + explicit NaryOpnds(MapleAllocator &mpallocter) : nOpnd(mpallocter.Adapter()) {} + + virtual ~NaryOpnds() = default; + + virtual void Dump(int32 indent) const; + bool VerifyOpnds() const; + + const MapleVector &GetNopnd() const + { + return nOpnd; + } + + MapleVector &GetNopnd() + { + return nOpnd; + } + + size_t GetNopndSize() const + { + return nOpnd.size(); + } + + BaseNode *GetNopndAt(size_t i) const + { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + return nOpnd[i]; + } + + void SetNOpndAt(size_t i, BaseNode *opnd) + { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + nOpnd[i] = opnd; + } + + void SetNOpnd(const MapleVector &val) + { + nOpnd = val; + } + +private: + MapleVector nOpnd; +}; + +class MapleValue { +public: + MapleValue(PregIdx preg) : pregIdx(preg), kind(kPregKind) {} + MapleValue(MIRSymbol *sym) : symbol(sym), kind(kSymbolKind) {} + MapleValue(MIRConst *value) : constVal(value), kind(kConstKind) {} + MapleValue(const MapleValue &val) = default; + ~MapleValue() = default; + + enum MapleValueKind { + kPregKind, + kSymbolKind, + kConstKind, + }; + + MapleValueKind GetMapleValueKind() const + { + return kind; + } + + const MIRSymbol &GetSymbol() const + { + DEBUG_ASSERT(symbol != nullptr, "value is not initialized with symbol"); + return *symbol; + } + + PregIdx GetPregIdx() const + { + DEBUG_ASSERT(kind == kPregKind, "value is not initialized with preg"); + return pregIdx; + } + + const MIRConst &GetConstValue() const + { + DEBUG_ASSERT(kind == kConstKind, "value is not initialized with const"); + return *constVal; + } + +private: + PregIdx pregIdx = 0; + MIRSymbol *symbol = nullptr; + MIRConst *constVal = nullptr; + MapleValueKind kind; +}; + +class DeoptBundleInfo { +public: + explicit DeoptBundleInfo(MapleAllocator &mpallocter) : deoptBundleInfo(mpallocter.Adapter()) {} + + virtual ~DeoptBundleInfo() = default; + + virtual void Dump(int32 indent) const; + + const MapleUnorderedMap &GetDeoptBundleInfo() const + { + return deoptBundleInfo; + } + + MapleUnorderedMap &GetDeoptBundleInfo() + { + return deoptBundleInfo; + } + + void SetDeoptBundleInfo(const MapleUnorderedMap &vregMap) + { + deoptBundleInfo = vregMap; + } + + void AddDeoptBundleInfo(int32 deoptVreg, MapleValue value) + { + deoptBundleInfo.insert(std::pair(deoptVreg, value)); + } + +private: + MapleUnorderedMap deoptBundleInfo; +}; + +class NaryNode : public BaseNode, public NaryOpnds { +public: + NaryNode(MapleAllocator &allocator, Opcode o) : BaseNode(o), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryNode(MapleAllocator &allocator, Opcode o, PrimType typ) : BaseNode(o, typ, 0), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o, PrimType typ) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o, typ) {} + + NaryNode(MapleAllocator &allocator, const NaryNode &node) + : BaseNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), NaryOpnds(allocator) + { + } + + NaryNode(const MIRModule &mod, const NaryNode &node) : NaryNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + NaryNode(NaryNode &node) = delete; + NaryNode &operator=(const NaryNode &node) = delete; + virtual ~NaryNode() = default; + + void Dump(int32 indent) const override; + + NaryNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { +
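// deep-copy each operand into the freshly allocated node +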
node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + return node; + } + + BaseNode *Opnd(size_t i) const override + { + return GetNopndAt(i); + } + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "NaryNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnd(BaseNode *node, size_t i = 0) override + { + DEBUG_ASSERT(i < GetNopnd().size(), "array index out of range"); + SetNOpndAt(i, node); + } + + bool IsLeaf() const override + { + return false; + } + + bool Verify() const override + { + return true; + } + + bool IsNaryNode() const override + { + return true; + } +}; + +class IntrinsicopNode : public NaryNode { +public: + IntrinsicopNode(MapleAllocator &allocator, Opcode o, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) + { + } + + IntrinsicopNode(const MIRModule &mod, Opcode o, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typeIdx) + { + } + + IntrinsicopNode(MapleAllocator &allocator, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o, typ), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) + { + } + + IntrinsicopNode(const MIRModule &mod, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typ, typeIdx) + { + } + + IntrinsicopNode(MapleAllocator &allocator, const IntrinsicopNode &node) + : NaryNode(allocator, node), intrinsic(node.GetIntrinsic()), tyIdx(node.GetTyIdx()) + { + } + + IntrinsicopNode(const MIRModule &mod, const IntrinsicopNode &node) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + IntrinsicopNode(IntrinsicopNode &node) = delete; + IntrinsicopNode &operator=(const IntrinsicopNode &node) = delete; + virtual ~IntrinsicopNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + IntrinsicopNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const + { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID intrinsicID) + { + intrinsic = intrinsicID; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + // IntrinDesc query + const IntrinDesc &GetIntrinDesc() const + { + return IntrinDesc::intrinTable[intrinsic]; + } + + bool VerifyJArrayLength(VerifyResult &verifyResult) const; + +private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; +}; + +class ConstvalNode : public BaseNode { +public: + ConstvalNode() : BaseNode(OP_constval) {} + + explicit ConstvalNode(PrimType typ) : BaseNode(OP_constval, typ, 0) {} + + explicit ConstvalNode(MIRConst *constv) : BaseNode(OP_constval), constVal(constv) {} + + ConstvalNode(PrimType typ, MIRConst *constv) : BaseNode(OP_constval, typ, 0), constVal(constv) {} + virtual ~ConstvalNode() = default; + void Dump(int32 indent) const override; + + ConstvalNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + const MIRConst *GetConstVal() const + { + return constVal; + } + + MIRConst *GetConstVal() + { + return constVal; + } + + void SetConstVal(MIRConst *val) + { + constVal = val; + } + + bool IsSameContent(const 
BaseNode *node) const override; + +private: + MIRConst *constVal = nullptr; +}; + +class ConststrNode : public BaseNode { +public: + ConststrNode() : BaseNode(OP_conststr) {} + + explicit ConststrNode(UStrIdx i) : BaseNode(OP_conststr), strIdx(i) {} + + ConststrNode(PrimType typ, UStrIdx i) : BaseNode(OP_conststr, typ, 0), strIdx(i) {} + + virtual ~ConststrNode() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + ConststrNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + UStrIdx GetStrIdx() const + { + return strIdx; + } + + void SetStrIdx(UStrIdx idx) + { + strIdx = idx; + } + +private: + UStrIdx strIdx = UStrIdx(0); +}; + +class Conststr16Node : public BaseNode { +public: + Conststr16Node() : BaseNode(OP_conststr16) {} + + explicit Conststr16Node(U16StrIdx i) : BaseNode(OP_conststr16), strIdx(i) {} + + Conststr16Node(PrimType typ, U16StrIdx i) : BaseNode(OP_conststr16, typ, 0), strIdx(i) {} + + virtual ~Conststr16Node() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + Conststr16Node *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + U16StrIdx GetStrIdx() const + { + return strIdx; + } + + void SetStrIdx(U16StrIdx idx) + { + strIdx = idx; + } + +private: + U16StrIdx strIdx = U16StrIdx(0); +}; + +class SizeoftypeNode : public BaseNode { +public: + SizeoftypeNode() : BaseNode(OP_sizeoftype) {} + + explicit SizeoftypeNode(TyIdx t) : BaseNode(OP_sizeoftype), tyIdx(t) {} + + SizeoftypeNode(PrimType type, TyIdx t) : BaseNode(OP_sizeoftype, type, 0), tyIdx(t) {} + + virtual ~SizeoftypeNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SizeoftypeNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + +private: + TyIdx tyIdx = TyIdx(0); +}; + +class FieldsDistNode : public BaseNode { +public: + FieldsDistNode() : BaseNode(OP_fieldsdist) {} + + FieldsDistNode(TyIdx t, FieldID f1, FieldID f2) : BaseNode(OP_fieldsdist), tyIdx(t), fieldID1(f1), fieldID2(f2) {} + + FieldsDistNode(PrimType typ, TyIdx t, FieldID f1, FieldID f2) + : BaseNode(OP_fieldsdist, typ, 0), tyIdx(t), fieldID1(f1), fieldID2(f2) + { + } + + virtual ~FieldsDistNode() = default; + + void Dump(int32 indent) const override; + + FieldsDistNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + FieldID GetFieldID1() const + { + return fieldID1; + } + + void SetFiledID1(FieldID id) + { + fieldID1 = id; + } + + FieldID GetFieldID2() const + { + return fieldID2; + } + + void SetFiledID2(FieldID id) + { + fieldID2 = id; + } + +private: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID1 = 0; + FieldID fieldID2 = 0; +}; + +class ArrayNode : public NaryNode { +public: + ArrayNode(MapleAllocator &allocator) : NaryNode(allocator, OP_array) {} + + explicit ArrayNode(const MIRModule &mod) : ArrayNode(mod.GetCurFuncCodeMPAllocator()) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx) : NaryNode(allocator, OP_array, typ), tyIdx(idx) {} + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx) : 
ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx, bool bcheck) + : NaryNode(allocator, OP_array, typ), tyIdx(idx), boundsCheck(bcheck) + { + } + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx, bool bcheck) + : ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx, bcheck) + { + } + + ArrayNode(MapleAllocator &allocator, const ArrayNode &node) + : NaryNode(allocator, node), tyIdx(node.tyIdx), boundsCheck(node.boundsCheck) + { + } + + ArrayNode(const MIRModule &mod, const ArrayNode &node) : ArrayNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + ArrayNode(ArrayNode &node) = delete; + ArrayNode &operator=(const ArrayNode &node) = delete; + virtual ~ArrayNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool IsSameBase(ArrayNode *); + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "ArrayNode has wrong numOpnds field"); + return GetNopndSize(); + } + + ArrayNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->boundsCheck = boundsCheck; + node->SetNumOpnds(GetNopndSize()); + return node; + } + + const MIRType *GetArrayType(const TypeTable &tt) const; + MIRType *GetArrayType(const TypeTable &tt); + + BaseNode *GetIndex(size_t i) + { + return Opnd(i + 1); + } + + const BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i) const; + BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i); + + BaseNode *GetBase() + { + return Opnd(0); + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + bool GetBoundsCheck() const + { + return boundsCheck; + } + + void SetBoundsCheck(bool check) + { + boundsCheck = check; + } + +private: + TyIdx tyIdx; + bool boundsCheck = true; +}; + +class AddrofNode : public BaseNode { +public: + explicit AddrofNode(Opcode o) : BaseNode(o), stIdx() {} + + AddrofNode(Opcode o, PrimType typ) : AddrofNode(o, typ, StIdx(), 0) {} + + AddrofNode(Opcode o, PrimType typ, StIdx sIdx, FieldID fid) : BaseNode(o, typ, 0), stIdx(sIdx), fieldID(fid) {} + + virtual ~AddrofNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool CheckNode(const MIRModule &mod) const; + + AddrofNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + + StIdx GetStIdx() const + { + return stIdx; + } + + void SetStIdx(StIdx idx) + { + stIdx = idx; + } + + void SetStFullIdx(uint32 idx) + { + stIdx.SetFullIdx(idx); + } + + FieldID GetFieldID() const + { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) + { + fieldID = fieldIDVal; + } + + bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + +private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +// DreadNode has the same member fields and member methods as AddrofNode +using DreadNode = AddrofNode; + +class DreadoffNode : public BaseNode { +public: + explicit DreadoffNode(Opcode o) : BaseNode(o), stIdx() {} + + DreadoffNode(Opcode o, PrimType typ) : BaseNode(o, typ, 0), stIdx() {} + + virtual ~DreadoffNode() = default; + + void Dump(int32 indent) const override; + + DreadoffNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New(*this); + } + 
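+    // Leaf address/read nodes like this one own no operand subtrees, so CloneTree
+    // is a plain memberwise copy of the opcode, symbol index and offset.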
+ bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + +public: + StIdx stIdx; + int32 offset = 0; +}; + +// AddrofoffNode has the same member fields and member methods as DreadoffNode +using AddrofoffNode = DreadoffNode; + +class RegreadNode : public BaseNode { +public: + RegreadNode() : BaseNode(OP_regread) {} + + explicit RegreadNode(PregIdx pIdx) : BaseNode(OP_regread), regIdx(pIdx) {} + + RegreadNode(PrimType primType, PregIdx pIdx) : RegreadNode(pIdx) + { + ptyp = primType; + } + + virtual ~RegreadNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + RegreadNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New<RegreadNode>(*this); + } + + PregIdx GetRegIdx() const + { + return regIdx; + } + void SetRegIdx(PregIdx reg) + { + regIdx = reg; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +class AddroffuncNode : public BaseNode { +public: + AddroffuncNode() : BaseNode(OP_addroffunc) {} + + AddroffuncNode(PrimType typ, PUIdx pIdx) : BaseNode(OP_addroffunc, typ, 0), puIdx(pIdx) {} + + virtual ~AddroffuncNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroffuncNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New<AddroffuncNode>(*this); + } + + PUIdx GetPUIdx() const + { + return puIdx; + } + + void SetPUIdx(PUIdx puIdxValue) + { + puIdx = puIdxValue; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + PUIdx puIdx = 0; // 32bit now +}; + +class AddroflabelNode : public BaseNode { +public: + AddroflabelNode() : BaseNode(OP_addroflabel) {} + + explicit AddroflabelNode(uint32 ofst) : BaseNode(OP_addroflabel), offset(ofst) {} + + virtual ~AddroflabelNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroflabelNode *CloneTree(MapleAllocator &allocator) const override + { + return allocator.GetMemPool()->New<AddroflabelNode>(*this); + } + + uint32 GetOffset() const + { + return offset; + } + + void SetOffset(uint32 offsetValue) + { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + +private: + LabelIdx offset = 0; +}; + +// for cleanuptry, jscatch, finally, retsub, endtry, membaracquire, membarrelease, +// membarstoreload, membarstorestore +class StmtNode : public BaseNode, public PtrListNodeBase<StmtNode> { +public: + static std::atomic<uint32> stmtIDNext; // for assigning stmtID, initialized to 1; 0 is reserved + static uint32 lastPrintedLineNum; // used during printing ascii output + static uint16 lastPrintedColumnNum; + + explicit StmtNode(Opcode o) : BaseNode(o), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) + { + ++stmtIDNext; + } + + StmtNode(Opcode o, uint8 numOpr) + : BaseNode(o, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) + { + ++stmtIDNext; + } + + StmtNode(Opcode o, PrimType typ, uint8 numOpr) + : BaseNode(o, typ, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) + { + ++stmtIDNext; + } + + // used for NaryStmtNode when clone + StmtNode(Opcode o, PrimType typ, uint8 numOpr, const SrcPosition &srcPosition, uint32 stmtOriginalID, + StmtAttrs attrs) + : BaseNode(o, typ, numOpr), + PtrListNodeBase(), + srcPosition(srcPosition), + stmtID(stmtIDNext), + stmtOriginalID(stmtOriginalID), + stmtAttrs(attrs) + { + ++stmtIDNext; + } + +
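+    // Every constructor above draws a fresh stmtID from the shared atomic counter
+    // stmtIDNext; stmtOriginalID keeps the ID of the first version of a statement,
+    // so clones and copies emitted from MeStmt can be traced back to it.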
virtual ~StmtNode() = default; + + using BaseNode::Dump; + void DumpBase(int32 indent) const override; + void Dump(int32 indent) const override; + void InsertAfterThis(StmtNode &pos); + void InsertBeforeThis(StmtNode &pos); + + virtual StmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *s = allocator.GetMemPool()->New(*this); + s->SetStmtID(stmtIDNext++); + s->SetMeStmtID(meStmtID); + return s; + } + + virtual bool Verify() const override + { + return true; + } + + virtual bool Verify(VerifyResult &) const override + { + return Verify(); + } + + const SrcPosition &GetSrcPos() const + { + return srcPosition; + } + + SrcPosition &GetSrcPos() + { + return srcPosition; + } + + void SetSrcPos(SrcPosition pos) + { + srcPosition = pos; + } + + uint32 GetStmtID() const + { + return stmtID; + } + + void SetStmtID(uint32 id) + { + stmtID = id; + } + + uint32 GetOriginalID() const + { + return stmtOriginalID; + } + + void SetOriginalID(uint32 id) + { + stmtOriginalID = id; + } + + uint32 GetMeStmtID() const + { + return meStmtID; + } + + void SetMeStmtID(uint32 id) + { + meStmtID = id; + } + + StmtNode *GetRealNext() const; + + virtual BaseNode *GetRHS() const + { + return nullptr; + } + + bool GetIsLive() const + { + return isLive; + } + + void SetIsLive(bool live) const + { + isLive = live; + } + + bool IsInSafeRegion() const + { + return stmtAttrs.GetAttr(STMTATTR_insaferegion); + } + + void SetInSafeRegion() + { + stmtAttrs.SetAttr(STMTATTR_insaferegion); + } + + void CopySafeRegionAttr(const StmtAttrs &stmtAttr) + { + this->stmtAttrs.AppendAttr(stmtAttr.GetTargetAttrFlag(STMTATTR_insaferegion)); + } + + const StmtAttrs &GetStmtAttrs() const + { + return stmtAttrs; + } + + void SetAttr(StmtAttrKind x) + { + stmtAttrs.SetAttr(x); + } + + bool GetAttr(StmtAttrKind x) const + { + return stmtAttrs.GetAttr(x); + } + + void SetStmtAttrs(StmtAttrs stmtAttrs_) + { + stmtAttrs = stmtAttrs_; + } + +protected: + SrcPosition srcPosition; + +private: + uint32 stmtID; // a unique ID assigned to it + uint32 stmtOriginalID; // first define id, no change when clone, need copy when emit from MeStmt + uint32 meStmtID = 0; // Need copy when emit from MeStmt, attention:this just for two stmt(if && call) + mutable bool isLive = false; // only used for dse to save compile time + // mutable to keep const-ness at most situation + StmtAttrs stmtAttrs; +}; + +class IassignNode : public StmtNode { +public: + IassignNode() : IassignNode(TyIdx(0), 0, nullptr, nullptr) {} + + IassignNode(TyIdx tyIdx, FieldID fieldID, BaseNode *addrOpnd, BaseNode *rhsOpnd) + : StmtNode(OP_iassign), tyIdx(tyIdx), fieldID(fieldID), addrExpr(addrOpnd), rhs(rhsOpnd) + { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~IassignNode() = default; + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + FieldID GetFieldID() const + { + return fieldID; + } + + void SetFieldID(FieldID fid) + { + fieldID = fid; + } + + BaseNode *Opnd(size_t i) const override + { + if (i == 0) { + return addrExpr; + } + return rhs; + } + + size_t NumOpnds() const override + { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i) override + { + if (i == 0) { + addrExpr = node; + } else { + rhs = node; + } + } + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignNode *CloneTree(MapleAllocator &allocator) const override + { + auto *bn = allocator.GetMemPool()->New(*this); + bn->SetStmtID(stmtIDNext++); + 
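+        // clone both operand trees below so the copy shares no expression nodes
+        // with the original statement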
bn->SetOpnd(addrExpr->CloneTree(allocator), 0); + bn->SetRHS(rhs->CloneTree(allocator)); + return bn; + } + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const + { + BaseNode *base = addrExpr; + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + void SetAddrExpr(BaseNode *exp) + { + addrExpr = exp; + } + + BaseNode *GetRHS() const override + { + return rhs; + } + + void SetRHS(BaseNode *node) + { + rhs = node; + } + + bool AssigningVolatile() const; + +private: + TyIdx tyIdx; + FieldID fieldID; + +public: + BaseNode *addrExpr; + BaseNode *rhs; +}; + +// goto and gosub +class GotoNode : public StmtNode { +public: + explicit GotoNode(Opcode o) : StmtNode(o) {} + + GotoNode(Opcode o, uint32 ofst) : StmtNode(o), offset(ofst) {} + + virtual ~GotoNode() = default; + + void Dump(int32 indent) const override; + + GotoNode *CloneTree(MapleAllocator &allocator) const override + { + auto *g = allocator.GetMemPool()->New<GotoNode>(*this); + g->SetStmtID(stmtIDNext++); + return g; + } + + uint32 GetOffset() const + { + return offset; + } + + void SetOffset(uint32 o) + { + offset = o; + } + +private: + uint32 offset = 0; +}; + +// jstry +class JsTryNode : public StmtNode { +public: + JsTryNode() : StmtNode(OP_jstry) {} + + JsTryNode(uint16 catchofst, uint16 finallyofset) + : StmtNode(OP_jstry), catchOffset(catchofst), finallyOffset(finallyofset) + { + } + + virtual ~JsTryNode() = default; + + void Dump(int32 indent) const override; + + JsTryNode *CloneTree(MapleAllocator &allocator) const override + { + auto *t = allocator.GetMemPool()->New<JsTryNode>(*this); + t->SetStmtID(stmtIDNext++); + return t; + } + + uint16 GetCatchOffset() const + { + return catchOffset; + } + + void SetCatchOffset(uint32 offset) + { + catchOffset = offset; + } + + uint16 GetFinallyOffset() const + { + return finallyOffset; + } + + void SetFinallyOffset(uint32 offset) + { + finallyOffset = offset; + } + +private: + uint16 catchOffset = 0; + uint16 finallyOffset = 0; +}; + +// try, cpptry +class TryNode : public StmtNode { +public: + explicit TryNode(MapleAllocator &allocator) : StmtNode(OP_try), offsets(allocator.Adapter()) {} + + explicit TryNode(const MapleVector<LabelIdx> &offsets) : StmtNode(OP_try), offsets(offsets) {} + + explicit TryNode(const MIRModule &mod) : TryNode(mod.GetCurFuncCodeMPAllocator()) {} + + TryNode(TryNode &node) = delete; + TryNode &operator=(const TryNode &node) = delete; + virtual ~TryNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + MapleVector<LabelIdx> &GetOffsets() + { + return offsets; + } + + LabelIdx GetOffset(size_t i) const + { + DEBUG_ASSERT(i < offsets.size(), "array index out of range"); + return offsets.at(i); + } + + void SetOffset(LabelIdx offsetValue, size_t i) + { + DEBUG_ASSERT(i < offsets.size(), "array index out of range"); + offsets[i] = offsetValue; + } + + void AddOffset(LabelIdx offsetValue) + { + offsets.push_back(offsetValue); + } + + void ResizeOffsets(size_t offsetSize) + { + offsets.resize(offsetSize); + } + + void SetOffsets(const MapleVector<LabelIdx> &offsetsValue) + { + offsets = offsetsValue; + } + + size_t GetOffsetsCount() const + { + return offsets.size(); + } + + MapleVector<LabelIdx>::iterator GetOffsetsBegin() + { + return offsets.begin(); + } + + MapleVector<LabelIdx>::iterator GetOffsetsEnd() + { + return offsets.end(); + } + + void OffsetsInsert(MapleVector<LabelIdx>::iterator a, MapleVector<LabelIdx>::iterator b, + MapleVector<LabelIdx>::iterator c) + { + (void)offsets.insert(a, b, c); + } + + TryNode 
*CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<TryNode>(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < offsets.size(); ++i) { + node->AddOffset(offsets[i]); + } + return node; + } + +private: + MapleVector<LabelIdx> offsets; +}; + +// catch +class CatchNode : public StmtNode { +public: + explicit CatchNode(MapleAllocator &allocator) : StmtNode(OP_catch), exceptionTyIdxVec(allocator.Adapter()) {} + + explicit CatchNode(const MapleVector<TyIdx> &tyIdxVec) : StmtNode(OP_catch), exceptionTyIdxVec(tyIdxVec) {} + + explicit CatchNode(const MIRModule &mod) : CatchNode(mod.GetCurFuncCodeMPAllocator()) {} + + CatchNode(CatchNode &node) = delete; + CatchNode &operator=(const CatchNode &node) = delete; + virtual ~CatchNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + TyIdx GetExceptionTyIdxVecElement(size_t i) const + { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + return exceptionTyIdxVec[i]; + } + + const MapleVector<TyIdx> &GetExceptionTyIdxVec() const + { + return exceptionTyIdxVec; + } + + size_t Size() const + { + return exceptionTyIdxVec.size(); + } + + void SetExceptionTyIdxVecElement(TyIdx idx, size_t i) + { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + exceptionTyIdxVec[i] = idx; + } + + void SetExceptionTyIdxVec(MapleVector<TyIdx> vec) + { + exceptionTyIdxVec = vec; + } + + void PushBack(TyIdx idx) + { + exceptionTyIdxVec.push_back(idx); + } + + CatchNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<CatchNode>(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < Size(); ++i) { + node->PushBack(GetExceptionTyIdxVecElement(i)); + } + return node; + } + +private: + MapleVector<TyIdx> exceptionTyIdxVec; +}; + +// cppcatch +class CppCatchNode : public StmtNode { +public: + explicit CppCatchNode(const TyIdx &idx) : StmtNode(OP_cppcatch), exceptionTyIdx(idx) {} + explicit CppCatchNode() : CppCatchNode(TyIdx(0)) {} + + explicit CppCatchNode(const CppCatchNode &node) = delete; + CppCatchNode &operator=(const CppCatchNode &node) = delete; + ~CppCatchNode() = default; + + void Dump(int32 indent) const override; + + CppCatchNode *CloneTree(MapleAllocator &allocator) const override + { + CppCatchNode *node = allocator.GetMemPool()->New<CppCatchNode>(); + node->SetStmtID(stmtIDNext++); + node->exceptionTyIdx = exceptionTyIdx; + return node; + } + + CppCatchNode *CloneTree(const MIRModule &mod) const + { + return CppCatchNode::CloneTree(*mod.CurFuncCodeMemPoolAllocator()); + } + +public: + TyIdx exceptionTyIdx; +}; + +using CasePair = std::pair<int64, LabelIdx>; +using CaseVector = MapleVector<CasePair>; +class SwitchNode : public StmtNode { +public: + explicit SwitchNode(MapleAllocator &allocator) : StmtNode(OP_switch, 1), switchTable(allocator.Adapter()) {} + + explicit SwitchNode(const MIRModule &mod) : SwitchNode(mod.GetCurFuncCodeMPAllocator()) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label) : SwitchNode(allocator, label, nullptr) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label, BaseNode *opnd) + : StmtNode(OP_switch, 1), switchOpnd(opnd), defaultLabel(label), switchTable(allocator.Adapter()) + { + } + + SwitchNode(const MIRModule &mod, LabelIdx label) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + SwitchNode(MapleAllocator &allocator, const SwitchNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), + defaultLabel(node.GetDefaultLabel()), + switchTable(allocator.Adapter()) + { + } + + 
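+    // The allocator-based copy constructor above deliberately leaves switchOpnd null
+    // and switchTable empty; CloneTree re-populates both on the new node.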
SwitchNode(const MIRModule &mod, const SwitchNode &node) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + SwitchNode(SwitchNode &node) = delete; + SwitchNode &operator=(const SwitchNode &node) = delete; + virtual ~SwitchNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SwitchNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<SwitchNode>(allocator, *this); + node->SetSwitchOpnd(switchOpnd->CloneTree(allocator)); + for (size_t i = 0; i < switchTable.size(); ++i) { + node->InsertCasePair(switchTable[i]); + } + return node; + } + + BaseNode *Opnd(size_t) const override + { + return switchOpnd; + } + + void SetOpnd(BaseNode *node, size_t) override + { + switchOpnd = node; + } + + BaseNode *GetSwitchOpnd() const + { + return switchOpnd; + } + + void SetSwitchOpnd(BaseNode *node) + { + switchOpnd = node; + } + + LabelIdx GetDefaultLabel() const + { + return defaultLabel; + } + + void SetDefaultLabel(LabelIdx idx) + { + defaultLabel = idx; + } + + const CaseVector &GetSwitchTable() const + { + return switchTable; + } + + CaseVector &GetSwitchTable() + { + return switchTable; + } + + CasePair GetCasePair(size_t idx) const + { + DEBUG_ASSERT(idx < switchTable.size(), "out of range in SwitchNode::GetCasePair"); + return switchTable.at(idx); + } + + void SetSwitchTable(CaseVector vec) + { + switchTable = vec; + } + + void InsertCasePair(CasePair pair) + { + switchTable.push_back(pair); + } + + void UpdateCaseLabelAt(size_t i, LabelIdx idx) + { + switchTable[i] = std::make_pair(switchTable[i].first, idx); + } + + void SortCasePair(bool func(const CasePair &, const CasePair &)) + { + std::sort(switchTable.begin(), switchTable.end(), func); + } + +private: + BaseNode *switchOpnd = nullptr; + LabelIdx defaultLabel = 0; + CaseVector switchTable; +}; + +using MCasePair = std::pair<BaseNode *, LabelIdx>; +using MCaseVector = MapleVector<MCasePair>; +class MultiwayNode : public StmtNode { +public: + explicit MultiwayNode(MapleAllocator &allocator) : StmtNode(OP_multiway, 1), multiWayTable(allocator.Adapter()) {} + + explicit MultiwayNode(const MIRModule &mod) : MultiwayNode(mod.GetCurFuncCodeMPAllocator()) {} + + MultiwayNode(MapleAllocator &allocator, LabelIdx label) + : StmtNode(OP_multiway, 1), defaultLabel(label), multiWayTable(allocator.Adapter()) + { + } + + MultiwayNode(const MIRModule &mod, LabelIdx label) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + MultiwayNode(MapleAllocator &allocator, const MultiwayNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + defaultLabel(node.defaultLabel), + multiWayTable(allocator.Adapter()) + { + } + + MultiwayNode(const MIRModule &mod, const MultiwayNode &node) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + MultiwayNode(MultiwayNode &node) = delete; + MultiwayNode &operator=(const MultiwayNode &node) = delete; + virtual ~MultiwayNode() = default; + + void Dump(int32 indent) const override; + + MultiwayNode *CloneTree(MapleAllocator &allocator) const override + { + auto *nd = allocator.GetMemPool()->New<MultiwayNode>(allocator, *this); + nd->multiWayOpnd = static_cast<BaseNode *>(multiWayOpnd->CloneTree(allocator)); + for (size_t i = 0; i < multiWayTable.size(); ++i) { + BaseNode *node = multiWayTable[i].first->CloneTree(allocator); + MCasePair pair(static_cast<BaseNode *>(node), multiWayTable[i].second); + nd->multiWayTable.push_back(pair); + } + return nd; + } + + BaseNode *Opnd(size_t i) const override + { + 
return *(&multiWayOpnd + static_cast(i)); + } + + const BaseNode *GetMultiWayOpnd() const + { + return multiWayOpnd; + } + + void SetMultiWayOpnd(BaseNode *multiwayOpndPara) + { + multiWayOpnd = multiwayOpndPara; + } + + void SetDefaultlabel(LabelIdx defaultLabelPara) + { + defaultLabel = defaultLabelPara; + } + + void AppendElemToMultiWayTable(const MCasePair &mCasrPair) + { + multiWayTable.push_back(mCasrPair); + } + + const MCaseVector &GetMultiWayTable() const + { + return multiWayTable; + } + +private: + BaseNode *multiWayOpnd = nullptr; + LabelIdx defaultLabel = 0; + MCaseVector multiWayTable; +}; + +// eval, throw, free, decref, incref, decrefreset, assertnonnull, igoto +class UnaryStmtNode : public StmtNode { +public: + explicit UnaryStmtNode(Opcode o) : StmtNode(o, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ) : StmtNode(o, typ, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ, BaseNode *opnd) : StmtNode(o, typ, 1), uOpnd(opnd) {} + + virtual ~UnaryStmtNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + + bool Verify() const override + { + return uOpnd->Verify(); + } + + bool Verify(VerifyResult &verifyResult) const override + { + if (GetOpCode() == OP_throw && !VerifyThrowable(verifyResult)) { + return false; + } + return uOpnd->Verify(verifyResult); + } + + UnaryStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + bool IsLeaf() const override + { + return false; + } + + BaseNode *GetRHS() const override + { + return Opnd(0); + } + + virtual void SetRHS(BaseNode *rhs) + { + this->SetOpnd(rhs, 0); + } + + BaseNode *Opnd(size_t i = 0) const override + { + (void)i; + return uOpnd; + } + + void SetOpnd(BaseNode *node, size_t) override + { + uOpnd = node; + } + +private: + bool VerifyThrowable(VerifyResult &verifyResult) const; + + BaseNode *uOpnd = nullptr; +}; + +// dassign, maydassign +class DassignNode : public UnaryStmtNode { +public: + DassignNode() : UnaryStmtNode(OP_dassign), stIdx() {} + + explicit DassignNode(PrimType typ) : UnaryStmtNode(OP_dassign, typ), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassign, typ, opnd), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd, StIdx idx, FieldID fieldID) + : UnaryStmtNode(OP_dassign, typ, opnd), stIdx(idx), fieldID(fieldID) + { + } + + DassignNode(BaseNode *opnd, StIdx idx, FieldID fieldID) : DassignNode(kPtyInvalid, opnd, idx, fieldID) {} + + virtual ~DassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DassignNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override + { + return 1; + } + + bool IsIdentityDassign() const + { + BaseNode *rhs = GetRHS(); + if (rhs->GetOpCode() != OP_dread) { + return false; + } + auto *dread = static_cast(rhs); + return (stIdx == dread->GetStIdx()); + } + + BaseNode *GetRHS() const override + { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override + { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + StIdx GetStIdx() const + { + return stIdx; + } + void SetStIdx(StIdx s) + { + stIdx = s; + } + + const FieldID 
&GetFieldID() const + { + return fieldID; + } + + void SetFieldID(FieldID f) + { + fieldID = f; + } + + bool AssigningVolatile(const MIRModule &mod) const; + +private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +class DassignoffNode : public UnaryStmtNode { +public: + DassignoffNode() : UnaryStmtNode(OP_dassignoff), stIdx() {} + + explicit DassignoffNode(PrimType typ) : UnaryStmtNode(OP_dassignoff, typ), stIdx() {} + + DassignoffNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassignoff, typ, opnd), stIdx() {} + + DassignoffNode(const StIdx &lhsStIdx, int32 dOffset, PrimType rhsType, BaseNode *rhsNode) + : DassignoffNode(rhsType, rhsNode) + { + stIdx = lhsStIdx; + offset = dOffset; + } + virtual ~DassignoffNode() = default; + + void Dump(int32 indent) const override; + + DassignoffNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override + { + return 1; + } + + BaseNode *GetRHS() const override + { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override + { + UnaryStmtNode::SetOpnd(rhs, 0); + } + +public: + StIdx stIdx; + int32 offset = 0; +}; + +class RegassignNode : public UnaryStmtNode { +public: + RegassignNode() : UnaryStmtNode(OP_regassign) {} + + RegassignNode(PrimType primType, PregIdx idx, BaseNode *opnd) + : UnaryStmtNode(OP_regassign, primType, opnd), regIdx(idx) + { + } + + virtual ~RegassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + RegassignNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + BaseNode *GetRHS() const override + { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override + { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + PregIdx GetRegIdx() const + { + return regIdx; + } + void SetRegIdx(PregIdx idx) + { + regIdx = idx; + } + +private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +// brtrue and brfalse +class CondGotoNode : public UnaryStmtNode { +public: + static const int32 probAll; + explicit CondGotoNode(Opcode o) : CondGotoNode(o, 0, nullptr) {} + + CondGotoNode(Opcode o, uint32 offset, BaseNode *opnd) : UnaryStmtNode(o, kPtyInvalid, opnd), offset(offset) + { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + virtual ~CondGotoNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + uint32 GetOffset() const + { + return offset; + } + + void SetOffset(uint32 offsetValue) + { + offset = offsetValue; + } + + bool IsBranchProbValid() const + { + return branchProb > 0 && branchProb < probAll; + } + + int32 GetBranchProb() const + { + return branchProb; + } + + void SetBranchProb(int32 prob) + { + branchProb = prob; + } + + void ReverseBranchProb() + { + if (IsBranchProbValid()) { + branchProb = probAll - branchProb; + } + } + + void InvalidateBranchProb() + { + if (IsBranchProbValid()) { + branchProb = -1; + } + } + + CondGotoNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + +private: + uint32 offset; + int32 branchProb = -1; // branch probability, a negative number indicates that 
the probability is invalid +}; + +using SmallCasePair = std::pair<uint32, LabelIdx>; +using SmallCaseVector = MapleVector<SmallCasePair>; +class RangeGotoNode : public UnaryStmtNode { +public: + explicit RangeGotoNode(MapleAllocator &allocator) : UnaryStmtNode(OP_rangegoto), rangegotoTable(allocator.Adapter()) + { + } + + explicit RangeGotoNode(const MIRModule &mod) : RangeGotoNode(mod.GetCurFuncCodeMPAllocator()) {} + + RangeGotoNode(MapleAllocator &allocator, const RangeGotoNode &node) + : UnaryStmtNode(node), tagOffset(node.tagOffset), rangegotoTable(allocator.Adapter()) + { + } + + RangeGotoNode(const MIRModule &mod, const RangeGotoNode &node) + : RangeGotoNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + RangeGotoNode(RangeGotoNode &node) = delete; + RangeGotoNode &operator=(const RangeGotoNode &node) = delete; + virtual ~RangeGotoNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + RangeGotoNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<RangeGotoNode>(allocator, *this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + for (size_t i = 0; i < rangegotoTable.size(); ++i) { + node->rangegotoTable.push_back(rangegotoTable[i]); + } + return node; + } + + const SmallCaseVector &GetRangeGotoTable() const + { + return rangegotoTable; + } + + const SmallCasePair &GetRangeGotoTableItem(size_t i) const + { + return rangegotoTable.at(i); + } + + void SetRangeGotoTable(SmallCaseVector rt) + { + rangegotoTable = rt; + } + + void AddRangeGoto(uint32 tag, LabelIdx idx) + { + rangegotoTable.push_back(SmallCasePair(tag, idx)); + } + + int32 GetTagOffset() const + { + return tagOffset; + } + + void SetTagOffset(int32 offset) + { + tagOffset = offset; + } + +private: + int32 tagOffset = 0; + // add each tag to tagOffset field to get the actual tag values + SmallCaseVector rangegotoTable; +}; + +class BlockNode : public StmtNode { +public: + using StmtNodes = PtrListRef<StmtNode>; + + BlockNode() : StmtNode(OP_block) {} + + ~BlockNode() + { + stmtNodeList.clear(); + } + + void AddStatement(StmtNode *stmt); + void AppendStatementsFromBlock(BlockNode &blk); + void InsertFirst(StmtNode *stmt); // Insert stmt as the first + void InsertLast(StmtNode *stmt); // Insert stmt as the last + void ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk); + void ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2); + void RemoveStmt(const StmtNode *stmtNode1); + void InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 before ss1 in current block. + void InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 after ss1 in current block. 
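+    // Statements are chained through an intrusive doubly-linked list (StmtNode derives
+    // from PtrListNodeBase<StmtNode>), so the insert/remove helpers above and below
+    // relink nodes in place instead of copying them.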
+ // insert all the stmts in inblock to the current block after stmt1 + void InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1); + void Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, bool withInfo, bool isFuncbody, + MIRFlavor flavor) const; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + void Dump(int32 indent) const override + { + Dump(indent, nullptr, nullptr, false, false, kFlavorUnknown); + } + + BlockNode *CloneTree(MapleAllocator &allocator) const override + { + auto *blk = allocator.GetMemPool()->New<BlockNode>(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = static_cast<StmtNode *>(stmt.CloneTree(allocator)); + DEBUG_ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + } + return blk; + } + + BlockNode *CloneTreeWithSrcPosition(const MIRModule &mod) + { + MapleAllocator &allocator = mod.GetCurFuncCodeMPAllocator(); + auto *blk = allocator.GetMemPool()->New<BlockNode>(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = static_cast<StmtNode *>(stmt.CloneTree(allocator)); + DEBUG_ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetSrcPos(stmt.GetSrcPos()); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + } + return blk; + } + + BlockNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map<uint32_t, uint64_t> &toFreqs, + std::unordered_map<uint32_t, uint64_t> &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp); + + bool IsEmpty() const + { + return stmtNodeList.empty(); + } + + void ResetBlock() + { + stmtNodeList.clear(); + } + + StmtNode *GetFirst() + { + return &(stmtNodeList.front()); + } + + const StmtNode *GetFirst() const + { + return &(stmtNodeList.front()); + } + + void SetFirst(StmtNode *node) + { + stmtNodeList.update_front(node); + } + + StmtNode *GetLast() + { + return &(stmtNodeList.back()); + } + + const StmtNode *GetLast() const + { + return &(stmtNodeList.back()); + } + + void SetLast(StmtNode *node) + { + stmtNodeList.update_back(node); + } + + StmtNodes &GetStmtNodes() + { + return stmtNodeList; + } + + const StmtNodes &GetStmtNodes() const + { + return stmtNodeList; + } + +private: + StmtNodes stmtNodeList; +}; + +class IfStmtNode : public UnaryStmtNode { +public: + IfStmtNode() : UnaryStmtNode(OP_if) + { + numOpnds = kOperandNumTernary; + } + + virtual ~IfStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IfStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<IfStmtNode>(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + node->thenPart = thenPart->CloneTree(allocator); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTree(allocator); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + IfStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map<uint32_t, uint64_t> &toFreqs, + std::unordered_map<uint32_t, uint64_t> &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) + { + auto *node = allocator.GetMemPool()->New<IfStmtNode>(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? 
newFreq : 1; + if (updateOp & kUpdateOrigFreq) { + uint64_t left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ? (oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = left; + } + } + node->thenPart = thenPart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + BaseNode *Opnd(size_t i = 0) const override + { + if (i == 0) { + return UnaryStmtNode::Opnd(0); + } else if (i == 1) { + return thenPart; + } else if (i == 2) { + DEBUG_ASSERT(elsePart != nullptr, "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + DEBUG_ASSERT(numOpnds == kOperandNumTernary, + "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + return elsePart; + } + DEBUG_ASSERT(false, "IfStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + + BlockNode *GetThenPart() const + { + return thenPart; + } + + void SetThenPart(BlockNode *node) + { + thenPart = node; + } + + BlockNode *GetElsePart() const + { + return elsePart; + } + + void SetElsePart(BlockNode *node) + { + elsePart = node; + } + + size_t NumOpnds() const override + { + if (elsePart == nullptr) { + return kOperandNumBinary; + } + return kOperandNumTernary; + } + +private: + BlockNode *thenPart = nullptr; + BlockNode *elsePart = nullptr; +}; + +// for both while and dowhile +class WhileStmtNode : public UnaryStmtNode { +public: + explicit WhileStmtNode(Opcode o) : UnaryStmtNode(o) + { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~WhileStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + WhileStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTree(allocator); + return node; + } + + WhileStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + int64_t oldFreq = fromFreqs[GetStmtID()]; + int64_t newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? static_cast(newFreq) : 1; + if (updateOp & kUpdateOrigFreq) { + int64_t left = (oldFreq - newFreq) > 0 ? 
(oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = left; + } + } + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + return node; + } + + void SetBody(BlockNode *node) + { + body = node; + } + + BlockNode *GetBody() const + { + return body; + } + + BaseNode *Opnd(size_t i = 0) const override + { + if (i == 0) { + return UnaryStmtNode::Opnd(); + } else if (i == 1) { + return body; + } + DEBUG_ASSERT(false, "WhileStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + +private: + BlockNode *body = nullptr; +}; + +class DoloopNode : public StmtNode { +public: + DoloopNode() : DoloopNode(StIdx(), false, nullptr, nullptr, nullptr, nullptr) {} + + DoloopNode(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, BaseNode *incrExp, + BlockNode *doBody) + : StmtNode(OP_doloop, kOperandNumDoloop), + doVarStIdx(doVarStIdx), + isPreg(isPReg), + startExpr(startExp), + condExpr(contExp), + incrExpr(incrExp), + doBody(doBody) + { + } + + virtual ~DoloopNode() = default; + + void DumpDoVar(const MIRModule &mod) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + DoloopNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetStartExpr(startExpr->CloneTree(allocator)); + node->SetContExpr(GetCondExpr()->CloneTree(allocator)); + node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); + node->SetDoBody(GetDoBody()->CloneTree(allocator)); + return node; + } + + DoloopNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = oldFreq; + if (updateOp & kUpdateFreqbyScale) { // used in inline/clone + newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + } else if (updateOp & kUpdateUnrolledFreq) { // used in unrolled part + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? (bodyFreq * numer / denom + (oldFreq - bodyFreq)) : oldFreq; + } else if (updateOp & kUpdateUnrollRemainderFreq) { // used in unrolled remainder + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? 
(((bodyFreq * numer) % denom) + (oldFreq - bodyFreq)) : oldFreq; + } + toFreqs[node->GetStmtID()] = static_cast(newFreq); + DEBUG_ASSERT(oldFreq >= newFreq, "sanity check"); + if (updateOp & kUpdateOrigFreq) { + uint64_t left = oldFreq - newFreq; + fromFreqs[GetStmtID()] = left; + } + } + node->SetStartExpr(startExpr->CloneTree(allocator)); + node->SetContExpr(GetCondExpr()->CloneTree(allocator)); + node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); + node->SetDoBody(GetDoBody()->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + return node; + } + + void SetDoVarStIdx(StIdx idx) + { + doVarStIdx = idx; + } + + PregIdx GetDoVarPregIdx() const + { + return static_cast(doVarStIdx.FullIdx()); + } + + const StIdx &GetDoVarStIdx() const + { + return doVarStIdx; + } + + void SetDoVarStFullIdx(uint32 idx) + { + doVarStIdx.SetFullIdx(idx); + } + + void SetIsPreg(bool isPregVal) + { + isPreg = isPregVal; + } + + bool IsPreg() const + { + return isPreg; + } + + void SetStartExpr(BaseNode *node) + { + startExpr = node; + } + + BaseNode *GetStartExpr() const + { + return startExpr; + } + + void SetContExpr(BaseNode *node) + { + condExpr = node; + } + + BaseNode *GetCondExpr() const + { + return condExpr; + } + + void SetIncrExpr(BaseNode *node) + { + incrExpr = node; + } + + BaseNode *GetIncrExpr() const + { + return incrExpr; + } + + void SetDoBody(BlockNode *node) + { + doBody = node; + } + + BlockNode *GetDoBody() const + { + return doBody; + } + + BaseNode *Opnd(size_t i) const override + { + if (i == 0) { + return startExpr; + } + if (i == 1) { + return condExpr; + } + if (i == 2) { + return incrExpr; + } + return *(&doBody + i - 3); + } + + size_t NumOpnds() const override + { + return kOperandNumDoloop; + } + + void SetOpnd(BaseNode *node, size_t i) override + { + if (i == 0) { + startExpr = node; + } + if (i == 1) { + SetContExpr(node); + } + if (i == 2) { + incrExpr = node; + } else { + *(&doBody + i - 3) = static_cast(node); + } + } + +private: + static constexpr int kOperandNumDoloop = 4; + StIdx doVarStIdx; // must be local; cast to PregIdx for preg + bool isPreg; + BaseNode *startExpr; + BaseNode *condExpr; + BaseNode *incrExpr; + BlockNode *doBody; +}; + +class ForeachelemNode : public StmtNode { +public: + ForeachelemNode() : StmtNode(OP_foreachelem) + { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + virtual ~ForeachelemNode() = default; + + const StIdx &GetElemStIdx() const + { + return elemStIdx; + } + + void SetElemStIdx(StIdx elemStIdxValue) + { + elemStIdx = elemStIdxValue; + } + + const StIdx &GetArrayStIdx() const + { + return arrayStIdx; + } + + void SetArrayStIdx(StIdx arrayStIdxValue) + { + arrayStIdx = arrayStIdxValue; + } + + BlockNode *GetLoopBody() const + { + return loopBody; + } + + void SetLoopBody(BlockNode *loopBodyValue) + { + loopBody = loopBodyValue; + } + + BaseNode *Opnd(size_t) const override + { + return loopBody; + } + + size_t NumOpnds() const override + { + return numOpnds; + } + + void Dump(int32 indent) const override; + + ForeachelemNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetLoopBody(loopBody->CloneTree(allocator)); + return node; + } + +private: + StIdx elemStIdx; // must be local symbol + StIdx arrayStIdx; // symbol table entry of the array/collection variable + BlockNode *loopBody = nullptr; +}; + +// used by assertge, assertlt +class BinaryStmtNode : public StmtNode, public BinaryOpnds { +public: + 
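+    // The two operands live in the storage inherited from BinaryOpnds; Opnd() and
+    // SetOpnd() below forward to GetBOpnd()/SetBOpnd() after a bounds check.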
explicit BinaryStmtNode(Opcode o) : StmtNode(o, kOperandNumBinary) {} + + virtual ~BinaryStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + BinaryStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<BinaryStmtNode>(*this); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + BaseNode *Opnd(size_t i) const override + { + DEBUG_ASSERT(i < kOperandNumBinary, "Invalid operand idx in BinaryStmtNode"); + return GetBOpnd(i); + } + + size_t NumOpnds() const override + { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i) override + { + SetBOpnd(node, i); + } + + bool IsLeaf() const override + { + return false; + } +}; + +class IassignoffNode : public BinaryStmtNode { +public: + IassignoffNode() : BinaryStmtNode(OP_iassignoff) {} + + explicit IassignoffNode(int32 ofst) : BinaryStmtNode(OP_iassignoff), offset(ofst) {} + + IassignoffNode(PrimType primType, int32 offset, BaseNode *addrOpnd, BaseNode *srcOpnd) : IassignoffNode(offset) + { + BaseNodeT::SetPrimType(primType); + SetBOpnd(addrOpnd, 0); + SetBOpnd(srcOpnd, 1); + } + + virtual ~IassignoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignoffNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<IassignoffNode>(*this); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + int32 GetOffset() const + { + return offset; + } + + void SetOffset(int32 newOffset) + { + offset = newOffset; + } + +private: + int32 offset = 0; +}; + +// for iassignfpoff, iassignspoff, iassignpcoff +class IassignFPoffNode : public UnaryStmtNode { +public: + explicit IassignFPoffNode(Opcode o) : UnaryStmtNode(o) {} + + explicit IassignFPoffNode(Opcode o, int32 ofst) : UnaryStmtNode(o), offset(ofst) {} + + IassignFPoffNode(Opcode o, PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(o, offset) + { + BaseNodeT::SetPrimType(primType); + UnaryStmtNode::SetOpnd(src, 0); + } + + virtual ~IassignFPoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignFPoffNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New<IassignFPoffNode>(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + void SetOffset(int32 ofst) + { + offset = ofst; + } + + int32 GetOffset() const + { + return offset; + } + +private: + int32 offset = 0; +}; + +typedef IassignFPoffNode IassignPCoffNode; + +class BlkassignoffNode : public BinaryStmtNode { +public: + BlkassignoffNode() : BinaryStmtNode(OP_blkassignoff) + { + ptyp = PTY_agg; + alignLog2 = 0; + offset = 0; + } + explicit BlkassignoffNode(int32 ofst, int32 bsize) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) + { + ptyp = PTY_agg; + alignLog2 = 0; + } + explicit BlkassignoffNode(int32 ofst, int32 bsize, BaseNode *dest, BaseNode *src) + : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) + { + ptyp = PTY_agg; + alignLog2 = 0; + SetBOpnd(dest, 0); + SetBOpnd(src, 1); + } + ~BlkassignoffNode() = default; + + void Dump(int32 indent) const override; + + BlkassignoffNode 
*CloneTree(MapleAllocator &allocator) const override + { + BlkassignoffNode *node = allocator.GetMemPool()->New(offset, blockSize); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint32 GetAlign() const + { + uint32 res = 1; + for (uint32 i = 0; i < alignLog2; i++) { + res *= 2; + } + return res; + } + + void SetAlign(uint32 x) + { + if (x == 0) { + alignLog2 = 0; + return; + } + DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non power of 2"); + uint32 res = 0; + while (x != 1) { + x >>= 1; + ++res; + } + alignLog2 = res; + } + + uint32 alignLog2 : 4; + int32 offset : 28; + int32 blockSize = 0; +}; + +// used by return, syncenter, syncexit +class NaryStmtNode : public StmtNode, public NaryOpnds { +public: + NaryStmtNode(MapleAllocator &allocator, Opcode o) : StmtNode(o), NaryOpnds(allocator) {} + + NaryStmtNode(const MIRModule &mod, Opcode o) : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryStmtNode(MapleAllocator &allocator, const NaryStmtNode &node) + // do not use stmt copy constructor + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + NaryOpnds(allocator) + { + } + + NaryStmtNode(const MIRModule &mod, const NaryStmtNode &node) : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + explicit NaryStmtNode(const NaryStmtNode &node) = delete; + NaryStmtNode &operator=(const NaryStmtNode &node) = delete; + virtual ~NaryStmtNode() = default; + + void Dump(int32 indent) const override; + void DumpCallConvInfo() const + { + if (GetAttr(STMTATTR_ccall)) { + LogInfo::MapleLogger() << " ccall"; + } else if (GetAttr(STMTATTR_webkitjscall)) { + LogInfo::MapleLogger() << " webkitjscc"; + } else if (GetAttr(STMTATTR_ghcall)) { + LogInfo::MapleLogger() << " ghccc"; + } else { + // default is ccall + LogInfo::MapleLogger() << " ccall"; + } + } + bool Verify() const override; + + NaryStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + BaseNode *Opnd(size_t i) const override + { + return GetNopndAt(i); + } + + void SetOpnd(BaseNode *node, size_t i) override + { + DEBUG_ASSERT(i < GetNopnd().size(), "array index out of range"); + SetNOpndAt(i, node); + } + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "NaryStmtNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnds(const MapleVector &arguments) + { + SetNOpnd(arguments); + SetNumOpnds(arguments.size()); + } + + void PushOpnd(BaseNode *node) + { + if (node != nullptr) { + GetNopnd().push_back(node); + } + SetNumOpnds(GetNopndSize()); + } + + void InsertOpnd(BaseNode *node, size_t idx) + { + if (node == nullptr || idx > GetNopndSize()) { + return; + } + auto begin = GetNopnd().begin(); + for (size_t i = 0; i < idx; ++i) { + ++begin; + } + (void)GetNopnd().insert(begin, node); + SetNumOpnds(GetNopndSize()); + } +}; + +class SafetyCheckStmtNode { +public: + explicit SafetyCheckStmtNode(GStrIdx funcNameIdx) : funcNameIdx(funcNameIdx) {} + explicit SafetyCheckStmtNode(const SafetyCheckStmtNode &stmtNode) : funcNameIdx(stmtNode.GetFuncNameIdx()) {} + + virtual ~SafetyCheckStmtNode() = default; + + std::string GetFuncName() 
const; + + GStrIdx GetFuncNameIdx() const + { + return funcNameIdx; + } + + void Dump() const + { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ">"; + } + +private: + GStrIdx funcNameIdx; +}; + +// used by callassertnonnull, callassertle +class SafetyCallCheckStmtNode { +public: + SafetyCallCheckStmtNode(GStrIdx callFuncNameIdx, size_t paramIndex, GStrIdx stmtFuncNameIdx) + : callFuncNameIdx(callFuncNameIdx), paramIndex(paramIndex), stmtFuncNameIdx(stmtFuncNameIdx) + { + } + explicit SafetyCallCheckStmtNode(const SafetyCallCheckStmtNode &stmtNode) + : callFuncNameIdx(stmtNode.GetFuncNameIdx()), + paramIndex(stmtNode.GetParamIndex()), + stmtFuncNameIdx(stmtNode.GetStmtFuncNameIdx()) + { + } + + virtual ~SafetyCallCheckStmtNode() = default; + + std::string GetFuncName() const; + GStrIdx GetFuncNameIdx() const + { + return callFuncNameIdx; + } + std::string GetStmtFuncName() const; + size_t GetParamIndex() const + { + return paramIndex; + } + + GStrIdx GetStmtFuncNameIdx() const + { + return stmtFuncNameIdx; + } + + void Dump() const + { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ", " << paramIndex << ", &" << GetStmtFuncName() << ">"; + } + +private: + GStrIdx callFuncNameIdx; + size_t paramIndex; + GStrIdx stmtFuncNameIdx; +}; + +// used by callassertnonnull +class CallAssertNonnullStmtNode : public UnaryStmtNode, public SafetyCallCheckStmtNode { +public: + CallAssertNonnullStmtNode(Opcode o, GStrIdx callFuncNameIdx, size_t paramIndex, GStrIdx stmtFuncNameIdx) + : UnaryStmtNode(o), SafetyCallCheckStmtNode(callFuncNameIdx, paramIndex, stmtFuncNameIdx) + { + } + virtual ~CallAssertNonnullStmtNode() {} + + void Dump(int32 indent) const override; + + CallAssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertnonnull +class AssertNonnullStmtNode : public UnaryStmtNode, public SafetyCheckStmtNode { +public: + AssertNonnullStmtNode(Opcode o, GStrIdx funcNameIdx) : UnaryStmtNode(o), SafetyCheckStmtNode(funcNameIdx) {} + virtual ~AssertNonnullStmtNode() {} + + void Dump(int32 indent) const override; + + AssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertle +class AssertBoundaryStmtNode : public NaryStmtNode, public SafetyCheckStmtNode { +public: + AssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx) + : NaryStmtNode(allocator, o), SafetyCheckStmtNode(funcNameIdx) + { + } + virtual ~AssertBoundaryStmtNode() {} + + AssertBoundaryStmtNode(MapleAllocator &allocator, const AssertBoundaryStmtNode &stmtNode) + : NaryStmtNode(allocator, stmtNode), SafetyCheckStmtNode(stmtNode) + { + } + + AssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx) + : AssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx) + { + } + + void Dump(int32 indent) const override; + + AssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by 
callassertle +class CallAssertBoundaryStmtNode : public NaryStmtNode, public SafetyCallCheckStmtNode { +public: + CallAssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : NaryStmtNode(allocator, o), SafetyCallCheckStmtNode(funcNameIdx, paramIndex, stmtFuncNameIdx) + { + } + virtual ~CallAssertBoundaryStmtNode() {} + + CallAssertBoundaryStmtNode(MapleAllocator &allocator, const CallAssertBoundaryStmtNode &stmtNode) + : NaryStmtNode(allocator, stmtNode), SafetyCallCheckStmtNode(stmtNode) + { + } + + CallAssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : CallAssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx, paramIndex, stmtFuncNameIdx) + { + } + + void Dump(int32 indent) const override; + + CallAssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by call, virtualcall, virtualicall, superclasscall, interfacecall, +// interfaceicall, customcall +// polymorphiccall +// callassigned, virtualcallassigned, virtualicallassigned, +// superclasscallassigned, interfacecallassigned, interfaceicallassigned, +// customcallassigned +// polymorphiccallassigned +class CallNode : public NaryStmtNode, public DeoptBundleInfo { +public: + CallNode(MapleAllocator &allocator, Opcode o) + : NaryStmtNode(allocator, o), DeoptBundleInfo(allocator), returnValues(allocator.Adapter()) + { + } + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx) : CallNode(allocator, o, idx, TyIdx()) {} + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx, TyIdx tdx) + : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + puIdx(idx), + tyIdx(tdx), + returnValues(allocator.Adapter()) + { + } + + CallNode(const MIRModule &mod, Opcode o) : CallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + CallNode(const MIRModule &mod, Opcode o, PUIdx idx, TyIdx tdx) + : CallNode(mod.GetCurFuncCodeMPAllocator(), o, idx, tdx) + { + } + + CallNode(MapleAllocator &allocator, const CallNode &node) + : NaryStmtNode(allocator, node), + DeoptBundleInfo(allocator), + puIdx(node.GetPUIdx()), + tyIdx(node.tyIdx), + returnValues(allocator.Adapter()) + { + } + + CallNode(const MIRModule &mod, const CallNode &node) : CallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CallNode(CallNode &node) = delete; + CallNode &operator=(const CallNode &node) = delete; + virtual ~CallNode() = default; + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const; + + CallNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + for (const auto &elem : GetDeoptBundleInfo()) { + node->AddDeoptBundleInfo(elem.first, elem.second); + } + return node; + } + + PUIdx GetPUIdx() const + { + return puIdx; + } + + void SetPUIdx(const 
PUIdx idx) + { + puIdx = idx; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() + { + return returnValues; + } + + CallReturnPair GetReturnPair(size_t idx) const + { + DEBUG_ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + return returnValues.at(idx); + } + + void SetReturnPair(CallReturnPair retVal, size_t idx) + { + DEBUG_ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + returnValues.at(idx) = retVal; + } + + const CallReturnVector &GetReturnVec() const + { + return returnValues; + } + + CallReturnPair GetNthReturnVec(size_t i) const + { + DEBUG_ASSERT(i < returnValues.size(), "array index out of range"); + return returnValues[i]; + } + + void SetReturnVec(const CallReturnVector &vec) + { + returnValues = vec; + } + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "CallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override + { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override + { + return &returnValues; + } + + void SetCallReturnVector(const CallReturnVector &value) + { + returnValues = value; + } + +private: + PUIdx puIdx = 0; + TyIdx tyIdx = TyIdx(0); + CallReturnVector returnValues; +}; + +// icall, icallassigned, icallproto and icallprotoassigned +class IcallNode : public NaryStmtNode, public DeoptBundleInfo { +public: + IcallNode(MapleAllocator &allocator, Opcode o) + : NaryStmtNode(allocator, o), DeoptBundleInfo(allocator), retTyIdx(0), returnValues(allocator.Adapter()) + { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + IcallNode(MapleAllocator &allocator, Opcode o, TyIdx idx) + : NaryStmtNode(allocator, o), DeoptBundleInfo(allocator), retTyIdx(idx), returnValues(allocator.Adapter()) + { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + IcallNode(const MIRModule &mod, Opcode o) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + IcallNode(const MIRModule &mod, Opcode o, TyIdx idx) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o, idx) {} + + IcallNode(MapleAllocator &allocator, const IcallNode &node) + : NaryStmtNode(allocator, node), + DeoptBundleInfo(allocator), + retTyIdx(node.retTyIdx), + returnValues(allocator.Adapter()) + { + } + + IcallNode(const MIRModule &mod, const IcallNode &node) : IcallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IcallNode(IcallNode &node) = delete; + IcallNode &operator=(const IcallNode &node) = delete; + virtual ~IcallNode() = default; + + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const; + IcallNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->returnValues.push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + for (const auto &elem : GetDeoptBundleInfo()) { + node->AddDeoptBundleInfo(elem.first, elem.second); + } + return node; + } + + TyIdx GetRetTyIdx() const + { + return retTyIdx; + } + + void SetRetTyIdx(TyIdx idx) + { + retTyIdx = idx; + } + + const CallReturnVector &GetReturnVec() const + { + return returnValues; + } + + CallReturnVector &GetReturnVec() + { + 
return returnValues; + } + + void SetReturnVec(const CallReturnVector &vec) + { + returnValues = vec; + } + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "IcallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override + { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override + { + return &returnValues; + } + +private: + TyIdx retTyIdx; // for icall: return type for callee; for icallproto: the prototye + // the 0th operand is the function pointer + CallReturnVector returnValues; +}; + +// used by intrinsiccall and xintrinsiccall +class IntrinsiccallNode : public NaryStmtNode, public DeoptBundleInfo { +public: + IntrinsiccallNode(MapleAllocator &allocator, Opcode o) + : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + intrinsic(INTRN_UNDEFINED), + tyIdx(0), + returnValues(allocator.Adapter()) + { + } + + IntrinsiccallNode(MapleAllocator &allocator, Opcode o, MIRIntrinsicID id) + : NaryStmtNode(allocator, o), + DeoptBundleInfo(allocator), + intrinsic(id), + tyIdx(0), + returnValues(allocator.Adapter()) + { + } + + IntrinsiccallNode(const MIRModule &mod, Opcode o) : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + IntrinsiccallNode(const MIRModule &mod, Opcode o, MIRIntrinsicID id) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o, id) + { + } + + IntrinsiccallNode(MapleAllocator &allocator, const IntrinsiccallNode &node) + : NaryStmtNode(allocator, node), + DeoptBundleInfo(allocator), + intrinsic(node.GetIntrinsic()), + tyIdx(node.tyIdx), + returnValues(allocator.Adapter()) + { + } + + IntrinsiccallNode(const MIRModule &mod, const IntrinsiccallNode &node) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + IntrinsiccallNode(IntrinsiccallNode &node) = delete; + IntrinsiccallNode &operator=(const IntrinsiccallNode &node) = delete; + virtual ~IntrinsiccallNode() = default; + + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + + IntrinsiccallNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const + { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID id) + { + intrinsic = id; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() + { + return returnValues; + } + + const CallReturnVector &GetReturnVec() const + { + return returnValues; + } + + void SetReturnVec(const CallReturnVector &vec) + { + returnValues = vec; + } + + size_t NumOpnds() const override + { + DEBUG_ASSERT(numOpnds == GetNopndSize(), "IntrinsiccallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override + { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override + { + return &returnValues; + } + + CallReturnPair &GetCallReturnPair(uint32 i) + { + DEBUG_ASSERT(i < returnValues.size(), "array index out of range"); + return returnValues.at(i); + } + +private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; + CallReturnVector returnValues; +}; + +// used 
by callinstant, virtualcallinstant, superclasscallinstant and +// interfacecallinstant, callinstantassigned, virtualcallinstantassigned, +// superclasscallinstantassigned and interfacecallinstantassigned +class CallinstantNode : public CallNode { +public: + CallinstantNode(MapleAllocator &allocator, Opcode o, TyIdx tIdx) : CallNode(allocator, o), instVecTyIdx(tIdx) {} + + CallinstantNode(const MIRModule &mod, Opcode o, TyIdx tIdx) + : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), o, tIdx) + { + } + + CallinstantNode(MapleAllocator &allocator, const CallinstantNode &node) + : CallNode(allocator, node), instVecTyIdx(node.instVecTyIdx) + { + } + + CallinstantNode(const MIRModule &mod, const CallinstantNode &node) + : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), node) + { + } + + CallinstantNode(CallinstantNode &node) = delete; + CallinstantNode &operator=(const CallinstantNode &node) = delete; + virtual ~CallinstantNode() = default; + + void Dump(int32 indent, bool newline) const override; + void Dump(int32 indent) const override + { + Dump(indent, true); + } + + CallinstantNode *CloneTree(MapleAllocator &allocator) const override + { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < GetReturnVec().size(); ++i) { + node->GetReturnVec().push_back(GetNthReturnVec(i)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + CallReturnVector *GetCallReturnVector() override + { + return &GetReturnVec(); + } + +private: + TyIdx instVecTyIdx; +}; + +class LabelNode : public StmtNode { +public: + LabelNode() : StmtNode(OP_label) {} + + explicit LabelNode(LabelIdx idx) : StmtNode(OP_label), labelIdx(idx) {} + + virtual ~LabelNode() = default; + + void Dump(int32 indent) const override; + + LabelNode *CloneTree(MapleAllocator &allocator) const override + { + auto *l = allocator.GetMemPool()->New(*this); + l->SetStmtID(stmtIDNext++); + return l; + } + + LabelIdx GetLabelIdx() const + { + return labelIdx; + } + + void SetLabelIdx(LabelIdx idx) + { + labelIdx = idx; + } + +private: + LabelIdx labelIdx = 0; +}; + +class CommentNode : public StmtNode { +public: + explicit CommentNode(const MapleAllocator &allocator) : StmtNode(OP_comment), comment(allocator.GetMemPool()) {} + + explicit CommentNode(const MIRModule &mod) : CommentNode(mod.GetCurFuncCodeMPAllocator()) {} + + CommentNode(const MapleAllocator &allocator, const std::string &cmt) + : StmtNode(OP_comment), comment(cmt, allocator.GetMemPool()) + { + } + + CommentNode(const MIRModule &mod, const std::string &cmt) : CommentNode(mod.GetCurFuncCodeMPAllocator(), cmt) {} + + CommentNode(const MapleAllocator &allocator, const CommentNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType()), comment(node.comment, allocator.GetMemPool()) + { + } + + CommentNode(const MIRModule &mod, const CommentNode &node) : CommentNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CommentNode(CommentNode &node) = delete; + CommentNode &operator=(const CommentNode &node) = delete; + virtual ~CommentNode() = default; + + void Dump(int32 indent) const override; + + CommentNode *CloneTree(MapleAllocator &allocator) const override + { + auto *c = allocator.GetMemPool()->New(allocator, *this); + return c; + } + + const MapleString &GetComment() const + { + return comment; + } + + void SetComment(MapleString com) + { + comment = com; + } + + void SetComment(const std::string &str) + { + comment = str; + } 
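Aside: every call-node class above (CallNode, IcallNode, IntrinsiccallNode, CallinstantNode) implements the same CloneTree contract: allocate the clone from the destination allocator's mempool, give it a fresh statement id, deep-copy each operand through the operand's own CloneTree, re-append the return-value pairs (and, where present, the deopt bundle entries), then resync numOpnds with the operand vector. The sketch below is a self-contained toy analogue of that contract; Node, Arena, and nextStmtId are hypothetical stand-ins, not the Maple types.

#include <cassert>
#include <cstdio>
#include <memory>
#include <utility>
#include <vector>

struct Arena;
static int nextStmtId = 1;  // plays the role of stmtIDNext

struct Node {
    std::vector<Node *> opnds;  // plays the role of the nopnd vector
    int stmtId = 0;
    int value = 0;
    Node() = default;
    Node(const Node &) = default;
    virtual ~Node() = default;
    virtual Node *CloneTree(Arena &a) const;
};

struct Arena {  // stand-in for MapleAllocator and its mempool
    std::vector<std::unique_ptr<Node>> pool;
    template <typename T, typename... Args>
    T *New(Args &&... args)
    {
        pool.push_back(std::make_unique<T>(std::forward<Args>(args)...));
        return static_cast<T *>(pool.back().get());
    }
};

Node *Node::CloneTree(Arena &a) const
{
    Node *n = a.New<Node>(*this);  // shallow-copy scalar fields first
    n->stmtId = nextStmtId++;      // clones get a fresh statement id
    n->opnds.clear();              // then deep-copy every operand
    for (const Node *op : opnds) {
        n->opnds.push_back(op->CloneTree(a));
    }
    return n;
}

int main()
{
    Arena src, dst;
    Node *leaf = src.New<Node>();
    leaf->value = 42;
    Node *call = src.New<Node>();
    call->opnds.push_back(leaf);
    Node *copy = call->CloneTree(dst);  // subtree now lives in dst only
    assert(copy->opnds[0] != leaf);     // operands were re-allocated too
    std::printf("%d\n", copy->opnds[0]->value);  // 42
    return 0;
}

Passing the destination arena explicitly is the point of the pattern: it lets a subtree be copied out of one function's code mempool into another's without any shared ownership between the two copies.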
+
+    void SetComment(const char *str)
+    {
+        comment = str;
+    }
+
+    void Append(const std::string &str)
+    {
+        comment.append(str);
+    }
+
+private:
+    MapleString comment;
+};
+
+enum AsmQualifierKind : unsigned {  // they are already Maple IR keywords
+    kASMvolatile,
+    kASMinline,
+    kASMgoto,
+};
+
+class AsmNode : public NaryStmtNode {
+public:
+    explicit AsmNode(MapleAllocator *alloc)
+        : NaryStmtNode(*alloc, OP_asm),
+          asmString(alloc->GetMemPool()),
+          inputConstraints(alloc->Adapter()),
+          asmOutputs(alloc->Adapter()),
+          outputConstraints(alloc->Adapter()),
+          clobberList(alloc->Adapter()),
+          gotoLabels(alloc->Adapter()),
+          qualifiers(0)
+    {
+    }
+
+    AsmNode(MapleAllocator &allocator, const AsmNode &node)
+        : NaryStmtNode(allocator, node),
+          asmString(node.asmString, allocator.GetMemPool()),
+          inputConstraints(allocator.Adapter()),
+          asmOutputs(allocator.Adapter()),
+          outputConstraints(allocator.Adapter()),
+          clobberList(allocator.Adapter()),
+          gotoLabels(allocator.Adapter()),
+          qualifiers(node.qualifiers)
+    {
+    }
+
+    virtual ~AsmNode() = default;
+
+    AsmNode *CloneTree(MapleAllocator &allocator) const override;
+
+    void SetQualifier(AsmQualifierKind x)
+    {
+        qualifiers |= (1U << static_cast<uint32>(x));
+    }
+
+    bool GetQualifier(AsmQualifierKind x) const
+    {
+        return (qualifiers & (1U << static_cast<uint32>(x))) != 0;
+    }
+
+    CallReturnVector *GetCallReturnVector() override
+    {
+        return &asmOutputs;
+    }
+
+    void SetHasWriteInputs()
+    {
+        hasWriteInputs = true;
+    }
+
+    bool HasWriteInputs() const
+    {
+        return hasWriteInputs;
+    }
+
+    void DumpOutputs(int32 indent, std::string &uStr) const;
+    void DumpInputOperands(int32 indent, std::string &uStr) const;
+    void Dump(int32 indent) const override;
+
+    MapleString asmString;
+    MapleVector<UStrIdx> inputConstraints;  // length is numOpnds
+    CallReturnVector asmOutputs;
+    MapleVector<UStrIdx> outputConstraints;  // length is returnValues.size()
+    MapleVector<UStrIdx> clobberList;
+    MapleVector<LabelIdx> gotoLabels;
+    uint32 qualifiers;
+
+private:
+    bool hasWriteInputs = false;
+};
+
+void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent);
+bool HasIreadExpr(const BaseNode *expr);
+size_t MaxDepth(const BaseNode *expr);
+}  // namespace maple
+
+#define LOAD_SAFE_CAST_FOR_MIR_NODE
+#include "ir_safe_cast_traits.def"
+
+#endif  // MAPLE_IR_INCLUDE_MIR_NODES_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_parser.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_parser.h
new file mode 100755
index 0000000000000000000000000000000000000000..420f521bf596d083b3e2e9fbf47fd36068e793ad
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_parser.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_PARSER_H +#define MAPLE_IR_INCLUDE_MIR_PARSER_H +#include "mir_module.h" +#include "lexer.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "mir_scope.h" +#include "parser_opt.h" + +namespace maple { +using BaseNodePtr = BaseNode *; +using StmtNodePtr = StmtNode *; +using BlockNodePtr = BlockNode *; + +class FormalDef; + +class MIRParser { +public: + explicit MIRParser(MIRModule &md) : lexer(md), mod(md), definedLabels(mod.GetMPAllocator().Adapter()) + { + safeRegionFlag.push(false); + } + + ~MIRParser() = default; + + MIRFunction *CreateDummyFunction(); + void ResetCurrentFunction() + { + mod.SetCurFunction(dummyFunction); + } + + bool ParseLoc(); + bool ParseLocStmt(StmtNodePtr &stmt); + bool ParsePosition(SrcPosition &pos); + bool ParseOneScope(MIRScope &scope); + bool ParseScope(StmtNodePtr &stmt); + bool ParseOneAlias(GStrIdx &strIdx, MIRAliasVars &aliasVar); + bool ParseAlias(StmtNodePtr &stmt); + uint8 *ParseWordsInfo(uint32 size); + bool ParseSwitchCase(int64 &, LabelIdx &); + bool ParseExprOneOperand(BaseNodePtr &expr); + bool ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1); + bool ParseExprNaryOperand(MapleVector &opndVec); + bool IsDelimitationTK(TokenKind tk) const; + Opcode GetOpFromToken(TokenKind tk) const; + bool IsStatement(TokenKind tk) const; + PrimType GetPrimitiveType(TokenKind tk) const; + MIRIntrinsicID GetIntrinsicID(TokenKind tk) const; + bool ParseScalarValue(MIRConstPtr &stype, MIRType &type); + bool ParseConstAddrLeafExpr(MIRConstPtr &cexpr); + bool ParseInitValue(MIRConstPtr &theConst, TyIdx tyIdx, bool allowEmpty = false); + bool ParseDeclaredSt(StIdx &stidx); + void CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx); + bool ParseDeclaredFunc(PUIdx &puidx); + bool ParseTypeAttrs(TypeAttrs &attrs); + bool ParseVarTypeAttrs(MIRSymbol &st); + bool CheckAlignTk(); + bool ParseAlignAttrs(TypeAttrs &tA); + bool ParsePackAttrs(); + bool ParseFieldAttrs(FieldAttrs &attrs); + bool ParseFuncAttrs(FuncAttrs &attrs); + void SetAttrContent(FuncAttrs &attrs, FuncAttrKind x, const MIRLexer &lexer); + bool CheckPrimAndDerivedType(TokenKind tokenKind, TyIdx &tyIdx); + bool ParsePrimType(TyIdx &tyIdx); + bool ParseFarrayType(TyIdx &arrayTyIdx); + bool ParseArrayType(TyIdx &arrayTyIdx); + bool ParseBitFieldType(TyIdx &fieldTyIdx); + bool ParsePragmaElement(MIRPragmaElement &elem); + bool ParsePragmaElementForArray(MIRPragmaElement &elem); + bool ParsePragmaElementForAnnotation(MIRPragmaElement &elem); + bool ParsePragma(MIRStructType &type); + bool ParseFields(MIRStructType &type); + bool ParseStructType(TyIdx &styIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseClassType(TyIdx &styidx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseInterfaceType(TyIdx &sTyIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseDefinedTypename(TyIdx &definedTyIdx, MIRTypeKind kind = kTypeUnknown); + bool ParseTypeParam(TyIdx &definedTyIdx); + bool ParsePointType(TyIdx &tyIdx); + bool ParseFuncType(TyIdx &tyIdx); + bool ParseGenericInstantVector(MIRInstantVectorType &insVecType); + bool ParseDerivedType(TyIdx &tyIdx, MIRTypeKind kind = kTypeUnknown, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseType(TyIdx &tyIdx); + bool ParseStatement(StmtNodePtr &stmt); + bool ParseSpecialReg(PregIdx &pRegIdx); + bool ParsePseudoReg(PrimType primType, PregIdx &pRegIdx); + bool ParseStmtBlock(BlockNodePtr &blk); + bool ParsePrototype(MIRFunction &func, MIRSymbol &funcSymbol, TyIdx &funcTyIdx); + bool ParseFunction(uint32 fileIdx = 0); + bool 
ParseStorageClass(MIRSymbol &symbol) const; + bool ParseDeclareVarInitValue(MIRSymbol &symbol); + bool ParseDeclareVar(MIRSymbol &); + bool ParseDeclareReg(MIRSymbol &symbol, const MIRFunction &func); + bool ParseDeclareFormal(FormalDef &formalDef); + bool ParsePrototypeRemaining(MIRFunction &func, std::vector &vecTyIdx, std::vector &vecAttrs, + bool &varArgs); + + // Stmt Parser + bool ParseStmtDassign(StmtNodePtr &stmt); + bool ParseStmtDassignoff(StmtNodePtr &stmt); + bool ParseStmtRegassign(StmtNodePtr &stmt); + bool ParseStmtIassign(StmtNodePtr &stmt); + bool ParseStmtIassignoff(StmtNodePtr &stmt); + bool ParseStmtIassignFPoff(StmtNodePtr &stmt); + bool ParseStmtBlkassignoff(StmtNodePtr &stmt); + bool ParseStmtDoloop(StmtNodePtr &stmt); + bool ParseStmtForeachelem(StmtNodePtr &stmt); + bool ParseStmtDowhile(StmtNodePtr &stmt); + bool ParseStmtIf(StmtNodePtr &stmt); + bool ParseStmtWhile(StmtNodePtr &stmt); + bool ParseStmtLabel(StmtNodePtr &stmt); + bool ParseStmtGoto(StmtNodePtr &stmt); + bool ParseStmtBr(StmtNodePtr &stmt); + bool ParseStmtSwitch(StmtNodePtr &stmt); + bool ParseStmtRangegoto(StmtNodePtr &stmt); + bool ParseStmtMultiway(StmtNodePtr &stmt); + PUIdx EnterUndeclaredFunction(bool isMcount = false); // for -pg in order to add "void _mcount()" + bool ParseStmtCall(StmtNodePtr &stmt); + bool ParseStmtCallMcount(StmtNodePtr &stmt); // for -pg in order to add "void _mcount()" to all the functions + bool ParseStmtIcall(StmtNodePtr &stmt, Opcode op); + bool ParseStmtIcall(StmtNodePtr &stmt); + bool ParseStmtIcallassigned(StmtNodePtr &stmt); + bool ParseStmtIcallproto(StmtNodePtr &stmt); + bool ParseStmtIcallprotoassigned(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned); + bool ParseStmtIntrinsiccall(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned); + bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt); + bool ParseCallReturnPair(CallReturnPair &retpair); + bool ParseCallReturns(CallReturnVector &retsvec); + bool ParseBinaryStmt(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtAssertGE(StmtNodePtr &stmt); + bool ParseNaryStmtAssertLT(StmtNodePtr &stmt); + bool ParseNaryStmtCalcassertGE(StmtNodePtr &stmt); + bool ParseNaryStmtCalcassertLT(StmtNodePtr &stmt); + bool ParseNaryStmtCallAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmt(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtReturn(StmtNodePtr &stmt); + bool ParseNaryStmtSyncEnter(StmtNodePtr &stmt); + bool ParseNaryStmtSyncExit(StmtNodePtr &stmt); + bool ParseStmtJsTry(StmtNodePtr &stmt); + bool ParseStmtTry(StmtNodePtr &stmt); + bool ParseStmtCatch(StmtNodePtr &stmt); + bool ParseUnaryStmt(Opcode op, StmtNodePtr &stmt); + bool ParseUnaryStmtThrow(StmtNodePtr &stmt); + bool ParseUnaryStmtDecRef(StmtNodePtr &stmt); + bool ParseUnaryStmtIncRef(StmtNodePtr &stmt); + bool ParseUnaryStmtDecRefReset(StmtNodePtr &stmt); + bool ParseUnaryStmtIGoto(StmtNodePtr &stmt); + bool ParseUnaryStmtEval(StmtNodePtr &stmt); + bool ParseUnaryStmtFree(StmtNodePtr &stmt); + bool ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt); + bool ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt); + bool ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt); + bool 
ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt); + bool ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt); + bool ParseStmtMarker(StmtNodePtr &stmt); + bool ParseStmtGosub(StmtNodePtr &stmt); + bool ParseStmtAsm(StmtNodePtr &stmt); + bool ParseStmtSafeRegion(StmtNodePtr &stmt); + + // Expression Parser + bool ParseExpression(BaseNodePtr &expr); + bool ParseExprDread(BaseNodePtr &expr); + bool ParseExprDreadoff(BaseNodePtr &expr); + bool ParseExprRegread(BaseNodePtr &expr); + bool ParseExprBinary(BaseNodePtr &expr); + bool ParseExprCompare(BaseNodePtr &expr); + bool ParseExprDepositbits(BaseNodePtr &expr); + bool ParseExprConstval(BaseNodePtr &expr); + bool ParseExprConststr(BaseNodePtr &expr); + bool ParseExprConststr16(BaseNodePtr &expr); + bool ParseExprSizeoftype(BaseNodePtr &expr); + bool ParseExprFieldsDist(BaseNodePtr &expr); + bool ParseExprIreadIaddrof(IreadNode &expr); + bool ParseExprIread(BaseNodePtr &expr); + bool ParseExprIreadoff(BaseNodePtr &expr); + bool ParseExprIreadFPoff(BaseNodePtr &expr); + bool ParseExprIaddrof(BaseNodePtr &expr); + bool ParseExprAddrof(BaseNodePtr &expr); + bool ParseExprAddrofoff(BaseNodePtr &expr); + bool ParseExprAddroffunc(BaseNodePtr &expr); + bool ParseExprAddroflabel(BaseNodePtr &expr); + bool ParseExprUnary(BaseNodePtr &expr); + bool ParseExprJarray(BaseNodePtr &expr); + bool ParseExprSTACKJarray(BaseNodePtr &expr); + bool ParseExprGCMalloc(BaseNodePtr &expr); + bool ParseExprExtractbits(BaseNodePtr &expr); + bool ParseExprTyconvert(BaseNodePtr &expr); + bool ParseExprRetype(BaseNodePtr &expr); + bool ParseExprTernary(BaseNodePtr &expr); + bool ParseExprArray(BaseNodePtr &expr); + bool ParseExprIntrinsicop(BaseNodePtr &expr); + bool ParseNaryExpr(NaryStmtNode &stmtNode); + + // funcName and paramIndex is out parameter + bool ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName); + bool ParseAssertInfo(std::string &funcName); + bool ParseTypedef(); + bool ParseJavaClassInterface(MIRSymbol &symbol, bool isClass); + bool ParseIntrinsicId(IntrinsicopNode &intrnOpNode); + void Error(const std::string &str); + void Warning(const std::string &str); + void FixForwardReferencedTypeForOneAgg(MIRType *type); + void FixupForwardReferencedTypeByMap(); + + const std::string &GetError(); + const std::string &GetWarning() const; + bool ParseFuncInfo(void); + void PrepareParsingMIR(); + void PrepareParsingMplt(); + bool ParseSrcLang(MIRSrcLang &srcLang); + bool ParseMIR(uint32 fileIdx = 0, uint32 option = 0, bool isIPA = false, bool isComb = false); + bool ParseMIR(std::ifstream &mplFile); // the main entry point + bool ParseInlineFuncBody(std::ifstream &mplFile); + bool ParseMPLT(std::ifstream &mpltFile, const std::string &importFileName); + bool ParseMPLTStandalone(std::ifstream &mpltFile, const std::string &importFileName); + bool ParseTypeFromString(const std::string &src, TyIdx &tyIdx); + void EmitError(const std::string &fileName); + void EmitWarning(const std::string &fileName); + uint32 GetOptions() const + { + return options; + } + +private: + // func ptr map for ParseMIR() + using FuncPtrParseMIRForElem = bool (MIRParser::*)(); + static std::map funcPtrMapForParseMIR; + static std::map InitFuncPtrMapForParseMIR(); + + bool TypeCompatible(TyIdx typeIdx1, TyIdx typeIdx2); + bool IsTypeIncomplete(MIRType *type); + + // func for ParseMIR + bool ParseMIRForFunc(); + bool ParseMIRForVar(); + bool ParseMIRForClass(); + bool ParseMIRForInterface(); + bool ParseMIRForFlavor(); + bool ParseMIRForSrcLang(); + bool 
ParseMIRForGlobalMemSize(); + bool ParseMIRForGlobalMemMap(); + bool ParseMIRForGlobalWordsTypeTagged(); + bool ParseMIRForGlobalWordsRefCounted(); + bool ParseMIRForID(); + bool ParseMIRForNumFuncs(); + bool ParseMIRForEntryFunc(); + bool ParseMIRForFileInfo(); + bool ParseMIRForFileData(); + bool ParseMIRForSrcFileInfo(); + bool ParseMIRForImport(); + bool ParseMIRForImportPath(); + bool ParseMIRForAsmdecl(); + + // func for ParseExpr + using FuncPtrParseExpr = bool (MIRParser::*)(BaseNodePtr &ptr); + static std::map funcPtrMapForParseExpr; + static std::map InitFuncPtrMapForParseExpr(); + + // func and param for ParseStmt + using FuncPtrParseStmt = bool (MIRParser::*)(StmtNodePtr &stmt); + static std::map funcPtrMapForParseStmt; + static std::map InitFuncPtrMapForParseStmt(); + + // func and param for ParseStmtBlock + using FuncPtrParseStmtBlock = bool (MIRParser::*)(); + static std::map funcPtrMapForParseStmtBlock; + static std::map InitFuncPtrMapForParseStmtBlock(); + void ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum); + bool ParseStmtBlockForVar(TokenKind stmtTK); + bool ParseStmtBlockForVar(); + bool ParseStmtBlockForTempVar(); + bool ParseStmtBlockForReg(); + bool ParseStmtBlockForType(); + bool ParseStmtBlockForFrameSize(); + bool ParseStmtBlockForUpformalSize(); + bool ParseStmtBlockForModuleID(); + bool ParseStmtBlockForFuncSize(); + bool ParseStmtBlockForFuncID(); + bool ParseStmtBlockForFormalWordsTypeTagged(); + bool ParseStmtBlockForLocalWordsTypeTagged(); + bool ParseStmtBlockForFormalWordsRefCounted(); + bool ParseStmtBlockForLocalWordsRefCounted(); + bool ParseStmtBlockForFuncInfo(); + + // common func + void SetSrcPos(SrcPosition &srcPosition, uint32 mplNum); + + // func for ParseExpr + Opcode paramOpForStmt = OP_undef; + TokenKind paramTokenKindForStmt = TK_invalid; + // func and param for ParseStmtBlock + MIRFunction *paramCurrFuncForParseStmtBlock = nullptr; + MIRLexer lexer; + MIRModule &mod; + std::string message; + std::string warningMessage; + uint32 options = kKeepFirst; + MapleVector definedLabels; // true if label at labidx is defined + MIRFunction *dummyFunction = nullptr; + MIRFunction *curFunc = nullptr; + uint16 lastFileNum = 0; // to remember first number after LOC + uint32 lastLineNum = 0; // to remember second number after LOC + uint16 lastColumnNum = 0; // to remember third number after LOC + uint32 firstLineNum = 0; // to track function starting line + std::map typeDefIdxMap; // map previous declared tyIdx + bool firstImport = true; // Mark the first imported mplt file + bool paramParseLocalType = false; // param for ParseTypedef + uint32 paramFileIdx = 0; // param for ParseMIR() + bool paramIsIPA = false; + bool paramIsComb = false; + TokenKind paramTokenKind = TK_invalid; + std::vector paramImportFileList; + std::stack safeRegionFlag; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PARSER_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_pragma.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_pragma.h new file mode 100644 index 0000000000000000000000000000000000000000..364b80971f6fd1d02b85534e99079a24481341b5 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_pragma.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_MIR_PRAGMA_H +#define MAPLE_IR_INCLUDE_MIR_PRAGMA_H +#include "types_def.h" +#include "prim_types.h" +#include "mir_module.h" +#include "mpl_logging.h" +#include "mempool_allocator.h" + +namespace maple { +class MIRModule; // circular dependency exists, no other choice +class MIRType; // circular dependency exists, no other choice +class MIRFunction; // circular dependency exists, no other choice +class MIRSymbol; // circular dependency exists, no other choice +class MIRSymbolTable; // circular dependency exists, no other choice +class MIRTypeNameTable; // circular dependency exists, no other choice +enum PragmaKind { + kPragmaUnknown, + kPragmaClass, + kPragmaFunc, + kPragmaField, + kPragmaParam, + kPragmaPkg, + kPragmaVar, + kPragmaGlbvar, + kPragmaFuncExecptioni, + kPragmaFuncVar +}; + +enum PragmaVisibility { kVisBuild, kVisRuntime, kVisSystem, kVisMaple }; + +enum PragmaValueType { + kValueByte = 0x00, // (none; must be 0) ubyte[1] + kValueShort = 0x02, // size - 1 (0…1) ubyte[size] + kValueChar = 0x03, // size - 1 (0…1) ubyte[size] + kValueInt = 0x04, // size - 1 (0…3) ubyte[size] + kValueLong = 0x06, // size - 1 (0…7) ubyte[size] + kValueFloat = 0x10, // size - 1 (0…3) ubyte[size] + kValueDouble = 0x11, // size - 1 (0…7) ubyte[size] + kValueMethodType = 0x15, // size - 1 (0…3) ubyte[size] + kValueMethodHandle = 0x16, // size - 1 (0…3) ubyte[size] + kValueString = 0x17, // size - 1 (0…3) ubyte[size] + kValueType = 0x18, // size - 1 (0…3) ubyte[size] + kValueField = 0x19, // size - 1 (0…3) ubyte[size] + kValueMethod = 0x1a, // size - 1 (0…3) ubyte[size] + kValueEnum = 0x1b, // size - 1 (0…3) ubyte[size] + kValueArray = 0x1c, // (none; must be 0) encoded_array + kValueAnnotation = 0x1d, // (none; must be 0) encoded_annotation + kValueNull = 0x1e, // (none; must be 0) (none) + kValueBoolean = 0x1f // boolean (0…1) (none) +}; + +class MIRPragmaElement { +public: + explicit MIRPragmaElement(MIRModule &m) : MIRPragmaElement(m.GetPragmaMPAllocator()) + { + val.d = 0; + } + + explicit MIRPragmaElement(MapleAllocator &subElemAllocator) : subElemVec(subElemAllocator.Adapter()) + { + subElemVec.clear(); + val.d = 0; + } + + ~MIRPragmaElement() = default; + void Dump(int indent) const; + void SubElemVecPushBack(MIRPragmaElement *elem) + { + subElemVec.push_back(elem); + } + + const MapleVector &GetSubElemVec() const + { + return subElemVec; + } + + const MIRPragmaElement *GetSubElement(uint64 i) const + { + return subElemVec[i]; + } + + MapleVector &GetSubElemVec() + { + return subElemVec; + } + + const GStrIdx GetNameStrIdx() const + { + return nameStrIdx; + } + + const GStrIdx GetTypeStrIdx() const + { + return typeStrIdx; + } + + PragmaValueType GetType() const + { + return valueType; + } + + int32 GetI32Val() const + { + return val.i; + } + + int64 GetI64Val() const + { + return val.j; + } + + uint64 GetU64Val() const + { + return val.u; + } + + float GetFloatVal() const + { + return val.f; + } + + double GetDoubleVal() const + { + return val.d; + } + + void SetTypeStrIdx(GStrIdx strIdx) + { + typeStrIdx = strIdx; + } + + void 
SetNameStrIdx(GStrIdx strIdx) + { + nameStrIdx = strIdx; + } + + void SetType(PragmaValueType type) + { + valueType = type; + } + + void SetI32Val(int32 val) + { + this->val.i = val; + } + + void SetI64Val(int64 val) + { + this->val.j = val; + } + + void SetU64Val(uint64 val) + { + this->val.u = val; + } + + void SetFloatVal(float val) + { + this->val.f = val; + } + + void SetDoubleVal(double val) + { + this->val.d = val; + } + +private: + GStrIdx nameStrIdx {0}; + GStrIdx typeStrIdx {0}; + PragmaValueType valueType = kValueNull; + union { + int32 i; + int64 j; + uint64 u; + float f; + double d; + } val; + MapleVector subElemVec; +}; + +class MIRPragma { +public: + explicit MIRPragma(MIRModule &m) : MIRPragma(m, m.GetPragmaMPAllocator()) {} + + MIRPragma(MIRModule &m, MapleAllocator &elemAllocator) : mod(&m), elementVec(elemAllocator.Adapter()) {} + + ~MIRPragma() = default; + MIRPragmaElement *GetPragmaElemFromSignature(const std::string &signature); + void Dump(int indent) const; + void PushElementVector(MIRPragmaElement *elem) + { + elementVec.push_back(elem); + } + + void ClearElementVector() + { + elementVec.clear(); + } + + PragmaKind GetKind() const + { + return pragmaKind; + } + + uint8 GetVisibility() const + { + return visibility; + } + + const GStrIdx GetStrIdx() const + { + return strIdx; + } + + const TyIdx GetTyIdx() const + { + return tyIdx; + } + + const TyIdx GetTyIdxEx() const + { + return tyIdxEx; + } + + int32 GetParamNum() const + { + return paramNum; + } + + const MapleVector &GetElementVector() const + { + return elementVec; + } + + const MIRPragmaElement *GetNthElement(uint32 i) const + { + return elementVec[i]; + } + + void ElementVecPushBack(MIRPragmaElement *elem) + { + elementVec.push_back(elem); + } + + void SetKind(PragmaKind kind) + { + pragmaKind = kind; + } + + void SetVisibility(uint8 visValue) + { + visibility = visValue; + } + + void SetStrIdx(GStrIdx idx) + { + strIdx = idx; + } + + void SetTyIdx(TyIdx idx) + { + tyIdx = idx; + } + + void SetTyIdxEx(TyIdx idx) + { + tyIdxEx = idx; + } + + void SetParamNum(int32 num) + { + paramNum = num; + } + +private: + MIRModule *mod; + PragmaKind pragmaKind = kPragmaUnknown; + uint8 visibility = 0; + GStrIdx strIdx {0}; + TyIdx tyIdx {0}; + TyIdx tyIdxEx {0}; + int32 paramNum = -1; // paramNum th param in function, -1 not for param annotation + MapleVector elementVec; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PRAGMA_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_preg.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_preg.h new file mode 100644 index 0000000000000000000000000000000000000000..b24c4bdb5e1bd2ced1b60274a164a2323a765c93 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_preg.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_PREG_H +#define MAPLE_IR_INCLUDE_MIR_PREG_H +#if MIR_FEATURE_FULL +#include +#include "mir_module.h" +#include "global_tables.h" +#endif // MIR_FEATURE_FULL + +namespace maple { +extern void PrintIndentation(int32 indent); + +// these special registers are encoded by negating the enumeration +enum SpecialReg : signed int { + kSregSp = 1, + kSregFp = 2, + kSregGp = 3, + kSregThrownval = 4, + kSregMethodhdl = 5, + kSregRetval0 = 6, + kSregRetval1 = 7, + kSregLast = 8, +}; +#if MIR_FEATURE_FULL +class MIRPreg { +public: + explicit MIRPreg(uint32 n = 0) : MIRPreg(n, kPtyInvalid, nullptr) {} + + MIRPreg(uint32 n, PrimType ptyp) : primType(ptyp), pregNo(n) {} + + MIRPreg(uint32 n, PrimType ptyp, MIRType *mType) : primType(ptyp), pregNo(n), mirType(mType) {} + + ~MIRPreg() = default; + void SetNeedRC(bool needRC = true) + { + this->needRC = needRC; + } + + bool NeedRC() const + { + return needRC; + } + + bool IsRef() const + { + return mirType != nullptr && primType == PTY_ref; + } + + PrimType GetPrimType() const + { + return primType; + } + + void SetPrimType(PrimType pty) + { + primType = pty; + } + + Opcode GetOp() const + { + return op; + } + + void SetOp(Opcode o) + { + this->op = o; + } + + int32 GetPregNo() const + { + return pregNo; + } + + void SetPregNo(int32 pregNo) + { + this->pregNo = pregNo; + } + + MIRType *GetMIRType() const + { + return mirType; + } + + void SetMIRType(MIRType *mirType) + { + this->mirType = mirType; + } + +private: + PrimType primType = kPtyInvalid; + bool needRC = false; + Opcode op = OP_undef; // OP_constval, OP_addrof or OP_dread if rematerializable + int32 pregNo; // the number in maple IR after the % + MIRType *mirType = nullptr; + +public: + union RematInfo { + const MIRConst *mirConst; // used only when op is OP_constval + const MIRSymbol *sym; // used only when op is OP_addrof or OP_dread + } rematInfo; + FieldID fieldID = 0; // used only when op is OP_addrof or OP_dread + bool addrUpper = false; // used only when op is OP_addrof to indicate upper bits of address +}; + +class MIRPregTable { +public: + explicit MIRPregTable(MapleAllocator *allocator) + : pregNoToPregIdxMap(allocator->Adapter()), pregTable(allocator->Adapter()), mAllocator(allocator) + { + pregTable.push_back(nullptr); + specPregTable[0].SetPregNo(0); + specPregTable[kSregSp].SetPregNo(-kSregSp); + specPregTable[kSregFp].SetPregNo(-kSregFp); + specPregTable[kSregGp].SetPregNo(-kSregGp); + specPregTable[kSregThrownval].SetPregNo(-kSregThrownval); + specPregTable[kSregMethodhdl].SetPregNo(-kSregMethodhdl); + specPregTable[kSregRetval0].SetPregNo(-kSregRetval0); + specPregTable[kSregRetval1].SetPregNo(-kSregRetval1); + for (uint32 i = 0; i < kSregLast; ++i) { + specPregTable[i].SetPrimType(PTY_unknown); + } + } + + ~MIRPregTable(); + + PregIdx CreatePreg(PrimType primType, MIRType *mtype = nullptr) + { + DEBUG_ASSERT(!mtype || mtype->GetPrimType() == PTY_ref || mtype->GetPrimType() == PTY_ptr, "ref or ptr type"); + uint32 index = ++maxPregNo; + auto *preg = mAllocator->GetMemPool()->New(index, primType, mtype); + return AddPreg(*preg); + } + + PregIdx ClonePreg(const MIRPreg &rfpreg) + { + PregIdx idx = CreatePreg(rfpreg.GetPrimType(), rfpreg.GetMIRType()); + MIRPreg *preg = pregTable[static_cast(idx)]; + preg->SetNeedRC(rfpreg.NeedRC()); + return idx; + } + + MIRPreg *PregFromPregIdx(PregIdx pregidx) + { + if (pregidx < 0) { // special register + return &specPregTable[-pregidx]; + } else { + return pregTable.at(static_cast(pregidx)); + } + } + + PregIdx 
GetPregIdxFromPregno(uint32 pregNo) + { + auto it = pregNoToPregIdxMap.find(pregNo); + return (it == pregNoToPregIdxMap.end()) ? PregIdx(0) : it->second; + } + + void DumpPregsWithTypes(int32 indent) + { + MapleVector &pregtable = pregTable; + for (uint32 i = 1; i < pregtable.size(); i++) { + MIRPreg *mirpreg = pregtable[i]; + if (mirpreg->GetMIRType() == nullptr) { + continue; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "reg "; + LogInfo::MapleLogger() << "%" << mirpreg->GetPregNo(); + LogInfo::MapleLogger() << " "; + mirpreg->GetMIRType()->Dump(0); + LogInfo::MapleLogger() << " " << (mirpreg->NeedRC() ? 1 : 0); + LogInfo::MapleLogger() << "\n"; + } + } + + size_t Size() const + { + return pregTable.size(); + } + + PregIdx AddPreg(MIRPreg &preg) + { + PregIdx idx = static_cast(pregTable.size()); + pregTable.push_back(&preg); + DEBUG_ASSERT(pregNoToPregIdxMap.find(preg.GetPregNo()) == pregNoToPregIdxMap.end(), + "The same pregno is already taken"); + pregNoToPregIdxMap[preg.GetPregNo()] = idx; + return idx; + } + + PregIdx EnterPregNo(uint32 pregNo, PrimType ptyp, MIRType *ty = nullptr) + { + PregIdx idx = GetPregIdxFromPregno(pregNo); + if (idx == 0) { + if (pregNo > maxPregNo) { + maxPregNo = pregNo; + } + MIRPreg *preg = mAllocator->GetMemPool()->New(pregNo, ptyp, ty); + return AddPreg(*preg); + } + return idx; + } + + MapleVector &GetPregTable() + { + return pregTable; + } + + const MapleVector &GetPregTable() const + { + return pregTable; + } + + const MIRPreg *GetPregTableItem(const uint32 index) const + { + CHECK_FATAL(index < pregTable.size(), "array index out of range"); + return pregTable[index]; + } + + void SetPregNoToPregIdxMapItem(uint32 key, PregIdx value) + { + pregNoToPregIdxMap[key] = value; + } + + uint32 GetMaxPregNo() const + { + return maxPregNo; + } + + void SetMaxPregNo(uint32 index) + { + maxPregNo = index; + } + + size_t SpecPregSize() + { + return kSregLast; + } + +private: + uint32 maxPregNo = 0; // the max pregNo that has been allocated + MapleMap pregNoToPregIdxMap; // for quick lookup based on pregno + MapleVector pregTable; + MIRPreg specPregTable[kSregLast]; // for the MIRPreg nodes corresponding to special registers + MapleAllocator *mAllocator; +}; + +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PREG_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_scope.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_scope.h new file mode 100644 index 0000000000000000000000000000000000000000..300bc12ff0f5e041b61371a9777f3044a6ee9ef2 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_scope.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef MAPLE_IR_INCLUDE_MIR_SCOPE_H
+#define MAPLE_IR_INCLUDE_MIR_SCOPE_H
+#include "mir_module.h"
+#include "mir_type.h"
+#include "src_position.h"
+
+namespace maple {
+// mapping src variable to mpl variables to display debug info
+struct MIRAliasVars {
+    GStrIdx mplStrIdx;  // maple variable name
+    TyIdx tyIdx;
+    bool isLocal;
+    GStrIdx sigStrIdx;
+};
+
+class MIRScope {
+public:
+    explicit MIRScope(MIRModule *mod) : module(mod) {}
+    MIRScope(MIRModule *mod, unsigned l) : module(mod), level(l) {}
+    ~MIRScope() = default;
+
+    bool NeedEmitAliasInfo() const
+    {
+        return aliasVarMap.size() != 0 || subScopes.size() != 0;
+    }
+
+    bool IsSubScope(const MIRScope *scp) const;
+    bool HasJoinScope(const MIRScope *scp1, const MIRScope *scp2) const;
+    bool HasSameRange(const MIRScope *s1, const MIRScope *s2) const;
+
+    unsigned GetLevel() const
+    {
+        return level;
+    }
+
+    const SrcPosition &GetRangeLow() const
+    {
+        return range.first;
+    }
+
+    const SrcPosition &GetRangeHigh() const
+    {
+        return range.second;
+    }
+
+    void SetRange(SrcPosition low, SrcPosition high)
+    {
+        DEBUG_ASSERT(low.IsBfOrEq(high), "wrong order of low and high");
+        range.first = low;
+        range.second = high;
+    }
+
+    void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars)
+    {
+        DEBUG_ASSERT(aliasVarMap.find(idx) == aliasVarMap.end(), "alias already exist");
+        aliasVarMap[idx] = vars;
+    }
+
+    void AddAliasVarMap(GStrIdx idx, const MIRAliasVars &vars)
+    {
+        /* allow same idx, save last aliasVars */
+        aliasVarMap[idx] = vars;
+    }
+
+    MapleMap<GStrIdx, MIRAliasVars> &GetAliasVarMap()
+    {
+        return aliasVarMap;
+    }
+
+    MapleVector<MIRScope *> &GetSubScopes()
+    {
+        return subScopes;
+    }
+
+    void IncLevel();
+    bool AddScope(MIRScope *scope);
+    void Dump(int32 indent) const;
+    void Dump() const;
+
+private:
+    MIRModule *module;
+    unsigned level = 0;
+    std::pair<SrcPosition, SrcPosition> range;
+    // source to maple variable alias
+    MapleMap<GStrIdx, MIRAliasVars> aliasVarMap {module->GetMPAllocator().Adapter()};
+    MapleVector<MIRScope *> subScopes {module->GetMPAllocator().Adapter()};
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_SCOPE_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol.h
new file mode 100644
index 0000000000000000000000000000000000000000..0ab6daceb483bee6eedd223bc7a5261f601abee7
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol.h
@@ -0,0 +1,843 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#ifndef MAPLE_IR_INCLUDE_MIR_SYMBOL_H +#define MAPLE_IR_INCLUDE_MIR_SYMBOL_H +#include +#include "mir_const.h" +#include "mir_preg.h" +#include "src_position.h" + +constexpr int kScopeLocal = 2; // the default scope level for function variables +constexpr int kScopeGlobal = 1; // the scope level for global variables + +namespace maple { +enum MIRSymKind { kStInvalid, kStVar, kStFunc, kStConst, kStJavaClass, kStJavaInterface, kStPreg }; + +enum MIRStorageClass : uint8 { + kScInvalid, + kScAuto, + kScAliased, + kScFormal, + kScExtern, + kScGlobal, + kScPstatic, // PU-static + kScFstatic, // file-static + kScText, + kScTypeInfo, // used for eh type st + kScTypeInfoName, // used for eh type st name + kScTypeCxxAbi, // used for eh inherited from c++ __cxxabiv1 + kScEHRegionSupp, // used for tables that control C++ exception handling + kScUnused +}; + +// to represent a single symbol +class MIRSymbol { +public: + union SymbolType { // a symbol can either be a const or a function or a preg which currently used for formal + MIRConst *konst; + MIRFunction *mirFunc; + MIRPreg *preg; // the MIRSymKind must be kStPreg + }; + + MIRSymbol() = default; + MIRSymbol(uint32 idx, uint8 scp) : stIdx(scp, idx) {} + ~MIRSymbol() = default; + + void SetIsTmp(bool temp) + { + isTmp = temp; + } + + bool GetIsTmp() const + { + return isTmp; + } + + void SetNeedForwDecl() + { + needForwDecl = true; + } + + bool IsNeedForwDecl() const + { + return needForwDecl; + } + + void SetInstrumented() + { + instrumented = true; + } + + bool IsInstrumented() const + { + return instrumented; + } + + void SetIsImported(bool imported) + { + isImported = imported; + } + + bool GetIsImported() const + { + return isImported; + } + + void SetWPOFakeParm() + { + wpoFakeParam = true; + } + + bool IsWpoFakeParm() const + { + return wpoFakeParam; + } + + bool IsWpoFakeRet() const + { + return wpoFakeRet; + } + + void SetWPOFakeRet() + { + wpoFakeRet = true; + } + + void SetIsTmpUnused(bool unused) + { + isTmpUnused = unused; + } + + void SetIsImportedDecl(bool imported) + { + isImportedDecl = imported; + } + + bool GetIsImportedDecl() const + { + return isImportedDecl; + } + + bool IsTmpUnused() const + { + return isTmpUnused; + } + + void SetAppearsInCode(bool appears) + { + appearsInCode = appears; + } + + bool GetAppearsInCode() const + { + return appearsInCode; + } + + void SetTyIdx(TyIdx tyIdx) + { + this->tyIdx = tyIdx; + } + + TyIdx GetTyIdx() const + { + return tyIdx; + } + + void SetInferredTyIdx(TyIdx inferredTyIdx) + { + this->inferredTyIdx = inferredTyIdx; + } + + TyIdx GetInferredTyIdx() const + { + return inferredTyIdx; + } + + void SetStIdx(StIdx stIdx) + { + this->stIdx = stIdx; + } + + StIdx GetStIdx() const + { + return stIdx; + } + + void SetSKind(MIRSymKind m) + { + sKind = m; + } + + MIRSymKind GetSKind() const + { + return sKind; + } + + uint32 GetScopeIdx() const + { + return stIdx.Scope(); + } + + uint32 GetStIndex() const + { + return stIdx.Idx(); + } + + bool IsLocal() const + { + return stIdx.Islocal(); + } + + bool IsGlobal() const + { + return stIdx.IsGlobal(); + } + + const TypeAttrs &GetAttrs() const + { + return typeAttrs; + } + + TypeAttrs &GetAttrs() + { + return typeAttrs; + } + + void SetAttrs(TypeAttrs attr) + { + typeAttrs = attr; + } + + // AddAttrs adds more attributes instead of overrides the current one + void AddAttrs(TypeAttrs attr) + { + typeAttrs.SetAttrFlag(typeAttrs.GetAttrFlag() | attr.GetAttrFlag()); + typeAttrs.AddAttrBoundary(attr.GetAttrBoundary()); + } + + bool GetAttr(AttrKind 
attrKind) const + { + return typeAttrs.GetAttr(attrKind); + } + + void SetAttr(AttrKind attrKind) + { + typeAttrs.SetAttr(attrKind); + } + + void ResetAttr(AttrKind attrKind) + { + typeAttrs.ResetAttr(attrKind); + } + + bool IsVolatile() const + { + return typeAttrs.GetAttr(ATTR_volatile); + } + + bool IsTypeVolatile(int fieldID) const; + + bool NeedPIC() const; + + bool IsThreadLocal() const + { + return typeAttrs.GetAttr(ATTR_tls_static) || typeAttrs.GetAttr(ATTR_tls_dynamic); + } + + bool IsStatic() const + { + return typeAttrs.GetAttr(ATTR_static); + } + + bool IsPUStatic() const + { + return GetStorageClass() == kScPstatic; + } + + bool IsFinal() const + { + return ((typeAttrs.GetAttr(ATTR_final) || typeAttrs.GetAttr(ATTR_readonly)) && + staticFinalBlackList.find(GetName()) == staticFinalBlackList.end()) || + IsLiteral() || IsLiteralPtr(); + } + + bool IsWeak() const + { + return typeAttrs.GetAttr(ATTR_weak); + } + + bool IsPrivate() const + { + return typeAttrs.GetAttr(ATTR_private); + } + + bool IsRefType() const + { + return typeAttrs.GetAttr(ATTR_localrefvar); + } + + void SetNameStrIdx(GStrIdx strIdx) + { + nameStrIdx = strIdx; + } + + void SetNameStrIdx(const std::string &name); + + GStrIdx GetNameStrIdx() const + { + return nameStrIdx; + } + + MIRStorageClass GetStorageClass() const + { + return storageClass; + } + + void SetStorageClass(MIRStorageClass cl) + { + storageClass = cl; + } + + bool IsReadOnly() const + { + return kScFstatic == storageClass && kStConst == sKind; + } + + bool IsConst() const + { + return sKind == kStConst || (sKind == kStVar && value.konst != nullptr); + } + + MIRType *GetType() const; + + const std::string &GetName() const + { + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameStrIdx); + } + + MIRConst *GetKonst() const + { + DEBUG_ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol"); + return value.konst; + } + + void SetKonst(MIRConst *mirconst) + { + DEBUG_ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol"); + value.konst = mirconst; + } + + void SetIsDeleted() + { + isDeleted = true; + } + + void ResetIsDeleted() + { + isDeleted = false; + } + + bool IsDeleted() const + { + return isDeleted; + } + + bool IsVar() const + { + return sKind == kStVar; + } + + bool IsPreg() const + { + return sKind == kStPreg; + } + + bool IsJavaClassInterface() const + { + return sKind == kStJavaClass || sKind == kStJavaInterface; + } + + SymbolType GetValue() const + { + return value; + } + + void SetValue(SymbolType value) + { + this->value = value; + } + + SrcPosition &GetSrcPosition() + { + return srcPosition; + } + + const SrcPosition &GetSrcPosition() const + { + return srcPosition; + } + + void SetSrcPosition(const SrcPosition &position) + { + srcPosition = position; + } + + MIRPreg *GetPreg() + { + DEBUG_ASSERT(IsPreg(), "must be Preg"); + return value.preg; + } + + const MIRPreg *GetPreg() const + { + CHECK_FATAL(IsPreg(), "must be Preg"); + return value.preg; + } + + void SetPreg(MIRPreg *preg) + { + CHECK_FATAL(IsPreg(), "must be Preg"); + value.preg = preg; + } + + bool CanBeIgnored() const + { + return isDeleted; + } + + void SetLocalRefVar() + { + SetAttr(ATTR_localrefvar); + } + + void ResetLocalRefVar() + { + ResetAttr(ATTR_localrefvar); + } + + MIRFunction *GetFunction() const + { + DEBUG_ASSERT(sKind == kStFunc, "must be function symbol"); + return value.mirFunc; + } + + void SetFunction(MIRFunction *func) + { + DEBUG_ASSERT(sKind == kStFunc, "must be function symbol"); + value.mirFunc = func; + } + + 
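Aside: as the accessors above show, MIRSymbol stores its payload in the untagged union SymbolType (konst / mirFunc / preg) and uses sKind as the external tag, with DEBUG_ASSERT guarding every read and write. Below is a self-contained toy analogue of that kind-checked union; Symbol, Konst, Func, and SymKind are hypothetical names, and assert stands in for DEBUG_ASSERT.

#include <cassert>
#include <cstdio>

struct Konst {
    long v;
};
struct Func {
    const char *name;
};

enum SymKind { kInvalid, kVar, kFunc };

class Symbol {
public:
    void SetKind(SymKind k)
    {
        kind = k;
    }
    void SetKonst(Konst *k)
    {
        assert(kind == kVar && "must be a var symbol");  // kind guard on write
        value.konst = k;
    }
    Konst *GetKonst() const
    {
        assert(kind == kVar && "must be a var symbol");  // kind guard on read
        return value.konst;
    }
    void SetFunction(Func *f)
    {
        assert(kind == kFunc && "must be a function symbol");
        value.func = f;
    }
    Func *GetFunction() const
    {
        assert(kind == kFunc && "must be a function symbol");
        return value.func;
    }

private:
    SymKind kind = kInvalid;
    union {  // untagged: 'kind' is the only record of which member is live
        Konst *konst;
        Func *func;
    } value = {nullptr};
};

int main()
{
    Konst pi {314};
    Symbol s;
    s.SetKind(kVar);
    s.SetKonst(&pi);
    std::printf("%ld\n", s.GetKonst()->v);  // prints 314
    return 0;
}

The union keeps the payload at a single pointer regardless of kind; checking the kind in the setters as well as the getters is what keeps a mistagged symbol from becoming a type-confused read.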
bool IsEhIndex() const + { + return GetName() == "__eh_index__"; + } + + bool HasAddrOfValues() const; + bool IsLiteral() const; + bool IsLiteralPtr() const; + bool PointsToConstString() const; + bool IsConstString() const; + bool IsClassInitBridge() const; + bool IsReflectionStrTab() const; + bool IsReflectionHashTabBucket() const; + bool IsReflectionInfo() const; + bool IsReflectionFieldsInfo() const; + bool IsReflectionFieldsInfoCompact() const; + bool IsReflectionSuperclassInfo() const; + bool IsReflectionFieldOffsetData() const; + bool IsReflectionMethodAddrData() const; + bool IsReflectionMethodSignature() const; + bool IsReflectionClassInfo() const; + bool IsReflectionArrayClassInfo() const; + bool IsReflectionClassInfoPtr() const; + bool IsReflectionClassInfoRO() const; + bool IsITabConflictInfo() const; + bool IsVTabInfo() const; + bool IsITabInfo() const; + bool IsReflectionPrimitiveClassInfo() const; + bool IsReflectionMethodsInfo() const; + bool IsReflectionMethodsInfoCompact() const; + bool IsRegJNITab() const; + bool IsRegJNIFuncTab() const; + bool IsMuidTab() const; + bool IsMuidRoTab() const; + bool IsCodeLayoutInfo() const; + std::string GetMuidTabName() const; + bool IsMuidFuncDefTab() const; + bool IsMuidFuncDefOrigTab() const; + bool IsMuidFuncInfTab() const; + bool IsMuidFuncUndefTab() const; + bool IsMuidDataDefTab() const; + bool IsMuidDataDefOrigTab() const; + bool IsMuidDataUndefTab() const; + bool IsMuidFuncDefMuidTab() const; + bool IsMuidFuncUndefMuidTab() const; + bool IsMuidDataDefMuidTab() const; + bool IsMuidDataUndefMuidTab() const; + bool IsMuidFuncMuidIdxMuidTab() const; + bool IsMuidRangeTab() const; + bool IsArrayClassCache() const; + bool IsArrayClassCacheName() const; + bool IsForcedGlobalFunc() const; + bool IsForcedGlobalClassinfo() const; + bool IsGctibSym() const; + bool IsPrimordialObject() const; + bool IgnoreRC() const; + void Dump(bool isLocal, int32 indent, bool suppressInit = false, const MIRSymbolTable *localsymtab = nullptr) const; + void DumpAsLiteralVar() const; + bool operator==(const MIRSymbol &msym) const + { + return nameStrIdx == msym.nameStrIdx; + } + + bool operator!=(const MIRSymbol &msym) const + { + return nameStrIdx != msym.nameStrIdx; + } + + bool operator<(const MIRSymbol &msym) const + { + return nameStrIdx < msym.nameStrIdx; + } + + static uint32 &LastPrintedLineNumRef() + { + return lastPrintedLineNum; + } + + static uint16 &LastPrintedColumnNumRef() + { + return lastPrintedColumnNum; + } + + bool HasPotentialAssignment() const + { + return hasPotentialAssignment; + } + + void SetHasPotentialAssignment() + { + hasPotentialAssignment = true; + } + + void SetAsmAttr(const UStrIdx &idx) + { + asmAttr = idx; + } + + const UStrIdx &GetAsmAttr() const + { + return asmAttr; + } + + void SetWeakrefAttr(const std::pair &idx) + { + weakrefAttr = idx; + } + + const std::pair &GetWeakrefAttr() const + { + return weakrefAttr; + } + + bool IsFormal() const + { + return storageClass == kScFormal; + } + + bool LMBCAllocateOffSpecialReg() const + { + if (isDeleted) { + return false; + } + switch (storageClass) { + case kScAuto: + return true; + case kScPstatic: + case kScFstatic: + return value.konst == nullptr && !hasPotentialAssignment; + default: + return false; + } + } + + // Please keep order of the fields, avoid paddings. 
+private: + TyIdx tyIdx {0}; + TyIdx inferredTyIdx {kInitTyIdx}; + MIRStorageClass storageClass {kScInvalid}; + MIRSymKind sKind {kStInvalid}; + bool isTmp = false; + bool needForwDecl = false; // addrof of this symbol used in initialization, NOT serialized + bool wpoFakeParam = false; // fake symbol introduced in wpo phase for a parameter, NOT serialized + bool wpoFakeRet = false; // fake symbol introduced in wpo phase for return value, NOT serialized + bool isDeleted = false; // tell if it is deleted, NOT serialized + bool instrumented = false; // a local ref pointer instrumented by RC opt, NOT serialized + bool isImported = false; + bool isImportedDecl = false; + bool isTmpUnused = false; // when parse the mplt_inline file, mark all the new symbol as tmpunused + bool appearsInCode = false; // only used for kStFunc + bool hasPotentialAssignment = false; // for global static vars, init as false and will be set true + // if assigned by stmt or the address of itself is taken + StIdx stIdx {0, 0}; + TypeAttrs typeAttrs; + GStrIdx nameStrIdx {0}; + std::pair weakrefAttr {false, 0}; + +public: + UStrIdx asmAttr {0}; // if not 0, the string for the name in C's asm attribute + UStrIdx sectionAttr {0}; // if not 0, the string for the name in C's section attribute +private: + SymbolType value = {nullptr}; + SrcPosition srcPosition; // where the symbol is defined + // following cannot be assumed final even though they are declared final + static const std::set staticFinalBlackList; + static GStrIdx reflectClassNameIdx; + static GStrIdx reflectMethodNameIdx; + static GStrIdx reflectFieldNameIdx; + static uint32 lastPrintedLineNum; // used during printing ascii output + static uint16 lastPrintedColumnNum; +}; + +class MIRSymbolTable { +public: + explicit MIRSymbolTable(const MapleAllocator &allocator) + : mAllocator(allocator), strIdxToStIdxMap(mAllocator.Adapter()), symbolTable({nullptr}, mAllocator.Adapter()) + { + } + + ~MIRSymbolTable() = default; + + bool IsValidIdx(uint32 idx) const + { + return idx < symbolTable.size(); + } + + MIRSymbol *GetSymbolFromStIdx(uint32 idx, bool checkFirst = false) const + { + if (checkFirst && idx >= symbolTable.size()) { + return nullptr; + } + CHECK_FATAL(IsValidIdx(idx), "symbol table index out of range"); + return symbolTable[idx]; + } + + MIRSymbol *CreateSymbol(uint8 scopeID) + { + auto *st = mAllocator.GetMemPool()->New(symbolTable.size(), scopeID); + symbolTable.push_back(st); + return st; + } + + void PushNullSymbol() + { + symbolTable.push_back(nullptr); + } + + // add sym from other symbol table, happens in inline + bool AddStOutside(MIRSymbol *sym) + { + if (sym == nullptr) { + return false; + } + sym->SetStIdx(StIdx(sym->GetScopeIdx(), symbolTable.size())); + symbolTable.push_back(sym); + return AddToStringSymbolMap(*sym); + } + + bool AddToStringSymbolMap(const MIRSymbol &st) + { + GStrIdx strIdx = st.GetNameStrIdx(); + if (strIdxToStIdxMap[strIdx].FullIdx() != 0) { + return false; + } + strIdxToStIdxMap[strIdx] = st.GetStIdx(); + return true; + } + + StIdx GetStIdxFromStrIdx(GStrIdx idx) const + { + auto it = strIdxToStIdxMap.find(idx); + return (it == strIdxToStIdxMap.end()) ? 
+        return (it == strIdxToStIdxMap.end()) ? StIdx() : it->second;
+    }
+
+    MIRSymbol *GetSymbolFromStrIdx(GStrIdx idx, bool checkFirst = false)
+    {
+        return GetSymbolFromStIdx(GetStIdxFromStrIdx(idx).Idx(), checkFirst);
+    }
+
+    void Dump(bool isLocal, int32 indent = 0, bool printDeleted = false, MIRFlavor flavor = kFlavorUnknown) const;
+
+    size_t GetSymbolTableSize() const
+    {
+        return symbolTable.size();
+    }
+
+    MapleVector<MIRSymbol *> &GetTable()
+    {
+        return symbolTable;
+    }
+
+    const MapleVector<MIRSymbol *> &GetTable() const
+    {
+        return symbolTable;
+    }
+
+    const MIRSymbol *GetSymbolAt(uint32 idx) const
+    {
+        DEBUG_ASSERT(idx < symbolTable.size(), "symbol id out of table range");
+        return symbolTable[idx];
+    }
+
+    MIRSymbol *GetSymbolAt(uint32 idx)
+    {
+        return const_cast<MIRSymbol *>(const_cast<const MIRSymbolTable *>(this)->GetSymbolAt(idx));
+    }
+
+    void Clear()
+    {
+        symbolTable.clear();
+        strIdxToStIdxMap.clear();
+    }
+
+    MIRSymbol *CloneLocalSymbol(const MIRSymbol &oldSym) const
+    {
+        auto *memPool = mAllocator.GetMemPool();
+        auto *newSym = memPool->New<MIRSymbol>(oldSym);
+        if (oldSym.GetSKind() == kStConst) {
+            newSym->SetKonst(oldSym.GetKonst()->Clone(*memPool));
+        } else if (oldSym.GetSKind() == kStPreg) {
+            newSym->SetPreg(memPool->New<MIRPreg>(*oldSym.GetPreg()));
+        } else if (oldSym.GetSKind() == kStFunc) {
+            CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym.GetName().c_str());
+        }
+        return newSym;
+    }
+
+private:
+    MapleAllocator mAllocator;
+    // hash table mapping string index to st index
+    MapleMap<GStrIdx, StIdx> strIdxToStIdxMap;
+    // map symbol idx to symbol node
+    MapleVector<MIRSymbol *> symbolTable;
+};
+
+class MIRLabelTable {
+public:
+    explicit MIRLabelTable(MapleAllocator &allocator)
+        : addrTakenLabels(allocator.Adapter()),
+          caseLabelSet(allocator.Adapter()),
+          mAllocator(allocator),
+          strIdxToLabIdxMap(std::less<GStrIdx>(), mAllocator.Adapter()),
+          labelTable(mAllocator.Adapter())
+    {
+        labelTable.push_back(GStrIdx(kDummyLabel));  // push dummy label index 0
+    }
+
+    ~MIRLabelTable() = default;
+
+    LabelIdx CreateLabel()
+    {
+        LabelIdx labelIdx = labelTable.size();
+        GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(std::to_string(labelIdx));
+        labelTable.push_back(strIdx);
+        return labelIdx;
+    }
+
+    LabelIdx CreateLabelWithPrefix(char c);
+
+    LabelIdx AddLabel(GStrIdx nameIdx)
+    {
+        LabelIdx labelIdx = labelTable.size();
+        labelTable.push_back(nameIdx);
+        strIdxToLabIdxMap[nameIdx] = labelIdx;
+        return labelIdx;
+    }
+
+    LabelIdx GetLabelIdxFromStrIdx(GStrIdx idx) const
+    {
+        auto it = strIdxToLabIdxMap.find(idx);
+        if (it == strIdxToLabIdxMap.end()) {
+            return LabelIdx();
+        }
+        return it->second;
+    }
+
+    void AddToStringLabelMap(LabelIdx labelIdx);
+    size_t GetLabelTableSize() const
+    {
+        return labelTable.size();
+    }
+
+    const std::string &GetName(LabelIdx labelIdx) const;
+
+    size_t Size() const
+    {
+        return labelTable.size();
+    }
+
+    static uint32 GetDummyLabel()
+    {
+        return kDummyLabel;
+    }
+
+    GStrIdx GetSymbolFromStIdx(LabelIdx idx) const
+    {
+        CHECK_FATAL(idx < labelTable.size(), "label table index out of range");
+        return labelTable[idx];
+    }
+
+    void SetSymbolFromStIdx(LabelIdx idx, GStrIdx strIdx)
+    {
+        CHECK_FATAL(idx < labelTable.size(), "label table index out of range");
+        labelTable[idx] = strIdx;
+    }
+
+    MapleVector<GStrIdx> GetLabelTable()
+    {
+        return labelTable;
+    }
+
+    const MapleUnorderedSet<LabelIdx> &GetAddrTakenLabels() const
+    {
+        return addrTakenLabels;
+    }
+
+    MapleUnorderedSet<LabelIdx> &GetAddrTakenLabels()
+    {
+        return addrTakenLabels;
+    }
+
+    const MapleMap<GStrIdx, LabelIdx> &GetStrIdxToLabelIdxMap() const
+    {
+        return strIdxToLabIdxMap;
+    }
+    void EraseStrIdxToLabelIdxElem(GStrIdx idx)
+    {
+        strIdxToLabIdxMap.erase(idx);
+    }
+
+    MapleUnorderedSet<LabelIdx> addrTakenLabels;  // those appeared in addroflabel or MIRLblConst
+    MapleUnorderedSet<LabelIdx> caseLabelSet;     // labels marking starts of switch cases
+
+private:
+    static constexpr uint32 kDummyLabel = 0;
+    MapleAllocator mAllocator;
+    MapleMap<GStrIdx, LabelIdx> strIdxToLabIdxMap;
+    MapleVector<GStrIdx> labelTable;  // map label idx to label name
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_MIR_SYMBOL_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol_builder.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..b3f71fa9cf14f272b4051902f0fda8e9d49435ac
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_symbol_builder.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
+#define MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
+#include <string>
+#include <utility>
+#include <vector>
+#include <map>
+#include "opcodes.h"
+#include "prim_types.h"
+#include "mir_type.h"
+#include "mir_const.h"
+#include "mir_symbol.h"
+#include "mir_nodes.h"
+#include "mir_module.h"
+#include "mir_preg.h"
+#include "mir_function.h"
+#include "printing.h"
+#include "intrinsic_op.h"
+#include "opcode_info.h"
+#include "global_tables.h"
+
+namespace maple {
+class MIRSymbolBuilder {
+public:
+    static MIRSymbolBuilder &Instance()
+    {
+        static MIRSymbolBuilder builder;
+        return builder;
+    }
+
+    MIRSymbol *GetLocalDecl(const MIRSymbolTable &symbolTable, const GStrIdx &strIdx) const;
+    MIRSymbol *CreateLocalDecl(MIRSymbolTable &symbolTable, GStrIdx strIdx, const MIRType &type) const;
+    MIRSymbol *GetGlobalDecl(GStrIdx strIdx) const;
+    MIRSymbol *CreateGlobalDecl(GStrIdx strIdx, const MIRType &type, MIRStorageClass sc) const;
+    MIRSymbol *GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass,
+                         bool sameType = false) const;
+    MIRSymbol *CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, MIRFunction *func,
+                            uint8 scpID) const;
+    MIRSymbol *CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const;
+    size_t GetSymbolTableSize(const MIRFunction *func = nullptr) const;
+    const MIRSymbol *GetSymbolFromStIdx(uint32 idx, const MIRFunction *func = nullptr) const;
+
+private:
+    MIRSymbolBuilder() = default;
+    ~MIRSymbolBuilder() = default;
+    MIRSymbolBuilder(const MIRSymbolBuilder &) = delete;
+    MIRSymbolBuilder(const MIRSymbolBuilder &&) = delete;
+    MIRSymbolBuilder &operator=(const MIRSymbolBuilder &) = delete;
+    MIRSymbolBuilder &operator=(const MIRSymbolBuilder &&) = delete;
+};
+}  // namespace maple
+#endif  // MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mir_type.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_type.h
new file mode 100644
index 0000000000000000000000000000000000000000..ec53433fec4647d10cacbc98632847ec9868685c
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mir_type.h
@@ -0,0 +1,2532 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_MIR_TYPE_H
+#define MAPLE_IR_INCLUDE_MIR_TYPE_H
+#include <algorithm>
+#include <array>
+#include "prim_types.h"
+#include "mir_pragma.h"
+#include "mpl_logging.h"
+#if MIR_FEATURE_FULL
+#include "mempool.h"
+#include "mempool_allocator.h"
+#endif  // MIR_FEATURE_FULL
+
+namespace maple {
+constexpr uint32 kTypeHashLength = 12289;   // hash length for mirtype, ref: planetmath.org/goodhashtableprimes
+const std::string kRenameKeyWord = "_MNO";  // A static symbol name will be renamed as oriname_MNOxxx.
+
+class FieldAttrs;  // circular dependency exists, no other choice
+using TyIdxFieldAttrPair = std::pair<TyIdx, FieldAttrs>;
+using FieldPair = std::pair<GStrIdx, TyIdxFieldAttrPair>;
+using FieldVector = std::vector<FieldPair>;
+using MIRTypePtr = MIRType *;
+
+constexpr size_t kMaxArrayDim = 20;
+const std::string kJstrTypeName = "constStr";
+constexpr uint32 kInvalidFieldNum = UINT32_MAX;
+constexpr size_t kInvalidSize = static_cast<size_t>(UINT64_MAX);
+#if MIR_FEATURE_FULL
+extern bool VerifyPrimType(PrimType primType1, PrimType primType2);  // verify if primType1 and primType2 match
+extern PrimType GetExactPtrPrimType();             // return either PTY_a64 or PTY_a32
+extern uint32 GetPrimTypeSize(PrimType primType);  // answer in bytes; 0 if unknown
+extern uint32 GetPrimTypeP2Size(PrimType primType);  // answer in bytes in power-of-two.
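+// Usage sketch (editor's illustrative example, not part of the original patch):
+// the size helpers relate as
+//     GetPrimTypeBitSize(pty) == GetPrimTypeSize(pty) * 8        (defined below)
+//     GetPrimTypeSize(pty)    == 1u << GetPrimTypeP2Size(pty)    (for power-of-two sizes)
+// e.g., assuming PTY_i32 is a 4-byte primitive:
+//     GetPrimTypeSize(PTY_i32) == 4; GetPrimTypeP2Size(PTY_i32) == 2; GetPrimTypeBitSize(PTY_i32) == 32.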
+extern PrimType GetSignedPrimType(PrimType pty); // return signed version +extern PrimType GetUnsignedPrimType(PrimType pty); // return unsigned version +extern uint32 GetVecEleSize(PrimType primType); // element size of each lane in vector +extern uint32 GetVecLanes(PrimType primType); // lane size if vector +extern const char *GetPrimTypeName(PrimType primType); +extern const char *GetPrimTypeJavaName(PrimType primType); +extern int64 MinValOfSignedInteger(PrimType primType); +extern PrimType GetVecElemPrimType(PrimType primType); +constexpr uint32 k0BitSize = 0; +constexpr uint32 k1BitSize = 1; +constexpr uint32 k2BitSize = 2; +constexpr uint32 k3BitSize = 3; +constexpr uint32 k4BitSize = 4; +constexpr uint32 k5BitSize = 5; +constexpr uint32 k8BitSize = 8; +constexpr uint32 k9BitSize = 9; +constexpr uint32 k10BitSize = 10; +constexpr uint32 k16BitSize = 16; +constexpr uint32 k32BitSize = 32; +constexpr uint32 k64BitSize = 64; + +inline uint32 GetPrimTypeBitSize(PrimType primType) +{ + // 1 byte = 8 bits = 2^3 bits + return GetPrimTypeSize(primType) << 3; +} + +inline uint32 GetPrimTypeActualBitSize(PrimType primType) +{ + // GetPrimTypeSize(PTY_u1) will return 1, so we take it as a special case + if (primType == PTY_u1) { + return 1; + } + // 1 byte = 8 bits = 2^3 bits + return GetPrimTypeSize(primType) << 3; +} + +#endif // MIR_FEATURE_FULL +// return the same type with size increased to register size +PrimType GetRegPrimType(PrimType primType); +PrimType GetDynType(PrimType primType); +PrimType GetReg64PrimType(PrimType primType); +PrimType GetNonDynType(PrimType primType); +PrimType GetIntegerPrimTypeBySizeAndSign(size_t sizeBit, bool isSign); + +inline bool IsAddress(PrimitiveType primitiveType) +{ + return primitiveType.IsAddress(); +} + +inline bool IsPossible64BitAddress(PrimType tp) +{ + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u64 || tp == PTY_a64); +} + +inline bool IsPossible32BitAddress(PrimType tp) +{ + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u32 || tp == PTY_a32); +} + +inline bool MustBeAddress(PrimType tp) +{ + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_a64 || tp == PTY_a32); +} + +inline bool IsPrimitivePureScalar(PrimitiveType primitiveType) +{ + return primitiveType.IsInteger() && !primitiveType.IsAddress() && !primitiveType.IsDynamic() && + !primitiveType.IsVector(); +} + +inline bool IsPrimitiveUnsigned(PrimitiveType primitiveType) +{ + return primitiveType.IsUnsigned(); +} + +inline bool IsUnsignedInteger(PrimitiveType primitiveType) +{ + return IsPrimitiveUnsigned(primitiveType) && primitiveType.IsInteger() && !primitiveType.IsDynamic(); +} + +inline bool IsSignedInteger(PrimitiveType primitiveType) +{ + return !IsPrimitiveUnsigned(primitiveType) && primitiveType.IsInteger() && !primitiveType.IsDynamic(); +} + +inline bool IsPrimitiveInteger(PrimitiveType primitiveType) +{ + return primitiveType.IsInteger() && !primitiveType.IsDynamic() && !primitiveType.IsVector(); +} + +inline bool IsPrimitiveDynType(PrimitiveType primitiveType) +{ + return primitiveType.IsDynamic(); +} + +inline bool IsPrimitiveDynInteger(PrimitiveType primitiveType) +{ + return primitiveType.IsDynamic() && primitiveType.IsInteger(); +} + +inline bool IsPrimitiveDynFloat(PrimitiveType primitiveType) +{ + return primitiveType.IsDynamic() && primitiveType.IsFloat(); +} + +inline bool IsPrimitiveFloat(PrimitiveType primitiveType) +{ + return primitiveType.IsFloat() && !primitiveType.IsDynamic() && !primitiveType.IsVector(); +} + +inline bool 
IsPrimitiveScalar(PrimitiveType primitiveType) +{ + return primitiveType.IsInteger() || primitiveType.IsFloat() || + (primitiveType.IsDynamic() && !primitiveType.IsDynamicNone()) || primitiveType.IsSimple(); +} + +inline bool IsPrimitiveValid(PrimitiveType primitiveType) +{ + return IsPrimitiveScalar(primitiveType) && !primitiveType.IsDynamicAny(); +} + +inline bool IsPrimitivePoint(PrimitiveType primitiveType) +{ + return primitiveType.IsPointer(); +} + +inline bool IsPrimitiveVector(PrimitiveType primitiveType) +{ + return primitiveType.IsVector(); +} + +inline bool IsPrimitiveVectorFloat(PrimitiveType primitiveType) +{ + return primitiveType.IsVector() && primitiveType.IsFloat(); +} + +inline bool IsPrimitiveVectorInteger(PrimitiveType primitiveType) +{ + return primitiveType.IsVector() && primitiveType.IsInteger(); +} + +inline bool IsPrimitiveUnSignedVector(const PrimitiveType &primitiveType) +{ + return IsPrimitiveUnsigned(primitiveType) && primitiveType.IsVector(); +} + +bool IsNoCvtNeeded(PrimType toType, PrimType fromType); +bool NeedCvtOrRetype(PrimType origin, PrimType compared); + +uint8 GetPointerSize(); +uint8 GetP2Size(); +PrimType GetLoweredPtrType(); + +inline bool IsRefOrPtrAssign(PrimType toType, PrimType fromType) +{ + return (toType == PTY_ref && fromType == PTY_ptr) || (toType == PTY_ptr && fromType == PTY_ref); +} + +enum MIRTypeKind : std::uint8_t { + kTypeInvalid, + kTypeUnknown, + kTypeScalar, + kTypeBitField, + kTypeArray, + kTypeFArray, + kTypeJArray, + kTypeStruct, + kTypeUnion, + kTypeClass, + kTypeInterface, + kTypeStructIncomplete, + kTypeClassIncomplete, + kTypeConstString, + kTypeInterfaceIncomplete, + kTypePointer, + kTypeFunction, + kTypeVoid, + kTypeByName, // type definition not yet seen + kTypeParam, // to support java generics + kTypeInstantVector, // represent a vector of instantiation pairs + kTypeGenericInstant, // type to be formed by instantiation of a generic type +}; + +enum AttrKind : unsigned { +#define TYPE_ATTR +#define ATTR(STR) ATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef TYPE_ATTR +}; + +class AttrBoundary { +public: + AttrBoundary() = default; + ~AttrBoundary() = default; + + bool operator==(const AttrBoundary &tA) const + { + return lenExprHash == tA.lenExprHash && lenParamIdx == tA.lenParamIdx && isBytedLen == tA.isBytedLen; + } + + bool operator!=(const AttrBoundary &tA) const + { + return !(*this == tA); + } + + bool operator<(const AttrBoundary &tA) const + { + return lenExprHash < tA.lenExprHash && lenParamIdx < tA.lenParamIdx && + static_cast(isBytedLen) < static_cast(tA.isBytedLen); + } + + void SetLenExprHash(uint32 val) + { + lenExprHash = val; + } + + uint32 GetLenExprHash() const + { + return lenExprHash; + } + + void SetLenParamIdx(int8 idx) + { + lenParamIdx = idx; + } + + int8 GetLenParamIdx() const + { + return lenParamIdx; + } + + void SetIsBytedLen(bool flag) + { + isBytedLen = flag; + } + + bool IsBytedLen() const + { + return isBytedLen; + } + + void Clear() + { + lenExprHash = 0; + lenParamIdx = -1; + isBytedLen = false; + } + +private: + bool isBytedLen = false; + int8 lenParamIdx = -1; + uint32 lenExprHash = 0; +}; + +class TypeAttrs { +public: + TypeAttrs() = default; + TypeAttrs(const TypeAttrs &ta) = default; + TypeAttrs &operator=(const TypeAttrs &t) = default; + ~TypeAttrs() = default; + + void SetAlignValue(uint8 align) + { + attrAlign = align; + } + + uint8 GetAlignValue() const + { + return attrAlign; + } + + void SetAttrFlag(uint64 flag) + { + attrFlag = flag; + } + + uint64 
GetAttrFlag() const + { + return attrFlag; + } + + void SetAttr(AttrKind x) + { + attrFlag |= (1ULL << static_cast(x)); + } + + void ResetAttr(AttrKind x) + { + attrFlag &= ~(1ULL << static_cast(x)); + } + + bool GetAttr(AttrKind x) const + { + return (attrFlag & (1ULL << static_cast(x))) != 0; + } + + void SetAlign(uint32 x) + { + DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2"); + attrAlign = 0; + while (x != 1) { + x >>= 1; + ++attrAlign; + } + } + + uint32 GetAlign() const + { + if (attrAlign == 0) { + return 1; + } + uint32 res = 1; + uint32 exp = attrAlign; + do { + --exp; + res *= 2; + } while (exp != 0); + return res; + } + + bool operator==(const TypeAttrs &tA) const + { + return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == tA.attrBoundary; + } + + bool operator!=(const TypeAttrs &tA) const + { + return !(*this == tA); + } + + void DumpAttributes() const; + + const AttrBoundary &GetAttrBoundary() const + { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() + { + return attrBoundary; + } + + void AddAttrBoundary(const AttrBoundary &attr) + { + if (attr.GetLenExprHash() != 0) { + attrBoundary.SetLenExprHash(attr.GetLenExprHash()); + } + if (attr.GetLenParamIdx() != -1) { + attrBoundary.SetLenParamIdx(attr.GetLenParamIdx()); + } + if (attr.IsBytedLen()) { + attrBoundary.SetIsBytedLen(attr.IsBytedLen()); + } + } + + void SetPack(uint32 pack) + { + attrPack = pack; + } + + uint32 GetPack() const + { + return attrPack; + } + + bool IsPacked() const + { + return GetAttr(ATTR_pack); + } + +private: + uint64 attrFlag = 0; + uint8 attrAlign = 0; // alignment in bytes is 2 to the power of attrAlign + uint32 attrPack = -1; // -1 means inactive + AttrBoundary attrBoundary; // boundary attr for EnhanceC +}; + +enum FieldAttrKind { +#define FIELD_ATTR +#define ATTR(STR) FLDATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FIELD_ATTR +}; + +class FieldAttrs { +public: + FieldAttrs() = default; + FieldAttrs(const FieldAttrs &ta) = default; + FieldAttrs &operator=(const FieldAttrs &p) = default; + ~FieldAttrs() = default; + + void SetAlignValue(uint8 align) + { + attrAlign = align; + } + + uint8 GetAlignValue() const + { + return attrAlign; + } + + void SetAttrFlag(uint32 flag) + { + attrFlag = flag; + } + + uint32 GetAttrFlag() const + { + return attrFlag; + } + + void SetAttr(FieldAttrKind x) + { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(FieldAttrKind x) const + { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + void SetAlign(uint32 x) + { + DEBUG_ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2"); + attrAlign = 0; + while (x != 1) { + x >>= 1; + ++attrAlign; + } + } + + uint32 GetAlign() const + { + return 1U << attrAlign; + } + + bool operator==(const FieldAttrs &tA) const + { + return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == tA.attrBoundary; + } + + bool operator!=(const FieldAttrs &tA) const + { + return !(*this == tA); + } + + bool operator<(const FieldAttrs &tA) const + { + return attrFlag < tA.attrFlag && attrAlign < tA.attrAlign && attrBoundary < tA.attrBoundary; + } + + void Clear() + { + attrFlag = 0; + attrAlign = 0; + attrBoundary.Clear(); + } + + void DumpAttributes() const; + TypeAttrs ConvertToTypeAttrs(); + + const AttrBoundary &GetAttrBoundary() const + { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() + { + return attrBoundary; + } + + bool IsPacked() const + { + return GetAttr(FLDATTR_pack); + } + +private: + uint8 
attrAlign = 0; // alignment in bytes is 2 to the power of attrAlign + uint32 attrFlag = 0; + AttrBoundary attrBoundary; +}; + +enum StmtAttrKind : unsigned { +#define STMT_ATTR +#define ATTR(STR) STMTATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef STMT_ATTR +}; + +class StmtAttrs { +public: + StmtAttrs() = default; + StmtAttrs(const StmtAttrs &ta) = default; + StmtAttrs &operator=(const StmtAttrs &p) = default; + ~StmtAttrs() = default; + + void SetAttr(StmtAttrKind x) + { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(StmtAttrKind x) const + { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + uint32 GetTargetAttrFlag(StmtAttrKind x) const + { + return attrFlag & (1u << static_cast(x)); + } + + uint32 GetAttrFlag() const + { + return attrFlag; + } + + void AppendAttr(uint32 flag) + { + attrFlag |= flag; + } + + void Clear() + { + attrFlag = 0; + } + + void DumpAttributes() const; + +private: + uint32 attrFlag = 0; +}; + +enum FuncAttrKind : unsigned { +#define FUNC_ATTR +#define ATTR(STR) FUNCATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +}; + +class FuncAttrs { +public: + FuncAttrs() = default; + FuncAttrs(const FuncAttrs &ta) = default; + FuncAttrs &operator=(const FuncAttrs &p) = default; + ~FuncAttrs() = default; + + void SetAttr(FuncAttrKind x, bool unSet = false) + { + if (!unSet) { + attrFlag |= (1ULL << x); + } else { + attrFlag &= ~(1ULL << x); + } + } + + void SetAliasFuncName(const std::string &name) + { + aliasFuncName = name; + } + + const std::string &GetAliasFuncName() const + { + return aliasFuncName; + } + + void SetPrefixSectionName(const std::string &name) + { + prefixSectionName = name; + } + + const std::string &GetPrefixSectionName() const + { + return prefixSectionName; + } + + void SetAttrFlag(uint64 flag) + { + attrFlag = flag; + } + + uint64 GetAttrFlag() const + { + return attrFlag; + } + + bool GetAttr(FuncAttrKind x) const + { + return (attrFlag & (1ULL << x)) != 0; + } + + bool operator==(const FuncAttrs &tA) const + { + return attrFlag == tA.attrFlag; + } + + bool operator!=(const FuncAttrs &tA) const + { + return !(*this == tA); + } + + void DumpAttributes() const; + + const AttrBoundary &GetAttrBoundary() const + { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() + { + return attrBoundary; + } + + void SetConstructorPriority(int priority) + { + constructorPriority = priority; + } + + int GetConstructorPriority() const + { + return constructorPriority; + } + + void SetDestructorPriority(int priority) + { + destructorPriority = priority; + } + + int GetDestructorPriority() const + { + return destructorPriority; + } + + int GetFrameResverdSlot() const + { + return frameResverdSlot; + } + + void SetFrameResverdSlot(int slot) + { + SetAttr(FUNCATTR_frame_pointer); + frameResverdSlot = slot; + } + + void SetFramePointer(std::string framePointer_) + { + SetAttr(FUNCATTR_frame_reserved_slots); + framePointer = framePointer_; + } + +private: + uint64 attrFlag = 0; + std::string aliasFuncName; + std::string prefixSectionName; + std::string framePointer; + AttrBoundary attrBoundary; // ret boundary for EnhanceC + int constructorPriority = -1; // 0~65535, -1 means inactive + int destructorPriority = -1; // 0~65535, -1 means inactive + int frameResverdSlot = 0; +}; + +#if MIR_FEATURE_FULL +constexpr size_t kShiftNumOfTypeKind = 8; +constexpr size_t kShiftNumOfNameStrIdx = 6; +constexpr int32 kOffsetUnknown = INT_MAX; +constexpr int32 kOffsetMax = (INT_MAX - 1); +constexpr int32 kOffsetMin = 
INT_MIN; +struct OffsetType { + explicit OffsetType(int64 offset) + { + Set(offset); + } + + OffsetType(const OffsetType &other) : val(other.val) {} + + ~OffsetType() = default; + + void Set(int64 offsetVal) + { + val = (offsetVal >= kOffsetMin && offsetVal <= kOffsetMax) ? static_cast(offsetVal) : kOffsetUnknown; + } + + bool IsInvalid() const + { + return val == kOffsetUnknown; + } + + OffsetType &operator=(const OffsetType &other) + { + val = other.val; + return *this; + } + + OffsetType operator+(int64 offset) const + { + if (this->IsInvalid() || OffsetType(offset).IsInvalid()) { + return InvalidOffset(); + } + return OffsetType(val + offset); + } + + OffsetType operator+(OffsetType other) const + { + return other + val; + } + + void operator+=(int64 offset) + { + if (this->IsInvalid() || OffsetType(offset).IsInvalid()) { + val = kOffsetUnknown; + return; + } + Set(offset + val); + } + + void operator+=(OffsetType other) + { + this->operator+=(other.val); + } + + OffsetType operator-() const + { + if (this->IsInvalid()) { + return *this; + } + return OffsetType(-val); + } + + bool operator<(OffsetType other) const + { + return val < other.val; + } + + bool operator==(OffsetType other) const + { + return val == other.val; + } + + bool operator!=(OffsetType other) const + { + return val != other.val; + } + + static OffsetType InvalidOffset() + { + return OffsetType(kOffsetUnknown); + } + + int32 val = kOffsetUnknown; +}; + +class MIRStructType; // circular dependency exists, no other choice +class MIRFuncType; + +class MIRType { +public: + MIRType(MIRTypeKind kind, PrimType pType) : typeKind(kind), primType(pType) {} + + MIRType(MIRTypeKind kind, PrimType pType, GStrIdx strIdx) : typeKind(kind), primType(pType), nameStrIdx(strIdx) {} + + virtual ~MIRType() = default; + + virtual void Dump(int indent, bool dontUseName = false) const; + virtual void DumpAsCxx(int indent) const; + virtual bool EqualTo(const MIRType &mirType) const; + virtual bool IsStructType() const + { + return false; + } + + virtual MIRType *CopyMIRTypeNode() const + { + return new MIRType(*this); + } + + PrimType GetPrimType() const + { + return primType; + } + void SetPrimType(const PrimType pt) + { + primType = pt; + } + + TyIdx GetTypeIndex() const + { + return tyIdx; + } + void SetTypeIndex(TyIdx idx) + { + tyIdx = idx; + } + + MIRTypeKind GetKind() const + { + return typeKind; + } + void SetMIRTypeKind(MIRTypeKind kind) + { + typeKind = kind; + } + + bool IsNameIsLocal() const + { + return nameIsLocal; + } + void SetNameIsLocal(bool flag) + { + nameIsLocal = flag; + } + + GStrIdx GetNameStrIdx() const + { + return nameStrIdx; + } + void SetNameStrIdx(GStrIdx strIdx) + { + nameStrIdx = strIdx; + } + void SetNameStrIdxItem(uint32 idx) + { + nameStrIdx.reset(idx); + } + + virtual size_t GetSize() const + { + return GetPrimTypeSize(primType); + } + + virtual uint32 GetAlign() const + { + return GetPrimTypeSize(primType); + } + + virtual bool HasVolatileField() const + { + return false; + } + + virtual bool HasTypeParam() const + { + return false; + } + + virtual bool IsIncomplete() const + { + return typeKind == kTypeStructIncomplete || typeKind == kTypeClassIncomplete || + typeKind == kTypeInterfaceIncomplete; + } + + bool IsVolatile(int fieldID) const; + + bool IsMIRPtrType() const + { + return typeKind == kTypePointer; + } + + bool IsMIRStructType() const + { + return (typeKind == kTypeStruct) || (typeKind == kTypeStructIncomplete); + } + + bool IsMIRUnionType() const + { + return typeKind == kTypeUnion; + } + + 
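+    // Usage sketch (editor's illustrative example, not part of the original patch):
+    // OffsetType, defined above, saturates to kOffsetUnknown instead of wrapping,
+    // so out-of-range offset arithmetic stays detectable:
+    //     OffsetType off(kOffsetMax);      // largest valid offset
+    //     off += 1;                        // leaves [kOffsetMin, kOffsetMax]
+    //     bool unknown = off.IsInvalid();  // true: val == kOffsetUnknown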
bool IsMIRClassType() const + { + return (typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete); + } + + bool IsMIRInterfaceType() const + { + return (typeKind == kTypeInterface) || (typeKind == kTypeInterfaceIncomplete); + } + + bool IsInstanceOfMIRStructType() const + { + return IsMIRStructType() || IsMIRClassType() || IsMIRInterfaceType(); + } + + bool IsMIRJarrayType() const + { + return typeKind == kTypeJArray; + } + + bool IsMIRArrayType() const + { + return typeKind == kTypeArray; + } + + bool IsMIRFuncType() const + { + return typeKind == kTypeFunction; + } + + bool IsScalarType() const + { + return typeKind == kTypeScalar; + } + + bool IsMIRTypeByName() const + { + return typeKind == kTypeByName; + } + + bool IsMIRBitFieldType() const + { + return typeKind == kTypeBitField; + } + + virtual bool IsUnsafeType() const + { + return false; + } + virtual bool IsVoidPointer() const + { + return false; + } + + bool ValidateClassOrInterface(const std::string &className, bool noWarning) const; + bool IsOfSameType(MIRType &type); + const std::string &GetName() const; + virtual std::string GetMplTypeName() const; + virtual std::string GetCompactMplTypeName() const; + virtual bool PointsToConstString() const; + virtual size_t GetHashIndex() const + { + constexpr uint8 idxShift = 2; + return ((static_cast(primType) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + virtual bool HasFields() const + { + return false; + } + // total number of field IDs the type is consisted of, excluding its own field ID + virtual uint32 NumberOfFieldIDs() const + { + return 0; + } + // return any struct type directly embedded in this type + virtual MIRStructType *EmbeddedStructType() + { + return nullptr; + } + + virtual int64 GetBitOffsetFromBaseAddr(FieldID fieldID) + { + (void)fieldID; + return 0; + } + +protected: + MIRTypeKind typeKind; + PrimType primType; + bool nameIsLocal = false; // needed when printing the type name + TyIdx tyIdx {0}; + GStrIdx nameStrIdx {0}; // name in global string table +}; + +class MIRPtrType : public MIRType { +public: + explicit MIRPtrType(TyIdx pTyIdx) : MIRType(kTypePointer, PTY_ptr), pointedTyIdx(pTyIdx) {} + + MIRPtrType(TyIdx pTyIdx, PrimType pty) : MIRType(kTypePointer, pty), pointedTyIdx(pTyIdx) {} + + MIRPtrType(PrimType primType, GStrIdx strIdx) : MIRType(kTypePointer, primType, strIdx), pointedTyIdx(0) {} + + ~MIRPtrType() override = default; + + MIRType *CopyMIRTypeNode() const override + { + return new MIRPtrType(*this); + } + + MIRType *GetPointedType() const; + + TyIdx GetPointedTyIdx() const + { + return pointedTyIdx; + } + void SetPointedTyIdx(TyIdx idx) + { + pointedTyIdx = idx; + } + + TypeAttrs &GetTypeAttrs() + { + return typeAttrs; + } + + const TypeAttrs &GetTypeAttrs() const + { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) + { + typeAttrs = attrs; + } + + bool EqualTo(const MIRType &type) const override; + + bool HasTypeParam() const override; + bool IsPointedTypeVolatile(int fieldID) const; + bool IsUnsafeType() const override; + bool IsVoidPointer() const override; + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override; + uint32 GetAlign() const override; + TyIdxFieldAttrPair GetPointedTyIdxFldAttrPairWithFieldID(FieldID fldId) const; + TyIdx GetPointedTyIdxWithFieldID(FieldID fieldID) const; + size_t GetHashIndex() const override + { + constexpr uint8 idxShift = 4; + constexpr uint8 attrShift = 3; + size_t hIdx = (static_cast(pointedTyIdx) << 
idxShift) + (typeKind << kShiftNumOfTypeKind); + hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue(); + return hIdx % kTypeHashLength; + } + bool IsFunctionPtr() const + { + MIRType *pointedType = GetPointedType(); + if (pointedType->GetKind() == kTypeFunction) { + return true; + } + if (pointedType->GetKind() == kTypePointer) { + MIRPtrType *pointedPtrType = static_cast(pointedType); + return pointedPtrType->GetPointedType()->GetKind() == kTypeFunction; + } + return false; + } + + MIRFuncType *GetPointedFuncType() const; + + bool PointsToConstString() const override; + + std::string GetMplTypeName() const override; + + std::string GetCompactMplTypeName() const override; + +private: + TyIdx pointedTyIdx; + TypeAttrs typeAttrs; +}; + +class MIRArrayType : public MIRType { +public: + MIRArrayType() : MIRType(kTypeArray, PTY_agg) {} + explicit MIRArrayType(GStrIdx strIdx) : MIRType(kTypeArray, PTY_agg, strIdx) {} + + MIRArrayType(TyIdx eTyIdx, const std::vector &sizeArray) + : MIRType(kTypeArray, PTY_agg), eTyIdx(eTyIdx), dim(sizeArray.size()) + { + for (size_t i = 0; i < kMaxArrayDim; ++i) { + this->sizeArray[i] = (i < dim) ? sizeArray[i] : 0; + } + } + + MIRArrayType(const MIRArrayType &pat) = default; + MIRArrayType &operator=(const MIRArrayType &p) = default; + ~MIRArrayType() override = default; + + TyIdx GetElemTyIdx() const + { + return eTyIdx; + } + void SetElemTyIdx(TyIdx idx) + { + eTyIdx = idx; + } + + uint32 GetSizeArrayItem(uint32 n) const + { + CHECK_FATAL((n >= 0 && n < kMaxArrayDim), "out of bound of array!"); + return sizeArray[n]; + } + void SetSizeArrayItem(uint32 idx, uint32 value) + { + CHECK_FATAL((idx >= 0 && idx < kMaxArrayDim), "out of bound of array!"); + sizeArray[idx] = value; + } + + bool IsIncompleteArray() const + { + return typeAttrs.GetAttr(ATTR_incomplete_array); + } + + bool EqualTo(const MIRType &type) const override; + + uint16 GetDim() const + { + return dim; + } + void SetDim(uint16 dim) + { + this->dim = dim; + } + + const TypeAttrs &GetTypeAttrs() const + { + return typeAttrs; + } + + TypeAttrs &GetTypeAttrs() + { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) + { + typeAttrs = attrs; + } + + MIRType *GetElemType() const; + + MIRType *CopyMIRTypeNode() const override + { + return new MIRArrayType(*this); + } + + bool HasTypeParam() const override + { + return GetElemType()->HasTypeParam(); + } + + void Dump(int indent, bool dontUseName) const override; + + size_t GetSize() const override; + uint32 GetAlign() const override; + + size_t GetHashIndex() const override + { + constexpr uint8 idxShift = 2; + size_t hIdx = (static_cast(eTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + for (size_t i = 0; i < dim; ++i) { + CHECK_FATAL(i < kMaxArrayDim, "array index out of range"); + hIdx += (sizeArray[i] << i); + } + constexpr uint8 attrShift = 3; + hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue(); + return hIdx % kTypeHashLength; + } + + int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override + { + (void)fieldID; + return kOffsetUnknown; + } + int64 GetBitOffsetFromArrayAddress(std::vector &indexArray); + + std::string GetMplTypeName() const override; + std::string GetCompactMplTypeName() const override; + bool HasFields() const override; + uint32 NumberOfFieldIDs() const override; + MIRStructType *EmbeddedStructType() override; + size_t ElemNumber(); + +private: + TyIdx eTyIdx {0}; + uint16 dim = 0; + std::array sizeArray {{0}}; + TypeAttrs typeAttrs; + mutable uint32 fieldsNum 
= kInvalidFieldNum;
+    mutable size_t size = kInvalidSize;
+};
+
+// flexible array type, must be last field of a top-level struct
+class MIRFarrayType : public MIRType {
+public:
+    MIRFarrayType() : MIRType(kTypeFArray, PTY_agg), elemTyIdx(TyIdx(0)) {};
+
+    explicit MIRFarrayType(TyIdx elemTyIdx) : MIRType(kTypeFArray, PTY_agg), elemTyIdx(elemTyIdx) {}
+
+    explicit MIRFarrayType(GStrIdx strIdx) : MIRType(kTypeFArray, PTY_agg, strIdx), elemTyIdx(TyIdx(0)) {}
+
+    ~MIRFarrayType() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRFarrayType(*this);
+    };
+
+    MIRType *GetElemType() const;
+
+    bool HasTypeParam() const override
+    {
+        return GetElemType()->HasTypeParam();
+    }
+
+    TyIdx GetElemTyIdx() const
+    {
+        return elemTyIdx;
+    }
+    void SetElemtTyIdx(TyIdx idx)
+    {
+        elemTyIdx = idx;
+    }
+
+    bool EqualTo(const MIRType &type) const override;
+    void Dump(int indent, bool dontUseName = false) const override;
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 5;
+        return ((static_cast<size_t>(elemTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength;
+    }
+
+    std::string GetMplTypeName() const override;
+    std::string GetCompactMplTypeName() const override;
+
+    bool HasFields() const override;
+    uint32 NumberOfFieldIDs() const override;
+    MIRStructType *EmbeddedStructType() override;
+
+    int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override
+    {
+        (void)fieldID;
+        return kOffsetUnknown;
+    }
+
+    int64 GetBitOffsetFromArrayAddress(int64 arrayIndex);
+
+private:
+    TyIdx elemTyIdx;
+    mutable uint32 fieldsNum = kInvalidFieldNum;
+};
+
+using TyidxFuncAttrPair = std::pair<TyIdx, FuncAttrs>;
+using MethodPair = std::pair<StIdx, TyidxFuncAttrPair>;
+using MethodVector = std::vector<MethodPair>;
+using MethodPtrVector = std::vector<MethodPair *>;
+using MIREncodedArray = std::vector<EncodedValue>;
+class GenericDeclare;
+class AnnotationType;
+class GenericType;
+// used by kTypeStruct, kTypeStructIncomplete, kTypeUnion
+class MIRStructType : public MIRType {
+public:
+    explicit MIRStructType(MIRTypeKind typeKind) : MIRType(typeKind, PTY_agg) {}
+
+    MIRStructType(MIRTypeKind typeKind, GStrIdx strIdx) : MIRType(typeKind, PTY_agg, strIdx) {}
+
+    ~MIRStructType() override = default;
+
+    bool IsStructType() const override
+    {
+        return true;
+    }
+
+    FieldVector &GetFields()
+    {
+        return fields;
+    }
+    const FieldVector &GetFields() const
+    {
+        return fields;
+    }
+    void SetFields(const FieldVector &fields)
+    {
+        this->fields = fields;
+    }
+
+    const FieldPair &GetFieldsElemt(size_t n) const
+    {
+        DEBUG_ASSERT(n < fields.size(), "array index out of range");
+        return fields.at(n);
+    }
+
+    FieldPair &GetFieldsElemt(size_t n)
+    {
+        DEBUG_ASSERT(n < fields.size(), "array index out of range");
+        return fields.at(n);
+    }
+
+    size_t GetFieldsSize() const
+    {
+        return fields.size();
+    }
+
+    const std::vector<TyIdx> &GetFieldInferredTyIdx() const
+    {
+        return fieldInferredTyIdx;
+    }
+
+    FieldVector &GetStaticFields()
+    {
+        return staticFields;
+    }
+    const FieldVector &GetStaticFields() const
+    {
+        return staticFields;
+    }
+
+    const FieldPair &GetStaticFieldsPair(size_t i) const
+    {
+        return staticFields.at(i);
+    }
+
+    GStrIdx GetStaticFieldsGStrIdx(size_t i) const
+    {
+        return staticFields.at(i).first;
+    }
+
+    FieldVector &GetParentFields()
+    {
+        return parentFields;
+    }
+    void SetParentFields(const FieldVector &parentFields)
+    {
+        this->parentFields = parentFields;
+    }
+    const FieldVector &GetParentFields() const
+    {
+        return parentFields;
+    }
+    const FieldPair &GetParentFieldsElemt(size_t n) const
+    {
+        DEBUG_ASSERT(n <
parentFields.size(), "array index out of range"); + return parentFields.at(n); + } + size_t GetParentFieldsSize() const + { + return parentFields.size(); + } + + MethodVector &GetMethods() + { + return methods; + } + const MethodVector &GetMethods() const + { + return methods; + } + + const MethodPair &GetMethodsElement(size_t n) const + { + DEBUG_ASSERT(n < methods.size(), "array index out of range"); + return methods.at(n); + } + + MethodPtrVector &GetVTableMethods() + { + return vTableMethods; + } + + const MethodPair *GetVTableMethodsElemt(size_t n) const + { + DEBUG_ASSERT(n < vTableMethods.size(), "array index out of range"); + return vTableMethods.at(n); + } + + size_t GetVTableMethodsSize() const + { + return vTableMethods.size(); + } + + const MethodPtrVector &GetItableMethods() const + { + return iTableMethods; + } + + bool IsImported() const + { + return isImported; + } + + void SetIsImported(bool flag) + { + isImported = flag; + } + + bool IsUsed() const + { + return isUsed; + } + + void SetIsUsed(bool flag) + { + isUsed = flag; + } + + bool IsCPlusPlus() const + { + return isCPlusPlus; + } + + void SetIsCPlusPlus(bool flag) + { + isCPlusPlus = flag; + } + + GStrIdx GetFieldGStrIdx(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.first; + } + + const TyIdxFieldAttrPair GetFieldTyIdxAttrPair(FieldID id) const + { + return TraverseToField(id).second; + } + + TyIdxFieldAttrPair GetTyidxFieldAttrPair(size_t n) const + { + return fields.at(n).second; + } + + TyIdx GetFieldTyIdx(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.first; + } + + FieldAttrs GetFieldAttrs(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second; + } + + FieldAttrs GetFieldAttrs(GStrIdx fieldStrIdx) const + { + const FieldPair &fieldPair = TraverseToField(fieldStrIdx); + return fieldPair.second.second; + } + + bool IsFieldVolatile(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_volatile); + } + + bool IsFieldFinal(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_final); + } + + bool IsFieldRCUnownedRef(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcunowned); + } + + bool IsFieldRCWeak(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcweak); + } + + bool IsFieldRestrict(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_restrict); + } + + bool IsOwnField(FieldID id) const + { + const FieldPair &fieldPair = TraverseToField(id); + return std::find(fields.begin(), fields.end(), fieldPair) != fields.end(); + } + + TypeAttrs &GetTypeAttrs() + { + return typeAttrs; + } + + const TypeAttrs &GetTypeAttrs() const + { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) + { + typeAttrs = attrs; + } + + bool HasVolatileField() const override; + bool HasTypeParam() const override; + bool EqualTo(const MIRType &type) const override; + MIRType *CopyMIRTypeNode() const override + { + return new MIRStructType(*this); + } + + TyIdx GetElemTyIdx(size_t n) const + { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).second.first; + } + + void 
SetElemtTyIdxSimple(size_t n, TyIdx tyIdx) + { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second.first = tyIdx; + } + + TyIdx GetStaticElemtTyIdx(size_t n) const + { + DEBUG_ASSERT(n < staticFields.size(), "array index out of range"); + return staticFields.at(n).second.first; + } + + void SetStaticElemtTyIdx(size_t n, TyIdx tyIdx) + { + staticFields.at(n).second.first = tyIdx; + } + + void SetMethodTyIdx(size_t n, TyIdx tyIdx) + { + DEBUG_ASSERT(n < methods.size(), "array index out of range"); + methods.at(n).second.first = tyIdx; + } + + MIRType *GetElemType(uint32 n) const; + + MIRType *GetFieldType(FieldID fieldID); + + void SetElemtTyIdx(size_t n, TyIdx tyIdx) + { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second = TyIdxFieldAttrPair(tyIdx, FieldAttrs()); + } + + GStrIdx GetElemStrIdx(size_t n) const + { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).first; + } + + void SetElemStrIdx(size_t n, GStrIdx idx) + { + DEBUG_ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).first = idx; + } + + void SetElemInferredTyIdx(size_t n, TyIdx tyIdx) + { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + DEBUG_ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + fieldInferredTyIdx.at(n) = tyIdx; + } + + TyIdx GetElemInferredTyIdx(size_t n) + { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + DEBUG_ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + return fieldInferredTyIdx.at(n); + } + + void DumpFieldsAndMethods(int indent, bool hasMethod) const; + void Dump(int indent, bool dontUseName = false) const override; + + virtual void SetComplete() + { + typeKind = (typeKind == kTypeUnion) ? 
typeKind : kTypeStruct;
+    }
+
+    // only meaningful for MIRClassType and MIRInterface types
+    bool IsLocal() const;
+
+    size_t GetSize() const override;
+    uint32 GetAlign() const override;
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 attrShift = 3;
+        return ((static_cast<size_t>(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind) +
+                ((typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue())) %
+               kTypeHashLength;
+    }
+
+    virtual void ClearContents()
+    {
+        fields.clear();
+        staticFields.clear();
+        parentFields.clear();
+        methods.clear();
+        vTableMethods.clear();
+        iTableMethods.clear();
+        isImported = false;
+        isUsed = false;
+        hasVolatileField = false;
+        hasVolatileFieldSet = false;
+    }
+
+    virtual const std::vector<MIRInfoPair> &GetInfo() const
+    {
+        CHECK_FATAL(false, "can not use GetInfo");
+    }
+
+    virtual const MIRInfoPair &GetInfoElemt(size_t) const
+    {
+        CHECK_FATAL(false, "can not use GetInfoElemt");
+    }
+
+    virtual const std::vector<bool> &GetInfoIsString() const
+    {
+        CHECK_FATAL(false, "can not use GetInfoIsString");
+    }
+
+    virtual bool GetInfoIsStringElemt(size_t) const
+    {
+        CHECK_FATAL(false, "can not use GetInfoIsStringElemt");
+    }
+
+    virtual const std::vector<MIRPragma *> &GetPragmaVec() const
+    {
+        CHECK_FATAL(false, "can not use GetPragmaVec");
+    }
+
+    virtual std::vector<MIRPragma *> &GetPragmaVec()
+    {
+        CHECK_FATAL(false, "can not use GetPragmaVec");
+    }
+
+    std::vector<GenericDeclare *> &GetGenericDeclare()
+    {
+        return genericDeclare;
+    }
+
+    void AddClassGenericDeclare(GenericDeclare *gd)
+    {
+        genericDeclare.push_back(gd);
+    }
+
+    void AddFieldGenericDeclare(const GStrIdx &g, AnnotationType *a)
+    {
+        if (fieldGenericDeclare.find(g) != fieldGenericDeclare.end()) {
+            CHECK_FATAL(fieldGenericDeclare[g] == a, "MUST BE");
+        }
+        fieldGenericDeclare[g] = a;
+    }
+
+    AnnotationType *GetFieldGenericDeclare(const GStrIdx &g)
+    {
+        if (fieldGenericDeclare.find(g) == fieldGenericDeclare.end()) {
+            return nullptr;
+        }
+        return fieldGenericDeclare[g];
+    }
+
+    void AddInheritaceGeneric(GenericType *a)
+    {
+        inheritanceGeneric.push_back(a);
+    }
+
+    std::vector<GenericType *> &GetInheritanceGeneric()
+    {
+        return inheritanceGeneric;
+    }
+
+    virtual const MIREncodedArray &GetStaticValue() const
+    {
+        CHECK_FATAL(false, "can not use GetStaticValue");
+    }
+
+    virtual void PushbackMIRInfo(const MIRInfoPair &)
+    {
+        CHECK_FATAL(false, "can not use PushbackMIRInfo");
+    }
+
+    virtual void PushbackPragma(MIRPragma *)
+    {
+        CHECK_FATAL(false, "can not use PushbackPragma");
+    }
+
+    virtual void PushbackStaticValue(EncodedValue &)
+    {
+        CHECK_FATAL(false, "can not use PushbackStaticValue");
+    }
+
+    virtual void PushbackIsString(bool)
+    {
+        CHECK_FATAL(false, "can not use PushbackIsString");
+    }
+
+    bool HasFields() const override
+    {
+        return true;
+    }
+    uint32 NumberOfFieldIDs() const override;
+    MIRStructType *EmbeddedStructType() override
+    {
+        return this;
+    }
+
+    virtual FieldPair TraverseToFieldRef(FieldID &fieldID) const;
+    std::string GetMplTypeName() const override;
+    std::string GetCompactMplTypeName() const override;
+    FieldPair TraverseToField(FieldID fieldID) const;
+
+    int64 GetBitOffsetFromBaseAddr(FieldID fieldID) override;
+
+    bool HasPadding() const;
+
+protected:
+    FieldVector fields {};
+    std::vector<TyIdx> fieldInferredTyIdx {};
+    FieldVector staticFields {};
+    FieldVector parentFields {};       // fields belong to the ancestors not fully defined
+    MethodVector methods {};           // for the list of member function prototypes
+    MethodPtrVector vTableMethods {};  // the list of implementation for all virtual functions for this type
+    MethodPtrVector iTableMethods {};  // the list of all interface functions for this type; For classes, they are
+                                       // implementation functions, For interfaces, they are abstract functions.
+                                       // Weak indicates the actual definition is in another module.
+    bool isImported = false;
+    bool isUsed = false;
+    bool isCPlusPlus = false;                  // empty struct in C++ has size 1 byte
+    mutable bool hasVolatileField = false;     // for caching computed value
+    mutable bool hasVolatileFieldSet = false;  // if true, just read hasVolatileField;
+                                               // otherwise compute to initialize hasVolatileField
+    std::vector<GenericDeclare *> genericDeclare;
+    std::map<GStrIdx, AnnotationType *> fieldGenericDeclare;
+    std::vector<GenericType *> inheritanceGeneric;
+    TypeAttrs typeAttrs;
+    mutable uint32 fieldsNum = kInvalidFieldNum;
+    mutable size_t size = kInvalidSize;
+
+private:
+    FieldPair TraverseToField(GStrIdx fieldStrIdx) const;
+    bool HasVolatileFieldInFields(const FieldVector &fieldsOfStruct) const;
+    bool HasTypeParamInFields(const FieldVector &fieldsOfStruct) const;
+    int64 GetBitOffsetFromUnionBaseAddr(FieldID fieldID);
+    int64 GetBitOffsetFromStructBaseAddr(FieldID fieldID);
+};
+
+// java array type, must not be nested inside another aggregate
+class MIRJarrayType : public MIRFarrayType {
+public:
+    MIRJarrayType()
+    {
+        typeKind = kTypeJArray;
+    };
+
+    explicit MIRJarrayType(TyIdx elemTyIdx) : MIRFarrayType(elemTyIdx)
+    {
+        typeKind = kTypeJArray;
+    }
+
+    explicit MIRJarrayType(GStrIdx strIdx) : MIRFarrayType(strIdx)
+    {
+        typeKind = kTypeJArray;
+    }
+
+    ~MIRJarrayType() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRJarrayType(*this);
+    }
+
+    MIRStructType *GetParentType();
+    const std::string &GetJavaName();
+
+    bool IsPrimitiveArray()
+    {
+        if (javaNameStrIdx == 0u) {
+            DetermineName();
+        }
+        return fromPrimitive;
+    }
+
+    int GetDim()
+    {
+        if (javaNameStrIdx == 0u) {
+            DetermineName();
+        }
+        return dim;
+    }
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 5;
+        return ((static_cast<size_t>(GetElemTyIdx()) << idxShift) + (typeKind << kShiftNumOfTypeKind)) %
+               kTypeHashLength;
+    }
+
+private:
+    void DetermineName();        // determine the internal name of this type
+    TyIdx parentTyIdx {0};       // since Jarray is also an object, this is java.lang.Object
+    GStrIdx javaNameStrIdx {0};  // for internal java name of Jarray. nameStrIdx is used for other purpose
+    bool fromPrimitive = false;  // the lowest dimension is primitive type
+    int dim = 0;                 // the dimension if decidable at compile time, otherwise 0
+};
+
+// used by kTypeClass, kTypeClassIncomplete
+class MIRClassType : public MIRStructType {
+public:
+    explicit MIRClassType(MIRTypeKind tKind) : MIRStructType(tKind) {}
+    MIRClassType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {}
+    ~MIRClassType() override = default;
+
+    bool EqualTo(const MIRType &type) const override;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRClassType(*this);
+    }
+
+    const std::vector<MIRInfoPair> &GetInfo() const override
+    {
+        return info;
+    }
+    void PushbackMIRInfo(const MIRInfoPair &pair) override
+    {
+        info.push_back(pair);
+    }
+    uint32 GetInfo(const std::string &infoStr) const;
+    uint32 GetInfo(GStrIdx strIdx) const;
+    size_t GetInfoSize() const
+    {
+        return info.size();
+    }
+
+    const MIRInfoPair &GetInfoElemt(size_t n) const override
+    {
+        DEBUG_ASSERT(n < info.size(), "array index out of range");
+        return info.at(n);
+    }
+
+    const std::vector<bool> &GetInfoIsString() const override
+    {
+        return infoIsString;
+    }
+
+    void PushbackIsString(bool isString) override
+    {
+        infoIsString.push_back(isString);
+    }
+
+    size_t GetInfoIsStringSize() const
+    {
+        return infoIsString.size();
+    }
+
+    bool GetInfoIsStringElemt(size_t n) const override
+    {
+        DEBUG_ASSERT(n < infoIsString.size(), "array index out of range");
+        return infoIsString.at(n);
+    }
+
+    std::vector<MIRPragma *> &GetPragmaVec() override
+    {
+        return pragmaVec;
+    }
+    const std::vector<MIRPragma *> &GetPragmaVec() const override
+    {
+        return pragmaVec;
+    }
+    void PushbackPragma(MIRPragma *pragma) override
+    {
+        pragmaVec.push_back(pragma);
+    }
+
+    const MIREncodedArray &GetStaticValue() const override
+    {
+        return staticValue;
+    }
+    void PushbackStaticValue(EncodedValue &encodedValue) override
+    {
+        staticValue.push_back(encodedValue);
+    }
+
+    TyIdx GetParentTyIdx() const
+    {
+        return parentTyIdx;
+    }
+    void SetParentTyIdx(TyIdx idx)
+    {
+        parentTyIdx = idx;
+    }
+
+    std::vector<TyIdx> &GetInterfaceImplemented()
+    {
+        return interfacesImplemented;
+    }
+    const std::vector<TyIdx> &GetInterfaceImplemented() const
+    {
+        return interfacesImplemented;
+    }
+    TyIdx GetNthInterfaceImplemented(size_t i) const
+    {
+        DEBUG_ASSERT(i < interfacesImplemented.size(), "array index out of range");
+        return interfacesImplemented.at(i);
+    }
+
+    void SetNthInterfaceImplemented(size_t i, TyIdx tyIdx)
+    {
+        DEBUG_ASSERT(i < interfacesImplemented.size(), "array index out of range");
+        interfacesImplemented.at(i) = tyIdx;
+    }
+    void PushbackInterfaceImplemented(TyIdx idx)
+    {
+        interfacesImplemented.push_back(idx);
+    }
+
+    void Dump(int indent, bool dontUseName = false) const override;
+    void DumpAsCxx(int indent) const override;
+    void SetComplete() override
+    {
+        typeKind = kTypeClass;
+    }
+
+    bool IsFinal() const;
+    bool IsAbstract() const;
+    bool IsInner() const;
+    bool HasVolatileField() const override;
+    bool HasTypeParam() const override;
+    FieldPair TraverseToFieldRef(FieldID &fieldID) const override;
+    size_t GetSize() const override;
+
+    FieldID GetLastFieldID() const;
+    FieldID GetFirstFieldID() const
+    {
+        return GetLastFieldID() - fields.size() + 1;
+    }
+
+    FieldID GetFirstLocalFieldID() const;
+    // return class id or superclass id according to input string
+    MIRClassType *GetExceptionRootType();
+    const MIRClassType *GetExceptionRootType() const;
+    bool IsExceptionType() const;
+    void AddImplementedInterface(TyIdx interfaceTyIdx)
+    {
+        if (std::find(interfacesImplemented.begin(), interfacesImplemented.end(), interfaceTyIdx) ==
+            interfacesImplemented.end()) {
+            interfacesImplemented.push_back(interfaceTyIdx);
+        }
+    }
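+    // Usage sketch (editor's illustrative example, not part of the original patch):
+    // AddImplementedInterface() above deduplicates, so registering the same
+    // interface twice keeps a single entry:
+    //     MIRClassType cls(kTypeClass);
+    //     cls.AddImplementedInterface(TyIdx(5));
+    //     cls.AddImplementedInterface(TyIdx(5));  // no-op: already recorded
+    //     // cls.GetInterfaceImplemented().size() == 1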
+ + void ClearContents() override + { + MIRStructType::ClearContents(); + parentTyIdx = TyIdx(0); + interfacesImplemented.clear(); // for the list of interfaces the class implements + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + staticValue.clear(); + } + + size_t GetHashIndex() const override + { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + uint32 NumberOfFieldIDs() const override; + +private: + TyIdx parentTyIdx {0}; + std::vector interfacesImplemented {}; // for the list of interfaces the class implements + std::vector info {}; + std::vector infoIsString {}; + std::vector pragmaVec {}; + MIREncodedArray staticValue {}; // DELETE THIS +}; + +// used by kTypeInterface, kTypeInterfaceIncomplete +class MIRInterfaceType : public MIRStructType { +public: + explicit MIRInterfaceType(MIRTypeKind tKind) : MIRStructType(tKind) {} + MIRInterfaceType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {} + ~MIRInterfaceType() override = default; + + bool EqualTo(const MIRType &type) const override; + + MIRType *CopyMIRTypeNode() const override + { + return new MIRInterfaceType(*this); + } + + const std::vector &GetInfo() const override + { + return info; + } + void PushbackMIRInfo(const MIRInfoPair &pair) override + { + info.push_back(pair); + } + uint32 GetInfo(const std::string &infoStr) const; + uint32 GetInfo(GStrIdx strIdx) const; + size_t GetInfoSize() const + { + return info.size(); + } + + const MIRInfoPair &GetInfoElemt(size_t n) const override + { + DEBUG_ASSERT(n < info.size(), "array index out of range"); + return info.at(n); + } + + const std::vector &GetInfoIsString() const override + { + return infoIsString; + } + void PushbackIsString(bool isString) override + { + infoIsString.push_back(isString); + } + size_t GetInfoIsStringSize() const + { + return infoIsString.size(); + } + bool GetInfoIsStringElemt(size_t n) const override + { + DEBUG_ASSERT(n < infoIsString.size(), "array index out of range"); + return infoIsString.at(n); + } + + std::vector &GetPragmaVec() override + { + return pragmaVec; + } + const std::vector &GetPragmaVec() const override + { + return pragmaVec; + } + void PushbackPragma(MIRPragma *pragma) override + { + pragmaVec.push_back(pragma); + } + + const MIREncodedArray &GetStaticValue() const override + { + return staticValue; + } + void PushbackStaticValue(EncodedValue &encodedValue) override + { + staticValue.push_back(encodedValue); + } + + std::vector &GetParentsTyIdx() + { + return parentsTyIdx; + } + void SetParentsTyIdx(const std::vector &parents) + { + parentsTyIdx = parents; + } + const std::vector &GetParentsTyIdx() const + { + return parentsTyIdx; + } + + TyIdx GetParentsElementTyIdx(size_t i) const + { + DEBUG_ASSERT(i < parentsTyIdx.size(), "array index out of range"); + return parentsTyIdx[i]; + } + + void SetParentsElementTyIdx(size_t i, TyIdx tyIdx) + { + DEBUG_ASSERT(i < parentsTyIdx.size(), "array index out of range"); + parentsTyIdx[i] = tyIdx; + } + + void Dump(int indent, bool dontUseName = false) const override; + bool HasVolatileField() const override; + bool HasTypeParam() const override; + FieldPair TraverseToFieldRef(FieldID &fieldID) const override; + void SetComplete() override + { + typeKind = kTypeInterface; + } + + size_t GetSize() const override; + + void ClearContents() override + { + MIRStructType::ClearContents(); + parentsTyIdx.clear(); + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + 
staticValue.clear(); + } + + size_t GetHashIndex() const override + { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + bool HasFields() const override + { + return false; + } + uint32 NumberOfFieldIDs() const override + { + return 0; + } + MIRStructType *EmbeddedStructType() override + { + return nullptr; + } + +private: + std::vector parentsTyIdx {}; // multiple inheritence + std::vector info {}; + std::vector infoIsString {}; + std::vector pragmaVec {}; + MIREncodedArray staticValue {}; // DELETE THIS +}; + +class MIRBitFieldType : public MIRType { +public: + MIRBitFieldType(uint8 field, PrimType pt) : MIRType(kTypeBitField, pt), fieldSize(field) {} + MIRBitFieldType(uint8 field, PrimType pt, GStrIdx strIdx) : MIRType(kTypeBitField, pt, strIdx), fieldSize(field) {} + ~MIRBitFieldType() override = default; + + uint8 GetFieldSize() const + { + return fieldSize; + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + MIRType *CopyMIRTypeNode() const override + { + return new MIRBitFieldType(*this); + } + + size_t GetSize() const override + { + if (fieldSize == 0) { + return 0; + } else if (fieldSize <= 8) { + return 1; + } else { + return (fieldSize + 7) / 8; + } + } // size not be in bytes + + uint32 GetAlign() const override + { + return 0; + } // align not be in bytes + + size_t GetHashIndex() const override + { + return ((static_cast(primType) << fieldSize) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + +private: + uint8 fieldSize; +}; + +class MIRFuncType : public MIRType { +public: + MIRFuncType() : MIRType(kTypeFunction, PTY_ptr) {} + + explicit MIRFuncType(const GStrIdx &strIdx) : MIRType(kTypeFunction, PTY_ptr, strIdx) {} + + MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, const std::vector &vecAt, + const TypeAttrs &retAttrsIn) + : MIRType(kTypeFunction, PTY_ptr), + retTyIdx(retTyIdx), + paramTypeList(vecTy), + paramAttrsList(vecAt), + retAttrs(retAttrsIn) + { + } + + ~MIRFuncType() override = default; + + bool EqualTo(const MIRType &type) const override; + bool CompatibleWith(const MIRType &type) const; + MIRType *CopyMIRTypeNode() const override + { + return new MIRFuncType(*this); + } + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override + { + return 0; + } // size unknown + + TyIdx GetRetTyIdx() const + { + return retTyIdx; + } + + void SetRetTyIdx(TyIdx idx) + { + retTyIdx = idx; + } + + const std::vector &GetParamTypeList() const + { + return paramTypeList; + } + + std::vector &GetParamTypeList() + { + return paramTypeList; + } + + TyIdx GetNthParamType(size_t i) const + { + DEBUG_ASSERT(i < paramTypeList.size(), "array index out of range"); + return paramTypeList[i]; + } + + void SetParamTypeList(const std::vector &list) + { + paramTypeList.clear(); + (void)paramTypeList.insert(paramTypeList.begin(), list.begin(), list.end()); + } + + const std::vector &GetParamAttrsList() const + { + return paramAttrsList; + } + + std::vector &GetParamAttrsList() + { + return paramAttrsList; + } + + const TypeAttrs &GetNthParamAttrs(size_t i) const + { + DEBUG_ASSERT(i < paramAttrsList.size(), "array index out of range"); + return paramAttrsList[i]; + } + + TypeAttrs &GetNthParamAttrs(size_t i) + { + DEBUG_ASSERT(i < paramAttrsList.size(), "array index out of range"); + return paramAttrsList[i]; + } + + void SetParamAttrsList(const std::vector &list) + { + 
+        paramAttrsList.clear();
+        (void)paramAttrsList.insert(paramAttrsList.begin(), list.begin(), list.end());
+    }
+
+    void SetNthParamAttrs(size_t i, const TypeAttrs &attrs)
+    {
+        DEBUG_ASSERT(i < paramAttrsList.size(), "array index out of range");
+        paramAttrsList[i] = attrs;
+    }
+
+    bool IsVarargs() const
+    {
+        return funcAttrs.GetAttr(FUNCATTR_varargs);
+    }
+
+    void SetVarArgs()
+    {
+        funcAttrs.SetAttr(FUNCATTR_varargs);
+    }
+
+    bool FirstArgReturn() const
+    {
+        return funcAttrs.GetAttr(FUNCATTR_firstarg_return);
+    }
+
+    void SetFirstArgReturn()
+    {
+        funcAttrs.SetAttr(FUNCATTR_firstarg_return);
+    }
+
+    const TypeAttrs &GetRetAttrs() const
+    {
+        return retAttrs;
+    }
+
+    TypeAttrs &GetRetAttrs()
+    {
+        return retAttrs;
+    }
+
+    void SetRetAttrs(const TypeAttrs &attrs)
+    {
+        retAttrs = attrs;
+    }
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 6;
+        size_t hIdx = (static_cast<size_t>(retTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind);
+        size_t size = paramTypeList.size();
+        hIdx += (size ? (static_cast<size_t>(paramTypeList[0]) + size) : 0) << 4; // shift bit is 4
+        return hIdx % kTypeHashLength;
+    }
+
+public:
+    FuncAttrs funcAttrs;
+
+private:
+    TyIdx retTyIdx {0};
+    std::vector<TyIdx> paramTypeList;
+    std::vector<TypeAttrs> paramAttrsList;
+    TypeAttrs retAttrs;
+};
+
+class MIRTypeByName : public MIRType {
+    // use nameStrIdx to store the name for both local and global
+public:
+    explicit MIRTypeByName(GStrIdx gStrIdx) : MIRType(kTypeByName, PTY_void)
+    {
+        nameStrIdx = gStrIdx;
+    }
+
+    ~MIRTypeByName() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRTypeByName(*this);
+    }
+
+    bool EqualTo(const MIRType &type) const override;
+
+    void Dump(int indent, bool dontUseName = false) const override;
+    size_t GetSize() const override
+    {
+        return 0;
+    } // size unknown
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 2;
+        return ((static_cast<size_t>(nameStrIdx) << idxShift) + nameIsLocal + (typeKind << kShiftNumOfTypeKind)) %
+               kTypeHashLength;
+    }
+};
+
+class MIRTypeParam : public MIRType {
+    // use nameStrIdx to store the name
+public:
+    explicit MIRTypeParam(GStrIdx gStrIdx) : MIRType(kTypeParam, PTY_gen)
+    {
+        nameStrIdx = gStrIdx;
+    }
+
+    ~MIRTypeParam() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRTypeParam(*this);
+    }
+
+    bool EqualTo(const MIRType &type) const override;
+    void Dump(int indent, bool dontUseName = false) const override;
+    size_t GetSize() const override
+    {
+        return 0;
+    } // size unknown
+
+    bool HasTypeParam() const override
+    {
+        return true;
+    }
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 3;
+        return ((static_cast<size_t>(nameStrIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength;
+    }
+};
+
+using TypePair = std::pair<TyIdx, TyIdx>;
+using GenericInstantVector = std::vector<TypePair>;
+class MIRInstantVectorType : public MIRType {
+public:
+    MIRInstantVectorType() : MIRType(kTypeInstantVector, PTY_agg) {}
+
+    explicit MIRInstantVectorType(MIRTypeKind kind) : MIRType(kind, PTY_agg) {}
+
+    MIRInstantVectorType(MIRTypeKind kind, GStrIdx strIdx) : MIRType(kind, PTY_agg, strIdx) {}
+
+    ~MIRInstantVectorType() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRInstantVectorType(*this);
+    }
+
+    bool EqualTo(const MIRType &type) const override;
+    void Dump(int indent, bool dontUseName = false) const override;
+    size_t GetSize() const override
+    {
+        return 0;
+    } // size unknown
+
+    const GenericInstantVector &GetInstantVec() const
+    {
+        return instantVec;
+    }
+
+    GenericInstantVector &GetInstantVec()
+    {
+        return instantVec;
+    }
+
+    void AddInstant(TypePair typePair)
+    {
+        instantVec.push_back(typePair);
+    }
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 3;
+        uint32 hIdx = typeKind << kShiftNumOfTypeKind;
+        for (const TypePair &typePair : instantVec) {
+            hIdx += static_cast<uint32>(typePair.first + typePair.second) << idxShift;
+        }
+        return hIdx % kTypeHashLength;
+    }
+
+protected:
+    GenericInstantVector instantVec {}; // in each pair, first is generic type, second is real type
+};
+
+class MIRGenericInstantType : public MIRInstantVectorType {
+public:
+    explicit MIRGenericInstantType(TyIdx genTyIdx) : MIRInstantVectorType(kTypeGenericInstant), genericTyIdx(genTyIdx)
+    {
+    }
+
+    explicit MIRGenericInstantType(GStrIdx strIdx) : MIRInstantVectorType(kTypeGenericInstant, strIdx), genericTyIdx(0)
+    {
+    }
+
+    ~MIRGenericInstantType() override = default;
+
+    MIRType *CopyMIRTypeNode() const override
+    {
+        return new MIRGenericInstantType(*this);
+    }
+
+    bool EqualTo(const MIRType &type) const override;
+    void Dump(int indent, bool dontUseName = false) const override;
+
+    size_t GetSize() const override
+    {
+        return 0;
+    } // size unknown
+
+    TyIdx GetGenericTyIdx() const
+    {
+        return genericTyIdx;
+    }
+    void SetGenericTyIdx(TyIdx idx)
+    {
+        genericTyIdx = idx;
+    }
+
+    size_t GetHashIndex() const override
+    {
+        constexpr uint8 idxShift = 2;
+        uint32 hIdx = (static_cast<uint32>(genericTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind);
+        for (const TypePair &typePair : instantVec) {
+            hIdx += static_cast<uint32>(typePair.first + typePair.second) << 3; // shift bit is 3
+        }
+        return hIdx % kTypeHashLength;
+    }
+
+private:
+    TyIdx genericTyIdx; // the generic type to be instantiated
+};
+
+MIRType *GetElemType(const MIRType &arrayType);
+#endif // MIR_FEATURE_FULL
+} // namespace maple
+
+#define LOAD_SAFE_CAST_FOR_MIR_TYPE
+#include "ir_safe_cast_traits.def"
+
+#endif // MAPLE_IR_INCLUDE_MIR_TYPE_H
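All of the `GetHashIndex` overrides above feed one scheme: each type folds its identifying payload (name index, return-type index, generic pairs) together with `typeKind` into a bucket index, and a table keyed by that bucket is what lets the IR hand back a single shared `MIRType` node per distinct type. The following is a minimal, self-contained sketch of that hash-consing idea; the bucket count and shift values here are stand-ins, not the real `kTypeHashLength`/`kShiftNumOf*` constants defined in this header.

```cpp
// Hash-consing sketch: CreateOrGet returns the existing node when an
// equal type already landed in the same bucket. Constants are assumed.
#include <cstdint>
#include <iostream>
#include <list>
#include <vector>

constexpr size_t kToyHashLength = 1021;      // assumed bucket count
constexpr unsigned kToyShiftOfTypeKind = 8;  // assumed shift

struct ToyType {
    uint32_t nameStrIdx;
    uint32_t typeKind;
    bool operator==(const ToyType &o) const
    {
        return nameStrIdx == o.nameStrIdx && typeKind == o.typeKind;
    }
    size_t GetHashIndex() const
    {
        return ((static_cast<size_t>(nameStrIdx) << 6) + (typeKind << kToyShiftOfTypeKind)) % kToyHashLength;
    }
};

class ToyTypeTable {
public:
    const ToyType &CreateOrGet(const ToyType &t)
    {
        auto &bucket = buckets_[t.GetHashIndex()];
        for (const ToyType &existing : bucket) {
            if (existing == t) {
                return existing;  // reuse: one node per distinct type
            }
        }
        bucket.push_back(t);
        return bucket.back();
    }

private:
    std::vector<std::list<ToyType>> buckets_ {kToyHashLength};
};

int main()
{
    ToyTypeTable table;
    const ToyType &a = table.CreateOrGet({42, 3});
    const ToyType &b = table.CreateOrGet({42, 3});
    std::cout << (&a == &b) << std::endl;  // prints 1: same node returned
}
```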
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/mpl2mpl_options.h b/ecmascript/compiler/codegen/maple/maple_ir/include/mpl2mpl_options.h
new file mode 100644
index 0000000000000000000000000000000000000000..83d68d587fb71f4de00b0a96a88c17844c60d63e
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/mpl2mpl_options.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H
+#define MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H
+
+#include "cl_option.h"
+#include "cl_parser.h"
+
+#include <cstdint>
+#include <string>
+
+namespace opts::mpl2mpl {
+
+extern maplecl::Option<std::string> dumpPhase;
+extern maplecl::Option<std::string> skipPhase;
+extern maplecl::Option<std::string> skipFrom;
+extern maplecl::Option<std::string> skipAfter;
+extern maplecl::Option<std::string> dumpFunc;
+extern maplecl::Option<bool> quiet;
+extern maplecl::Option<bool> maplelinker;
+extern maplecl::Option<bool> regnativefunc;
+extern maplecl::Option<bool> inlineWithProfile;
+extern maplecl::Option<bool> inlineOpt;
+extern maplecl::Option<bool> ipaClone;
+extern maplecl::Option<std::string> noInlineFunc;
+extern maplecl::Option<std::string> importFileList;
+extern maplecl::Option<bool> crossModuleInline;
+extern maplecl::Option<uint32_t> inlineSmallFunctionThreshold;
+extern maplecl::Option<uint32_t> inlineHotFunctionThreshold;
+extern maplecl::Option<uint32_t> inlineRecursiveFunctionThreshold;
+extern maplecl::Option<uint32_t> inlineDepth;
+extern maplecl::Option<uint32_t> inlineModuleGrow;
+extern maplecl::Option<uint32_t> inlineColdFuncThresh;
+extern maplecl::Option<uint32_t> profileHotCount;
+extern maplecl::Option<uint32_t> profileColdCount;
+extern maplecl::Option<uint32_t> profileHotRate;
+extern maplecl::Option<uint32_t> profileColdRate;
+extern maplecl::Option<bool> nativewrapper;
+extern maplecl::Option<bool> regnativeDynamicOnly;
+extern maplecl::Option<std::string> staticBindingList;
+extern maplecl::Option<bool> dumpBefore;
+extern maplecl::Option<bool> dumpAfter;
+extern maplecl::Option<bool> dumpMuid;
+extern maplecl::Option<bool> emitVtableImpl;
+
+#if MIR_JAVA
+extern maplecl::Option<bool> skipvirtual;
+#endif
+
+extern maplecl::Option<bool> userc;
+extern maplecl::Option<bool> strictNaiveRc;
+extern maplecl::Option<bool> rcOpt1;
+extern maplecl::Option<bool> nativeopt;
+extern maplecl::Option<bool> o0;
+extern maplecl::Option<bool> o2;
+extern maplecl::Option<bool> os;
+extern maplecl::Option<std::string> criticalNative;
+extern maplecl::Option<std::string> fastNative;
+extern maplecl::Option<bool> nodot;
+extern maplecl::Option<bool> genIrProfile;
+extern maplecl::Option<bool> profileTest;
+extern maplecl::Option<bool> barrier;
+extern maplecl::Option<std::string> nativeFuncPropertyFile;
+extern maplecl::Option<bool> maplelinkerNolocal;
+extern maplecl::Option<uint32_t> buildApp;
+extern maplecl::Option<bool> partialAot;
+extern maplecl::Option<uint32_t> decoupleInit;
+extern maplecl::Option<std::string> sourceMuid;
+extern maplecl::Option<bool> deferredVisit;
+extern maplecl::Option<bool> deferredVisit2;
+extern maplecl::Option<bool> decoupleSuper;
+extern maplecl::Option<bool> genDecoupleVtab;
+extern maplecl::Option<bool> profileFunc;
+extern maplecl::Option<std::string> dumpDevirtual;
+extern maplecl::Option<std::string> readDevirtual;
+extern maplecl::Option<bool> usewhiteclass;
+extern maplecl::Option<std::string> appPackageName;
+extern maplecl::Option<std::string> checkClInvocation;
+extern maplecl::Option<bool> dumpClInvocation;
+extern maplecl::Option<uint32_t> warning;
+extern maplecl::Option<bool> lazyBinding;
+extern maplecl::Option<bool> hotFix;
+extern maplecl::Option<bool> compactMeta;
+extern maplecl::Option<bool> genPGOReport;
+extern maplecl::Option<uint32_t> inlineCache;
+extern maplecl::Option<bool> noComment;
+extern maplecl::Option<bool> rmnousefunc;
+extern maplecl::Option<bool> sideeffect;
+extern maplecl::Option<bool> dumpIPA;
+extern maplecl::Option<bool> wpaa;
+extern maplecl::Option<uint32_t> numOfCloneVersions;
+extern maplecl::Option<uint32_t> numOfImpExprLowBound;
+extern maplecl::Option<uint32_t> numOfImpExprHighBound;
+extern maplecl::Option<uint32_t> numOfCallSiteLowBound;
+extern maplecl::Option<uint32_t> numOfCallSiteUpBound;
+extern maplecl::Option<uint32_t> numOfConstpropValue;
+
+} // namespace opts::mpl2mpl
+
+#endif /* MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H */
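These externs follow the one-definition pattern: each flag is a single typed `maplecl::Option<T>` object defined once in a .cpp file and shared by every pass through this header, replacing ad-hoc globals. The sketch below is a reduced stand-alone model of what such a typed option object provides; it is not the real `maplecl` API (that lives in cl_option.h and is richer), just the shape the declarations above assume.

```cpp
// Reduced model of a typed CLI option object (NOT the real maplecl API):
// one global instance per flag, a typed value, and a "was it set" bit.
#include <cstdint>
#include <iostream>
#include <string>

template <typename T>
class ToyOption {
public:
    ToyOption(std::string flag, T defaultVal) : flag_(std::move(flag)), value_(defaultVal) {}
    void Set(T v)
    {
        value_ = v;
        isSet_ = true;
    }
    const T &GetValue() const { return value_; }
    bool IsSetByUser() const { return isSet_; }
    const std::string &Flag() const { return flag_; }

private:
    std::string flag_;
    T value_;
    bool isSet_ = false;
};

// Mirrors the header's pattern: one typed global per flag in a namespace,
// declared extern in the header and defined once in a .cpp file.
namespace toyopts {
ToyOption<bool> quiet{"--quiet", false};
ToyOption<uint32_t> inlineDepth{"--inline-depth", 8};
}  // namespace toyopts

int main()
{
    toyopts::quiet.Set(true);  // what a parser would do on seeing --quiet
    std::cout << toyopts::quiet.Flag() << "=" << toyopts::quiet.GetValue()
              << " depth=" << toyopts::inlineDepth.GetValue() << '\n';
}
```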
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/opcode_info.h b/ecmascript/compiler/codegen/maple/maple_ir/include/opcode_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7912cd2f68f72cf8541c04bd7e80a88e785e9ff
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/opcode_info.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_OPCODE_INFO_H
+#define MAPLE_IR_INCLUDE_OPCODE_INFO_H
+#include "types_def.h"
+#include "opcodes.h"
+#include "mpl_logging.h"
+
+namespace maple {
+enum OpcodeProp {
+    kOpcodePropNone,
+    kOpcodePropIsStmt,         // The instruction is a stmt, so has 2 stmt pointers
+    kOpcodePropIsVarSize,      // The instruction size is not fixed
+    kOpcodePropNotMMPL,        // The instruction is not allowed in Machine Maple IR
+    kOpcodePropIsCompare,      // The instruction is one of the 6 comparison ops
+    kOpcodePropIsTypeCvt,      // The instruction is a type conversion op
+    kOpcodePropHasSSAUse,      // The instruction may incur a use in SSA form
+    kOpcodePropHasSSADef,      // The instruction may incur a def in SSA form
+    kOpcodePropIsCall,         // The instruction is among the call instructions
+    kOpcodePropIsCallAssigned, // The instruction is among the call instructions with implicit assignments of the
+                               // returned values
+    kOpcodePropNotPure,        // The operation does not return the same result with identical operands
+    kOpcodePropMayThrowException,
+    kOpcodePropIsAssertNonnull,       // The operation checks nonnull
+    kOpcodePropIsAssertUpperBoundary, // The operation checks the upper boundary
+    kOpcodePropIsAssertLowerBoundary, // The operation checks the lower boundary
+};
+
+constexpr unsigned long OPCODEISSTMT = 1ULL << kOpcodePropIsStmt;
+constexpr unsigned long OPCODEISVARSIZE = 1ULL << kOpcodePropIsVarSize;
+constexpr unsigned long OPCODENOTMMPL = 1ULL << kOpcodePropNotMMPL;
+constexpr unsigned long OPCODEISCOMPARE = 1ULL << kOpcodePropIsCompare;
+constexpr unsigned long OPCODEISTYPECVT = 1ULL << kOpcodePropIsTypeCvt;
+constexpr unsigned long OPCODEHASSSAUSE = 1ULL << kOpcodePropHasSSAUse;
+constexpr unsigned long OPCODEHASSSADEF = 1ULL << kOpcodePropHasSSADef;
+constexpr unsigned long OPCODEISCALL = 1ULL << kOpcodePropIsCall;
+constexpr unsigned long OPCODEISCALLASSIGNED = 1ULL << kOpcodePropIsCallAssigned;
+constexpr unsigned long OPCODENOTPURE = 1ULL << kOpcodePropNotPure;
+constexpr unsigned long OPCODEMAYTHROWEXCEPTION = 1ULL << kOpcodePropMayThrowException;
+constexpr unsigned long OPCODEASSERTNONNULL = 1ULL << kOpcodePropIsAssertNonnull;
+constexpr unsigned long OPCODEASSERTUPPERBOUNDARY = 1ULL << kOpcodePropIsAssertUpperBoundary;
+constexpr unsigned long OPCODEASSERTLOWERBOUNDARY = 1ULL << kOpcodePropIsAssertLowerBoundary;
+
+struct OpcodeDesc {
+    uint8 instrucSize; // size of instruction in bytes
+    uint16 flag;       // stores the opcode property flags
+    std::string name;
+};
+
+class OpcodeTable {
+public:
+    OpcodeTable();
+    ~OpcodeTable() = default;
+
+    OpcodeDesc GetTableItemAt(Opcode o) const
+    {
+        DEBUG_ASSERT(o < OP_last, "invalid opcode");
+        return table[o];
+    }
+
+    bool IsStmt(Opcode o) const
+    {
+        DEBUG_ASSERT(o < OP_last,
"invalid opcode"); + return table[o].flag & OPCODEISSTMT; + } + + bool IsVarSize(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISVARSIZE; + } + + bool NotMMPL(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTMMPL; + } + + bool IsCompare(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCOMPARE; + } + + bool IsTypeCvt(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISTYPECVT; + } + + bool HasSSAUse(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSAUSE; + } + + bool HasSSADef(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSADEF; + } + + bool IsCall(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALL; + } + + bool IsCallAssigned(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALLASSIGNED; + } + + bool IsICall(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_icall || o == OP_icallassigned || o == OP_icallproto || o == OP_icallprotoassigned || + o == OP_virtualicall || o == OP_virtualicallassigned || o == OP_interfaceicall || + o == OP_interfaceicallassigned; + } + + bool NotPure(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTPURE; + } + + bool MayThrowException(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEMAYTHROWEXCEPTION; + } + + bool HasSideEffect(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return MayThrowException(o); + } + + const std::string &GetName(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].name; + } + + bool IsCondBr(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_brtrue || o == OP_brfalse; + } + + bool AssignActualVar(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_dassign || o == OP_regassign; + } + + bool IsAssertNonnull(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTNONNULL; + } + + bool IsCallAssertNonnull(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertnonnull; + } + + bool IsAssertBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & (OPCODEASSERTUPPERBOUNDARY | OPCODEASSERTLOWERBOUNDARY); + } + + bool IsAssertUpperBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTUPPERBOUNDARY; + } + + bool IsAssertLowerBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTLOWERBOUNDARY; + } + + bool IsCallAssertBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertle; + } + + bool IsAssertLeBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_callassertle || o == OP_returnassertle || o == OP_assignassertle); + } + + bool IsCalcAssertBoundary(Opcode o) const + { + DEBUG_ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_calcassertlt || o == OP_calcassertge); + } + +private: + OpcodeDesc table[OP_last]; +}; +extern const OpcodeTable kOpcodeInfo; 
+} // namespace maple +#endif // MAPLE_IR_INCLUDE_OPCODE_INFO_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.def b/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.def new file mode 100755 index 0000000000000000000000000000000000000000..e5750c6f4917c6afce08051b68ca1c24224b9b3c --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.def @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Stmt & Notmmpl + // storage access opcodes + OPCODE(dassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(piassign, PiassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(maydassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(iassign, IassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 12) + // hierarchical control flow opcodes + OPCODE(block, BlockNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(doloop, DoloopNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(dowhile, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(if, IfStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(while, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(switch, SwitchNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(multiway, MultiwayNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(foreachelem, ForeachelemNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + // other opcodes + OPCODE(comment, CommentNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(eval, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(free, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(calcassertge, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(calcassertlt, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assertge, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(assertlt, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(callassertle, CallAssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(returnassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assignassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(abort, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(assertnonnull, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(assignassertnonnull, AssignAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(callassertnonnull, CallAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(returnassertnonnull, ReturnAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) +// Expr & Notmmpl + // storage access opcodes + OPCODE(dread, AddrofNode, (OPCODENOTMMPL 
| OPCODEHASSSAUSE), 12) + OPCODE(iread, IreadNode, (OPCODENOTMMPL | OPCODEHASSSAUSE), 12) + // leaf opcodes + OPCODE(addrof, AddrofNode, OPCODENOTMMPL, 12) + OPCODE(iaddrof, IreadNode, OPCODENOTMMPL, 12) + OPCODE(sizeoftype, SizeoftypeNode, OPCODENOTMMPL, 8) + OPCODE(fieldsdist, FieldsDistNode, OPCODENOTMMPL, 8) + // N-ary expression opcodes + OPCODE(array, ArrayNode, (OPCODEISVARSIZE | OPCODENOTMMPL | OPCODEMAYTHROWEXCEPTION), 8) +// Stmt + // storage access opcodes + OPCODE(iassignoff, IassignoffNode, OPCODEISSTMT, 8) + OPCODE(iassignfpoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(regassign, RegassignNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + // flat control flow opcodes + OPCODE(goto, GotoNode, OPCODEISSTMT, 8) + OPCODE(brfalse, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(brtrue, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(return, NaryStmtNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE), 0) + OPCODE(rangegoto, RangeGotoNode, OPCODEISSTMT, 8) + // call opcodes + OPCODE(call, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(superclasscall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfacecall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(customcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(polymorphiccall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(icall, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfaceicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccallwithtype, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(xintrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(callassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(virtualcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(superclasscallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfacecallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(customcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(polymorphiccallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(icallassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfaceicallassigned, 
CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(virtualicallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(intrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(intrinsiccallwithtypeassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(xintrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    // call with generic instantiation opcodes
+    OPCODE(callinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+    OPCODE(callinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(virtualcallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+    OPCODE(virtualcallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(superclasscallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+    OPCODE(superclasscallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    OPCODE(interfacecallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0)
+    OPCODE(interfacecallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0)
+    // exception handling
+    OPCODE(jstry, JsTryNode, OPCODEISSTMT, 8)
+    OPCODE(try, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8)
+    OPCODE(cpptry, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8)
+
+    OPCODE(throw, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 0)
+
+    OPCODE(jscatch, StmtNode, OPCODEISSTMT, 4)
+    OPCODE(catch, CatchNode, OPCODEISSTMT, 8)
+    OPCODE(cppcatch, CppCatchNode, OPCODEISSTMT, 8)
+
+    OPCODE(finally, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(cleanuptry, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(endtry, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(safe, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(endsafe, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(unsafe, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(endunsafe, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(gosub, GotoNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 8)
+    OPCODE(retsub, StmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 6)
+    // synchronization
+    OPCODE(syncenter, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0)
+    OPCODE(syncexit, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0)
+    OPCODE(decref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+    OPCODE(incref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+    OPCODE(decrefreset, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0)
+    // barriers
+    OPCODE(membaracquire, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(membarrelease, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(membarstoreload, StmtNode, OPCODEISSTMT, 6)
+    OPCODE(membarstorestore, StmtNode, OPCODEISSTMT, 6)
+    // other opcodes
OPCODE(label, LabelNode, OPCODEISSTMT, 8) +// Expr + // storage access opcodes + OPCODE(ireadoff, IreadoffNode, 0, 8) + OPCODE(ireadfpoff, IreadFPoffNode, 0, 8) + OPCODE(regread, RegreadNode, OPCODEHASSSAUSE, 8) + // leaf opcodes + OPCODE(addroffunc, AddroffuncNode, 0, 8) + OPCODE(addroflabel, AddroflabelNode, 0, 8) + OPCODE(constval, ConstvalNode, 0, 8) + OPCODE(conststr, ConststrNode, OPCODENOTMMPL, 8) + OPCODE(conststr16, Conststr16Node, OPCODENOTMMPL, 8) + // type conversion expression opcodes + OPCODE(ceil, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(cvt, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(floor, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(retype, RetypeNode, OPCODEISTYPECVT, 8) + OPCODE(round, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(trunc, TypeCvtNode, OPCODEISTYPECVT, 8) + // unary expression opcodes + OPCODE(abs, UnaryNode, 0, 0) + OPCODE(bnot, UnaryNode, 0, 0) + OPCODE(lnot, UnaryNode, 0, 0) + OPCODE(neg, UnaryNode, 0, 0) + OPCODE(recip, UnaryNode, 0, 0) + OPCODE(sqrt, UnaryNode, 0, 0) + OPCODE(sext, ExtractbitsNode, 0, 8) + OPCODE(zext, ExtractbitsNode, 0, 8) + +#ifdef alloca +#undef alloca + OPCODE(alloca, UnaryNode, OPCODENOTPURE, 0) +#define alloca __builtin_alloca +#else + OPCODE(alloca, UnaryNode, OPCODENOTPURE, 0) +#endif + + OPCODE(malloc, UnaryNode, OPCODENOTPURE, 0) + OPCODE(gcmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcpermalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(stackmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(gcpermallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(stackmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(resolveinterfacefunc, ResolveFuncNode, 0, 8) + OPCODE(resolvevirtualfunc, ResolveFuncNode, 0, 8) + // binary expression opcodes + OPCODE(add, BinaryNode, 0, 0) + OPCODE(sub, BinaryNode, 0, 0) + OPCODE(mul, BinaryNode, 0, 0) + OPCODE(div, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(rem, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(ashr, BinaryNode, 0, 0) + OPCODE(lshr, BinaryNode, 0, 0) + OPCODE(shl, BinaryNode, 0, 0) + OPCODE(ror, BinaryNode, 0, 0) + OPCODE(max, BinaryNode, 0, 0) + OPCODE(min, BinaryNode, 0, 0) + OPCODE(band, BinaryNode, 0, 0) + OPCODE(bior, BinaryNode, 0, 0) + OPCODE(bxor, BinaryNode, 0, 0) + OPCODE(CG_array_elem_add, BinaryNode, 0, 0) + OPCODE(eq, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ge, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(gt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(le, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(lt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ne, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmp, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpl, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpg, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(land, BinaryNode, 0, 0) + OPCODE(lior, BinaryNode, 0, 0) + OPCODE(cand, BinaryNode, OPCODENOTMMPL, 0) + OPCODE(cior, BinaryNode, OPCODENOTMMPL, 0) + // ternary expression opcodes + OPCODE(select, TernaryNode, 0, 0) + // N-ary expression opcodes + OPCODE(intrinsicop, IntrinsicopNode, OPCODEISVARSIZE, 8) + OPCODE(intrinsicopwithtype, IntrinsicopNode, OPCODEISVARSIZE, 12) + // Other expression opcodes + OPCODE(extractbits, ExtractbitsNode, 0, 8) + OPCODE(depositbits, DepositbitsNode, 0, 8) + // storage access + OPCODE(iassignpcoff, IassignPCoffNode, OPCODEISSTMT, 0) + OPCODE(ireadpcoff, IreadPCoffNode, 0, 0) + // barrier + OPCODE(checkpoint, StmtNode, OPCODEISSTMT, 0) + // leaf node + OPCODE(addroffpc, AddroffPCNode, 0, 0) + OPCODE(igoto, UnaryStmtNode, OPCODEISSTMT, 0) + 
OPCODE(asm, AsmNode, OPCODEISSTMT | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALLASSIGNED, 0) + OPCODE(dreadoff, dreadoffNode, OPCODEHASSSAUSE, 12) + OPCODE(addrofoff, addrofoffNode, 0, 12) + OPCODE(dassignoff, DassignoffNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) + OPCODE(icallproto, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(icallprotoassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 8) diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.h b/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.h new file mode 100644 index 0000000000000000000000000000000000000000..15404146dcf1a2cd6e37bd500e52a31b4a41174c --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/opcodes.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_OPCODES_H +#define MAPLE_IR_INCLUDE_OPCODES_H +#include "types_def.h" +#include "mpl_logging.h" + +namespace maple { +enum Opcode : uint8 { + OP_undef, +#define OPCODE(STR, YY, ZZ, SS) OP_##STR, +#include "opcodes.def" +#undef OPCODE + OP_last, +}; + +#define CASE_OP_ASSERT_NONNULL \ + case OP_assertnonnull: \ + case OP_assignassertnonnull: \ + case OP_callassertnonnull: \ + case OP_returnassertnonnull: + +#define CASE_OP_ASSERT_BOUNDARY \ + case OP_assertge: \ + case OP_assertlt: \ + case OP_calcassertge: \ + case OP_calcassertlt: \ + case OP_callassertle: \ + case OP_returnassertle: \ + case OP_assignassertle: + +inline constexpr bool IsDAssign(Opcode code) +{ + return (code == OP_dassign || code == OP_maydassign); +} + +inline constexpr bool IsCallAssigned(Opcode code) +{ + return (code == OP_callassigned || code == OP_virtualcallassigned || code == OP_virtualicallassigned || + code == OP_superclasscallassigned || code == OP_interfacecallassigned || + code == OP_interfaceicallassigned || code == OP_customcallassigned || code == OP_polymorphiccallassigned || + code == OP_icallassigned || code == OP_icallprotoassigned || code == OP_intrinsiccallassigned || + code == OP_xintrinsiccallassigned || code == OP_intrinsiccallwithtypeassigned); +} + +inline constexpr bool IsBranch(Opcode opcode) +{ + return (opcode == OP_goto || opcode == OP_brtrue || opcode == OP_brfalse || opcode == OP_switch || + opcode == OP_igoto); +} + +inline constexpr bool IsLogicalShift(Opcode opcode) +{ + return (opcode == OP_lshr || opcode == OP_shl); +} + +constexpr bool IsCommutative(Opcode opcode) +{ + switch (opcode) { + case OP_add: + case OP_mul: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_eq: + case OP_ne: + case OP_land: + case OP_lior: + return true; + default: + return false; + } +} + +constexpr bool IsStmtMustRequire(Opcode opcode) +{ + switch 
(opcode) { + case OP_jstry: + case OP_throw: + case OP_try: + case OP_catch: + case OP_jscatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_gosub: + case OP_retsub: + case OP_return: + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: + case OP_icall: + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: + case OP_intrinsiccall: + case OP_xintrinsiccall: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: + case OP_intrinsiccallwithtype: + case OP_intrinsiccallwithtypeassigned: + case OP_asm: + case OP_syncenter: + case OP_syncexit: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstoreload: + case OP_membarstorestore: + CASE_OP_ASSERT_NONNULL + CASE_OP_ASSERT_BOUNDARY + case OP_free: + case OP_incref: + case OP_decref: + case OP_decrefreset: { + return true; + } + default: + return false; + } +} + +// the result of these op is actually u1(may be set as other type, but its return value can only be zero or one) +// different from kOpcodeInfo.IsCompare(op) : cmp/cmpg/cmpl have no reverse op, and may return -1/0/1 +constexpr bool IsCompareHasReverseOp(Opcode op) +{ + if (op == OP_eq || op == OP_ne || op == OP_ge || op == OP_gt || op == OP_le || op == OP_lt) { + return true; + } + return false; +} + +constexpr Opcode GetSwapCmpOp(Opcode op) +{ + switch (op) { + case OP_eq: + return OP_eq; + case OP_ne: + return OP_ne; + case OP_ge: + return OP_le; + case OP_gt: + return OP_lt; + case OP_le: + return OP_ge; + case OP_lt: + return OP_gt; + default: + CHECK_FATAL(false, "can't swap op"); + return op; + } +} + +constexpr Opcode GetReverseCmpOp(Opcode op) +{ + switch (op) { + case OP_eq: + return OP_ne; + case OP_ne: + return OP_eq; + case OP_ge: + return OP_lt; + case OP_gt: + return OP_le; + case OP_le: + return OP_gt; + case OP_lt: + return OP_ge; + default: + CHECK_FATAL(false, "opcode has no reverse op"); + return op; + } +} + +constexpr bool IsSupportedOpForCopyInPhasesLoopUnrollAndVRP(Opcode op) +{ + switch (op) { + case OP_igoto: + case OP_switch: + case OP_comment: + case OP_goto: + case OP_dassign: + case OP_regassign: + case OP_membarrelease: + case OP_brfalse: + case OP_brtrue: + case OP_maydassign: + case OP_iassign: + CASE_OP_ASSERT_NONNULL + CASE_OP_ASSERT_BOUNDARY + case OP_membaracquire: + case OP_call: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_interfaceicallassigned: + case OP_intrinsiccall: + case OP_intrinsiccallassigned: + case OP_intrinsiccallwithtype: + case OP_membarstorestore: + case OP_membarstoreload: { + return true; + } + default: + return false; + } +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_OPCODES_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/option.h b/ecmascript/compiler/codegen/maple/maple_ir/include/option.h new file mode 100644 index 0000000000000000000000000000000000000000..e6f66174439efb358fee739357181732f632b4c8 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/option.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_OPTION_H
+#define MAPLE_IR_INCLUDE_OPTION_H
+#include <string>
+#include <vector>
+
+#include "mempool.h"
+#include "mempool_allocator.h"
+#include "parser_opt.h"
+#include "types_def.h"
+
+namespace maple {
+class Options {
+public:
+    static Options &GetInstance();
+
+    bool ParseOptions(int argc, char **argv, std::string &fileName) const;
+
+    bool SolveOptions(bool isDebug) const;
+    ~Options() = default;
+
+    void DumpOptions() const;
+    const std::vector<std::string> &GetSequence() const
+    {
+        return phaseSeq;
+    }
+
+    std::string LastPhaseName() const
+    {
+        return phaseSeq.empty() ? "noopt" : phaseSeq[phaseSeq.size() - 1];
+    }
+
+    enum Level { kMpl2MplLevelZero = 0, kMpl2MplLevelOne = 1, kMpl2MplLevelTwo = 2 };
+    enum DecoupleLevel { kNoDecouple = 0, kConservativeDecouple = 1, kAggressiveDecouple = 2, kDecoupleAndLazy = 3 };
+
+    static bool DumpPhase(const std::string &phase)
+    {
+        if (phase == "") {
+            return false;
+        }
+        return dumpPhase == "*" || dumpPhase == phase;
+    }
+
+    static bool IsSkipPhase(const std::string &phaseName)
+    {
+        return skipPhase == phaseName;
+    }
+
+    static bool DumpFunc()
+    {
+        return dumpFunc != "*" && dumpFunc != "";
+    }
+    static bool IsBigEndian()
+    {
+        return bigEndian;
+    }
+
+    static bool dumpBefore;
+    static bool dumpAfter;
+    static std::string dumpPhase;
+    static std::string skipPhase;
+    static std::string skipFrom;
+    static std::string skipAfter;
+    static std::string dumpFunc;
+    static bool quiet;
+    static bool regNativeFunc;
+    static bool regNativeDynamicOnly;
+    static bool nativeWrapper;
+    static bool inlineWithProfile;
+    static bool useInline;
+    static bool enableIPAClone;
+    static std::string noInlineFuncList;
+    static std::string importFileList;
+    static bool useCrossModuleInline;
+    static uint32 numOfCloneVersions;
+    static uint32 numOfImpExprLowBound;
+    static uint32 numOfImpExprHighBound;
+    static uint32 numOfCallSiteLowBound;
+    static uint32 numOfCallSiteUpBound;
+    static uint32 numOfConstpropValue;
+    static uint32 inlineSmallFunctionThreshold;
+    static uint32 inlineHotFunctionThreshold;
+    static uint32 inlineRecursiveFunctionThreshold;
+    static uint32 inlineDepth;
+    static uint32 inlineModuleGrowth;
+    static uint32 inlineColdFunctionThreshold;
+    static uint32 profileHotCount;
+    static uint32 profileColdCount;
+    static bool profileHotCountSeted;
+    static bool profileColdCountSeted;
+    static uint32 profileHotRate;
+    static uint32 profileColdRate;
+    static std::string staticBindingList;
+    static bool usePreg;
+    static bool mapleLinker;
+    static bool dumpMuidFile;
+    static bool emitVtableImpl;
+#if MIR_JAVA
+    static bool skipVirtualMethod;
+#endif
+    // Ready to be deleted.
+    static bool noRC;
+    static bool analyzeCtor;
+    static bool strictNaiveRC;
+    static bool gcOnly;
+    static bool bigEndian;
+    static bool rcOpt1;
+    static std::string classMetaProFile;
+    static std::string methodMetaProfile;
+    static std::string fieldMetaProFile;
+    static std::string reflectStringProFile;
+    static bool nativeOpt;
+    static bool optForSize;
+    static bool O2;
+    static bool noDot;
+    static std::string criticalNativeFile;
+    static std::string fastNativeFile;
+    static bool barrier;
+    static std::string nativeFuncPropertyFile;
+    static bool mapleLinkerTransformLocal;
+    static uint32 buildApp;
+    static bool partialAot;
+    static uint32 decoupleInit;
+    static std::string sourceMuid;
+    static bool decoupleSuper;
+    static bool deferredVisit;
+    static bool deferredVisit2;
+    static bool genVtabAndItabForDecouple;
+    static bool profileFunc;
+    static uint32 parserOpt;
+    static std::string dumpDevirtualList;
+    static std::string readDevirtualList;
+    static bool usePreloadedClass;
+    static std::string profile;
+    static bool profileGen;
+    static bool profileUse;
+    static std::string appPackageName;
+    static std::string proFileData;
+    static std::string proFileFuncData;
+    static std::string proFileClassData;
+    static bool profileStaticFields;
+    static bool genIRProfile;
+    static bool profileTest;
+    static std::string classLoaderInvocationList;
+    static bool dumpClassLoaderInvocation;
+    static unsigned int warningLevel;
+    static bool lazyBinding;
+    static bool hotFix;
+    static bool compactMeta;
+    static bool genPGOReport;
+    static bool verify;
+    static uint32 inlineCache;
+    static bool checkArrayStore;
+    static bool noComment;
+    static bool rmNoUseFunc;
+    static bool sideEffect;
+    static bool dumpIPA;
+    static bool wpaa;
+    static bool genLMBC;
+
+private:
+    void DecideMpl2MplRealLevel() const;
+    std::vector<std::string> phaseSeq;
+};
+} // namespace maple
+#ifndef TRACE_PHASE
+#define TRACE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0)
+#endif
+
+#ifndef TRACE_MAPLE_PHASE
+#define TRACE_MAPLE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0)
+#endif
+#endif // MAPLE_IR_INCLUDE_OPTION_H
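The static `DumpPhase`/`IsSkipPhase` helpers are what the `TRACE_PHASE` macros above expand into: a phase passes its own name, and the global `dumpPhase` string decides, with `"*"` matching every phase. A hedged, standalone sketch of that matching logic (free functions here instead of the real `Options` singleton):

```cpp
// Standalone sketch of the phase-gating match behind Options::DumpPhase.
#include <iostream>
#include <string>

static std::string dumpPhase = "hdse";  // would come from a --dump-phase flag

static bool ShouldDumpPhase(const std::string &phase)
{
    if (phase.empty()) {
        return false;
    }
    return dumpPhase == "*" || dumpPhase == phase;  // "*" is the wildcard
}

int main()
{
    std::cout << ShouldDumpPhase("hdse") << ' '    // 1: exact match
              << ShouldDumpPhase("mecfg") << '\n';  // 0: filtered out
}
```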
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/parser_opt.h b/ecmascript/compiler/codegen/maple/maple_ir/include/parser_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..6aa32bd5a7ab6e2ae50520cde927bdacce9fe1a3
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/parser_opt.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_PARSER_OPT_H
+#define MAPLE_IR_INCLUDE_PARSER_OPT_H
+#include "types_def.h"
+
+namespace maple {
+// option bits passed into ParseMIR
+enum ParserOptions : uint8 {
+    kInvalidOption = 0x0,
+    kWithDbgInfo = 0x1,         // collect dbginfo
+    kKeepFirst = 0x2,           // ignore second type def, do not emit an error
+    kWithProfileInfo = 0x4,
+    kParseOptFunc = 0x08,       // parse optimized function mpl file
+    kParseInlineFuncBody = 0x10 // parse to-be-inlined function bodies
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_PARSER_OPT_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.def b/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.def
new file mode 100644
index 0000000000000000000000000000000000000000..cd2fc6aa570b87d6ab1ae76266e0ae032095140b
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.def
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef LOAD_ALGO_PRIMARY_TYPE
+#undef LOAD_ALGO_PRIMARY_TYPE
+// NOTE: this ordering needs to be in sync with ptypesizetable[] in maplevm/src/vmfunc.cpp
+    PRIMTYPE(void)
+    PRIMTYPE(i8)
+    PRIMTYPE(i16)
+    PRIMTYPE(i32)
+    PRIMTYPE(i64)
+    PRIMTYPE(i128)
+    PRIMTYPE(u8)
+    PRIMTYPE(u16)
+    PRIMTYPE(u32)
+    PRIMTYPE(u64)
+    PRIMTYPE(u128)
+    PRIMTYPE(u1)
+    PRIMTYPE(ptr)
+    PRIMTYPE(ref)
+    PRIMTYPE(a32)
+    PRIMTYPE(a64)
+    PRIMTYPE(f32)
+    PRIMTYPE(f64)
+    PRIMTYPE(f128)
+    PRIMTYPE(c64)
+    PRIMTYPE(c128)
+#ifdef DYNAMICLANG
+    PRIMTYPE(simplestr)
+    PRIMTYPE(simpleobj)
+    PRIMTYPE(dynany)
+    PRIMTYPE(dynundef)
+    PRIMTYPE(dynnull)
+    PRIMTYPE(dynbool)
+    PRIMTYPE(dyni32)
+    PRIMTYPE(dynstr)
+    PRIMTYPE(dynobj)
+    PRIMTYPE(dynf64)
+    PRIMTYPE(dynf32)
+    PRIMTYPE(dynnone)
+#endif
+    PRIMTYPE(constStr)
+    PRIMTYPE(gen)
+    PRIMTYPE(agg)
+    PRIMTYPE(v2i64)
+    PRIMTYPE(v4i32)
+    PRIMTYPE(v8i16)
+    PRIMTYPE(v16i8)
+    PRIMTYPE(v2u64)
+    PRIMTYPE(v4u32)
+    PRIMTYPE(v8u16)
+    PRIMTYPE(v16u8)
+    PRIMTYPE(v2f64)
+    PRIMTYPE(v4f32)
+    PRIMTYPE(v2i32)
+    PRIMTYPE(v4i16)
+    PRIMTYPE(v8i8)
+    PRIMTYPE(v2u32)
+    PRIMTYPE(v4u16)
+    PRIMTYPE(v8u8)
+    PRIMTYPE(v2f32)
+    PRIMTYPE(reservedpty1)
+    PRIMTYPE(reservedpty2)
+    PRIMTYPE(reservedpty3)
+    PRIMTYPE(reservedpty4)
+    PRIMTYPE(reservedpty5)
+    PRIMTYPE(reservedpty6)
+    PRIMTYPE(reservedpty7)
+    PRIMTYPE(reservedpty8)
+    PRIMTYPE(reservedpty9)
+    PRIMTYPE(reservedpty10)
+    PRIMTYPE(unknown)
+#endif // ~LOAD_ALGO_PRIMARY_TYPE
+
+
+#ifdef LOAD_PRIMARY_TYPE_PROPERTY
+#undef LOAD_PRIMARY_TYPE_PROPERTY
+
+static const PrimitiveTypeProperty PTProperty_begin = {
+    /*type=*/PTY_begin, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+static const PrimitiveTypeProperty PTProperty_void = {
+    /*type=*/PTY_void, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i8 = {
+    /*type=*/PTY_i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i16 = {
+    /*type=*/PTY_i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i32 = {
+    /*type=*/PTY_i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i64 = {
+    /*type=*/PTY_i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_i128 = {
+    /*type=*/PTY_i128, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u8 = {
+    /*type=*/PTY_u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u16 = {
+    /*type=*/PTY_u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+/* isAddress and isPointer are overloaded in getter method for PTProperty_u32 */
+static const PrimitiveTypeProperty PTProperty_u32 = {
+    /*type=*/PTY_u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+/* isAddress and isPointer are overloaded in getter method for PTProperty_u64 */
+static const PrimitiveTypeProperty PTProperty_u64 = {
+    /*type=*/PTY_u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false,
+    /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u128 = {
+    /*type=*/PTY_u128, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_u1 = {
+    /*type=*/PTY_u1, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false,
+    /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false,
+    /*isVector*/false
+};
+
+static const PrimitiveTypeProperty PTProperty_ptr = {
/*type=*/PTY_ptr, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_ref = { + /*type=*/PTY_ref, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a32 = { + /*type=*/PTY_a32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a64 = { + /*type=*/PTY_a64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f32 = { + /*type=*/PTY_f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f64 = { + /*type=*/PTY_f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f128 = { + /*type=*/PTY_f128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c64 = { + /*type=*/PTY_c64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c128 = { + /*type=*/PTY_c128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +#ifdef DYNAMICLANG +static const PrimitiveTypeProperty PTProperty_simplestr = { + /*type=*/PTY_simplestr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_simpleobj = { + /*type=*/PTY_simpleobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynany = { + /*type=*/PTY_dynany, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/true, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const 
PrimitiveTypeProperty PTProperty_dynundef = { + /*type=*/PTY_dynundef, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnull = { + /*type=*/PTY_dynnull, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynbool = { + /*type=*/PTY_dynbool, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dyni32 = { + /*type=*/PTY_dyni32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynstr = { + /*type=*/PTY_dynstr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynobj = { + /*type=*/PTY_dynobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf64 = { + /*type=*/PTY_dynf64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf32 = { + /*type=*/PTY_dynf32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnone = { + /*type=*/PTY_dynnone, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/true, + /*isVector*/false +}; +#endif // ~DYNAMICLANG + +static const PrimitiveTypeProperty PTProperty_constStr = { + /*type=*/PTY_constStr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_gen = { + /*type=*/PTY_gen, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_agg = { + /*type=*/PTY_agg, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, 
/*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_v2i64 = { + /*type=*/PTY_v2i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i32 = { + /*type=*/PTY_v4i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i16 = { + /*type=*/PTY_v8i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16i8 = { + /*type=*/PTY_v16i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u64 = { + /*type=*/PTY_v2u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u32 = { + /*type=*/PTY_v4u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u16 = { + /*type=*/PTY_v8u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16u8 = { + /*type=*/PTY_v16u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f64 = { + /*type=*/PTY_v2f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4f32 = { + /*type=*/PTY_v4f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2i32 = { + /*type=*/PTY_v2i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i16 = { + /*type=*/PTY_v4i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + 
/*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i8 = { + /*type=*/PTY_v8i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u32 = { + /*type=*/PTY_v2u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u16 = { + /*type=*/PTY_v4u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u8 = { + /*type=*/PTY_v8u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f32 = { + /*type=*/PTY_v2f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty1 = { + /*type=*/PTY_reservedpty1, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty2 = { + /*type=*/PTY_reservedpty2, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty3 = { + /*type=*/PTY_reservedpty3, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty4 = { + /*type=*/PTY_reservedpty4, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty5 = { + /*type=*/PTY_reservedpty5, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty6 = { + /*type=*/PTY_reservedpty6, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty 
PTProperty_reservedpty7 = { + /*type=*/PTY_reservedpty7, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty8 = { + /*type=*/PTY_reservedpty8, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty9 = { + /*type=*/PTY_reservedpty9, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty10 = { + /*type=*/PTY_reservedpty10, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_unknown = { + /*type=*/PTY_unknown, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_end = { + /*type=*/PTY_end, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +#endif // ~LOAD_PRIMARY_TYPE_PROPERTY diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.h b/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.h new file mode 100644 index 0000000000000000000000000000000000000000..64489f0d176d139b45fd279897af949df1250de6 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/prim_types.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_PRIM_TYPES_H +#define MAPLE_IR_INCLUDE_PRIM_TYPES_H +#include "types_def.h" +#include "cfg_primitive_types.h" + +namespace maple { +class PrimitiveType { +public: + // we need implicit conversion from PrimType to PrimitiveType, so there is no explicit keyword here. 
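+    // Illustrative note (not in the original source): because the constructor below is implicit,
+    // call sites may pass a bare PrimType where a PrimitiveType is expected, e.g.
+    //     bool isFp = PrimitiveType(PTY_f64).IsFloat();  // true, via PTProperty_f64
+    // The reference member at the bottom of this class aliases the matching static property entry.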
+    PrimitiveType(PrimType type) : property(GetPrimitiveTypeProperty(type)) {}
+    ~PrimitiveType() = default;
+
+    PrimType GetType() const
+    {
+        return property.type;
+    }
+
+    bool IsInteger() const
+    {
+        return property.IsInteger();
+    }
+    bool IsUnsigned() const
+    {
+        return property.IsUnsigned();
+    }
+    bool IsAddress() const
+    {
+        return property.IsAddress();
+    }
+    bool IsFloat() const
+    {
+        return property.IsFloat();
+    }
+    bool IsPointer() const
+    {
+        return property.IsPointer();
+    }
+    bool IsDynamic() const
+    {
+        return property.IsDynamic();
+    }
+    bool IsSimple() const
+    {
+        return property.IsSimple();
+    }
+    bool IsDynamicAny() const
+    {
+        return property.IsDynamicAny();
+    }
+    bool IsDynamicNone() const
+    {
+        return property.IsDynamicNone();
+    }
+    bool IsVector() const
+    {
+        return property.IsVector();
+    }
+
+private:
+    const PrimitiveTypeProperty &property;
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_PRIM_TYPES_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/printing.h b/ecmascript/compiler/codegen/maple/maple_ir/include/printing.h
new file mode 100644
index 0000000000000000000000000000000000000000..d83e9cb2e0eb5fc42c506abf3c3059000b9375c9
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/printing.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_PRINTING_H
+#define MAPLE_IR_INCLUDE_PRINTING_H
+#include <string>
+#include "types_def.h"
+
+namespace maple {
+void PrintIndentation(int32 indent);
+void PrintString(const std::string &str);
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_PRINTING_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/simplifyintrinsics.def b/ecmascript/compiler/codegen/maple/maple_ir/include/simplifyintrinsics.def
new file mode 100644
index 0000000000000000000000000000000000000000..e570abe3eb8897d17ea45b041137d6a787418169
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/simplifyintrinsics.def
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +/* INTRINSIC(STR, NAME) */ +DEF_MIR_INTRINSIC(GET_AND_ADDI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_ADDL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(GET_AND_SETI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_SETL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(COMP_AND_SWAPI, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapInt_7C_28Ljava_2Flang_2FObject_3BJII_29Z",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(COMP_AND_SWAPL, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(STR_INDEXOF, "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I",\ + INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef) + diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/src_position.h b/ecmascript/compiler/codegen/maple/maple_ir/include/src_position.h new file mode 100644 index 0000000000000000000000000000000000000000..c85bc9d3bcbaba87e033648fae69e2da56b7b90d --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/src_position.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_SRC_POSITION_H +#define MAPLE_IR_INCLUDE_SRC_POSITION_H +#include "mpl_logging.h" + +namespace maple { +// to store source position information +class SrcPosition { +public: + SrcPosition() : lineNum(0), mplLineNum(0) + { + u.word0 = 0; + } + + virtual ~SrcPosition() = default; + + uint32 RawData() const + { + return u.word0; + } + + uint32 FileNum() const + { + return u.fileColumn.fileNum; + } + + uint32 Column() const + { + return u.fileColumn.column; + } + + uint32 LineNum() const + { + return lineNum; + } + + uint32 MplLineNum() const + { + return mplLineNum; + } + + void SetFileNum(uint16 n) + { + u.fileColumn.fileNum = n; + } + + void SetColumn(uint16 n) + { + u.fileColumn.column = n; + } + + void SetLineNum(uint32 n) + { + lineNum = n; + } + + void SetRawData(uint32 n) + { + u.word0 = n; + } + + void SetMplLineNum(uint32 n) + { + mplLineNum = n; + } + + void CondSetLineNum(uint32 n) + { + lineNum = lineNum ? lineNum : n; + } + + void CondSetFileNum(uint16 n) + { + uint16 i = u.fileColumn.fileNum; + u.fileColumn.fileNum = i ? 
i : n; + } + + // as you read: this->IsBfOrEq(pos) + bool IsBfOrEq(SrcPosition pos) const + { + return (pos.FileNum() == FileNum() && + ((LineNum() < pos.LineNum()) || ((LineNum() == pos.LineNum()) && (Column() <= pos.Column())))); + } + + bool IsSrcPostionEq(SrcPosition pos) const + { + return FileNum() == pos.FileNum() && LineNum() == pos.LineNum() && Column() == pos.Column(); + } + + void DumpLoc(uint32 &lastLineNum, uint16 &lastColumnNum) const + { + if (FileNum() != 0 && LineNum() != 0) { + if (Column() != 0 && (LineNum() != lastLineNum || Column() != lastColumnNum)) { + DumpLocWithCol(); + lastLineNum = LineNum(); + lastColumnNum = Column(); + } else if (LineNum() != lastLineNum) { + DumpLocWithLine(); + lastLineNum = LineNum(); + } + } + } + + void DumpLocWithLine() const + { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << '\n'; + } + + void DumpLocWithCol() const + { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << " " << Column() << '\n'; + } + + std::string DumpLocWithColToString() const + { + std::stringstream ss; + ss << "LOC " << FileNum() << " " << LineNum() << " " << Column(); + return ss.str(); + } + +private: + union { + struct { + uint16 fileNum; + uint16 column : 12; + uint16 stmtBegin : 1; + uint16 bbBegin : 1; + uint16 unused : 2; + } fileColumn; + uint32 word0; + } u; + uint32 lineNum; // line number of original src file, like foo.java + uint32 mplLineNum; // line number of mpl file +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_SRC_POSITION_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/tokens.h b/ecmascript/compiler/codegen/maple/maple_ir/include/tokens.h new file mode 100644 index 0000000000000000000000000000000000000000..c843e9d0f982f35cc158c966cf4d9dfcb949744b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/include/tokens.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MAPLE_IR_INCLUDE_TOKENS_H +#define MAPLE_IR_INCLUDE_TOKENS_H + +namespace maple { +enum TokenKind { + TK_invalid, +// keywords from this file +#define KEYWORD(STR) TK_##STR, +#include "keywords.def" +#undef KEYWORD + // non-keywords starting here + // constants + TK_intconst, + TK_floatconst, + TK_doubleconst, + // local name + TK_lname, + // global name + TK_gname, + // function name + TK_fname, + // pseudo register + TK_preg, + // special register + TK_specialreg, + // parent field + TK_prntfield, + // type parameter name + TK_typeparam, + // misc. + TK_newline, + TK_lparen, // ( + TK_rparen, // ) + TK_lbrace, // { + TK_rbrace, // } + TK_lbrack, // [ + TK_rbrack, // ] + TK_langle, // < + TK_rangle, // > + TK_eqsign, // = + TK_coma, // , + TK_dotdotdot, // ... 
+    TK_colon,     // :
+    TK_asterisk,  // *
+    TK_string,    // a literal string enclosed between "
+    TK_eof
+};
+} // namespace maple
+#endif // MAPLE_IR_INCLUDE_TOKENS_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/types_def.h b/ecmascript/compiler/codegen/maple/maple_ir/include/types_def.h
new file mode 100644
index 0000000000000000000000000000000000000000..7658944350687e95ec388153382bb3a432b93e17
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/types_def.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLE_IR_INCLUDE_TYPES_DEF_H
+#define MAPLE_IR_INCLUDE_TYPES_DEF_H
+
+// NOTE: Since we already committed to -std=c++0x, we should eventually use the
+// standard definitions in the <cstdint> and <cstddef> headers rather than
+// reinventing our own primitive types.
+#include <cstdint>
+#include <cstddef>
+#include <functional>
+#include "mpl_number.h"
+
+namespace maple {
+// Let's keep the following definitions so that existing code will continue to work.
+using int8 = std::int8_t;
+using int16 = std::int16_t;
+using int32 = std::int32_t;
+using int64 = std::int64_t;
+using uint8 = std::uint8_t;
+using uint16 = std::uint16_t;
+using uint32 = std::uint32_t;
+using uint64 = std::uint64_t;
+class StIdx { // scope nesting level + symbol table index
+public:
+    union un {
+        struct {
+            uint32 idx : 24;
+            uint8 scope; // scope level, with the global scope at level 1
+        } scopeIdx;
+
+        uint32 fullIdx;
+    };
+
+    StIdx()
+    {
+        u.fullIdx = 0;
+    }
+
+    StIdx(uint32 level, uint32 i)
+    {
+        u.scopeIdx.scope = level;
+        u.scopeIdx.idx = i;
+    }
+
+    StIdx(uint32 fidx)
+    {
+        u.fullIdx = fidx;
+    }
+
+    ~StIdx() = default;
+
+    uint32 Idx() const
+    {
+        return u.scopeIdx.idx;
+    }
+
+    void SetIdx(uint32 idx)
+    {
+        u.scopeIdx.idx = idx;
+    }
+
+    uint32 Scope() const
+    {
+        return u.scopeIdx.scope;
+    }
+
+    void SetScope(uint32 scpe)
+    {
+        u.scopeIdx.scope = static_cast<uint8>(scpe);
+    }
+
+    uint32 FullIdx() const
+    {
+        return u.fullIdx;
+    }
+
+    void SetFullIdx(uint32 idx)
+    {
+        u.fullIdx = idx;
+    }
+
+    bool Islocal() const
+    {
+        return u.scopeIdx.scope > 1;
+    }
+
+    bool IsGlobal() const
+    {
+        return u.scopeIdx.scope == 1;
+    }
+
+    bool operator==(const StIdx &x) const
+    {
+        return u.fullIdx == x.u.fullIdx;
+    }
+
+    bool operator!=(const StIdx &x) const
+    {
+        return !(*this == x);
+    }
+
+    bool operator<(const StIdx &x) const
+    {
+        return u.fullIdx < x.u.fullIdx;
+    }
+
+private:
+    un u;
+};
+
+using LabelIdx = uint32;
+using phyRegIdx = uint64;
+using OfstRegIdx = uint64;
+using LabelIDOrder = uint32;
+using PUIdx = uint32;
+using PregIdx = int32;
+using ExprIdx = int32;
+using FieldID = int32;
+
+class TypeTag;
+using TyIdx = utils::Index<TypeTag, uint32>; // global type table index
+
+class GStrTag;
+using GStrIdx = utils::Index<GStrTag, uint32>; // global string table index
+
+class UStrTag;
+using UStrIdx = utils::Index<UStrTag, uint32>; // user string table index (from the conststr opcode)
+
+class U16StrTag;
+using U16StrIdx = utils::Index<U16StrTag, uint32>; // user string table index (from the conststr opcode)
+
+const TyIdx kInitTyIdx = TyIdx(0);
+const TyIdx kNoneTyIdx = TyIdx(UINT32_MAX);
+
+enum SSALevel : uint8 {
+    kSSAInvalid = 0x00,
+    kSSATopLevel = 0x01,                        // ssa only for local top-level is valid
+    kSSAAddrTaken = 0x02,                       // ssa only for addr-taken is valid
+    kSSAMemory = kSSATopLevel | kSSAAddrTaken,  // ssa for both top-level and addr-taken is valid
+    kSSAHSSA = 0x04,                            // hssa is valid
+};
+
+constexpr uint8 kOperandNumUnary = 1;
+constexpr uint8 kOperandNumBinary = 2;
+constexpr uint8 kOperandNumTernary = 3;
+} // namespace maple
+namespace std {
+template <> // function-template-specialization
+class hash<maple::StIdx> {
+public:
+    size_t operator()(const maple::StIdx &x) const
+    {
+        std::size_t seed = 0;
+        hash_combine(seed, x.Scope());
+        hash_combine(seed, x.Idx());
+        return seed;
+    }
+};
+} // namespace std
+#endif // MAPLE_IR_INCLUDE_TYPES_DEF_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/unary_op.def b/ecmascript/compiler/codegen/maple/maple_ir/include/unary_op.def
new file mode 100644
index 0000000000000000000000000000000000000000..48b09bf6e517e8d10b966d0f4a2e97a2472c8faa
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/unary_op.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+UNARYOP(abs)
+UNARYOP(bnot)
+UNARYOP(lnot)
+UNARYOP(neg)
+UNARYOP(recip)
+UNARYOP(sqrt)
+UNARYOP(sext)
+UNARYOP(zext)
+UNARYOP(extractbits)
+UNARYOP(alloca)
+UNARYOP(malloc)
+UNARYOP(gcmallocjarray)
+UNARYOP(gcpermallocjarray)
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/verification.h b/ecmascript/compiler/codegen/maple/maple_ir/include/verification.h
new file mode 100644
index 0000000000000000000000000000000000000000..89e596b9d55fbe36deaaedcd8366fa9198b2aed1
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/verification.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLEIR_VERIFICATION_PHASE_H
+#define MAPLEIR_VERIFICATION_PHASE_H
+#include "class_hierarchy.h"
+#include "verify_pragma_info.h"
+
+namespace maple {
+using ClassVerifyPragmas = MapleUnorderedMap<std::string, MapleVector<const VerifyPragmaInfo *>>;
+
+class VerifyResult {
+public:
+    VerifyResult(const MIRModule &module, const KlassHierarchy &klassHierarchy, MemPool &memPool)
+        : module(module),
+          klassHierarchy(klassHierarchy),
+          allocator(&memPool),
+          classesCorrectness(allocator.Adapter()),
+          classesPragma(allocator.Adapter())
+    {
+    }
+
+    ~VerifyResult() = default;
+
+    const KlassHierarchy &GetKlassHierarchy() const
+    {
+        return klassHierarchy;
+    }
+
+    const MIRModule &GetMIRModule() const
+    {
+        return module;
+    }
+
+    const MIRFunction *GetCurrentFunction() const
+    {
+        return module.GetFunctionList().front();
+    }
+
+    const std::string &GetCurrentClassName() const
+    {
+        return GetCurrentFunction()->GetClassType()->GetName();
+    }
+
+    const ClassVerifyPragmas &GetDeferredClassesPragma() const
+    {
+        return classesPragma;
+    }
+
+    void AddPragmaVerifyError(const std::string &className, std::string errMsg);
+    void AddPragmaAssignableCheck(const std::string &className, std::string fromType, std::string toType);
+    void AddPragmaExtendFinalCheck(const std::string &className);
+    void AddPragmaOverrideFinalCheck(const std::string &className);
+
+    const MapleUnorderedMap<std::string, bool> &GetResultMap() const
+    {
+        return classesCorrectness;
+    }
+    void SetClassCorrectness(const std::string &className, bool result)
+    {
+        classesCorrectness[className] = result;
+    }
+
+    bool HasErrorNotDeferred() const
+    {
+        for (auto &classResult : classesCorrectness) {
+            if (!classResult.second) {
+                if (classesPragma.find(classResult.first) == classesPragma.end()) {
+                    // Verify result is not OK, but has no deferred check or verify error in runtime
+                    return true;
+                }
+            }
+        }
+        return false;
+    }
+
+private:
+    bool HasVerifyError(const std::vector<const VerifyPragmaInfo *> &pragmaInfoPtrVec) const;
+    bool HasSamePragmaInfo(const std::vector<const VerifyPragmaInfo *> &pragmaInfoPtrVec,
+                           const VerifyPragmaInfo &verifyPragmaInfo) const;
+
+    const MIRModule &module;
+    const KlassHierarchy &klassHierarchy;
+    MapleAllocator allocator;
+    // classesCorrectness, correctness is true only if the class is verified OK
+    MapleUnorderedMap<std::string, bool> classesCorrectness;
+    // classesPragma
+    ClassVerifyPragmas classesPragma;
+};
+
+class VerificationPhaseResult : public AnalysisResult {
+public:
+    VerificationPhaseResult(MemPool &mp, const VerifyResult &verifyResult)
+        : AnalysisResult(&mp), verifyResult(verifyResult)
+    {
+    }
+    ~VerificationPhaseResult() = default;
+
+    const ClassVerifyPragmas &GetDeferredClassesPragma() const
+    {
+        return verifyResult.GetDeferredClassesPragma();
+    }
+
+private:
+    const VerifyResult &verifyResult;
+};
+
+#ifdef NOT_USED
+class DoVerification : public ModulePhase {
+public:
+    explicit DoVerification(ModulePhaseID id) : ModulePhase(id) {}
+
+    AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override;
+    std::string PhaseName() const override
+    {
+        return "verification";
+    }
+
+    ~DoVerification() = default;
+
+private:
+    void VerifyModule(MIRModule &module, VerifyResult &result) const;
+    void DeferredCheckFinalClassAndMethod(VerifyResult &result) const;
+    bool IsLazyBindingOrDecouple(const KlassHierarchy &klassHierarchy) const;
+    bool NeedRuntimeFinalCheck(const KlassHierarchy &klassHierarchy, const std::string &className) const;
+    void CheckExtendFinalClass(VerifyResult &result) const;
+};
+#endif
+} // namespace maple
+#endif // MAPLEIR_VERIFICATION_PHASE_H
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/verify_annotation.h b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_annotation.h
new file mode 100644
index 0000000000000000000000000000000000000000..a6022129d6d3fbd9d5934d692ddd89bc98eaeb81
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_annotation.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLEIR_VERIFY_ANNOTATION_H
+#define MAPLEIR_VERIFY_ANNOTATION_H
+#include "mir_module.h"
+#include "mir_type.h"
+#include "verify_pragma_info.h"
+
+namespace maple {
+void AddVerfAnnoThrowVerifyError(MIRModule &md, const ThrowVerifyErrorPragma &info, MIRStructType &clsType);
+void AddVerfAnnoAssignableCheck(MIRModule &md, std::vector<const AssignableCheckPragma *> &info,
+                                MIRStructType &clsType);
+void AddVerfAnnoExtendFinalCheck(MIRModule &md, MIRStructType &clsType);
+void AddVerfAnnoOverrideFinalCheck(MIRModule &md, MIRStructType &clsType);
+} // namespace maple
+#endif // MAPLEIR_VERIFY_ANNOTATION_H
\ No newline at end of file
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/verify_mark.h b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_mark.h
new file mode 100644
index 0000000000000000000000000000000000000000..806dccfb65060e9e9a7791868e799b1689358bc8
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_mark.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLEALL_VERIFY_MARK_H
+#define MAPLEALL_VERIFY_MARK_H
+#include "class_hierarchy.h"
+#include "verify_pragma_info.h"
+
+namespace maple {
+#ifdef NOT_USED
+class DoVerifyMark : public ModulePhase {
+public:
+    explicit DoVerifyMark(ModulePhaseID id) : ModulePhase(id) {}
+
+    AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override;
+
+    std::string PhaseName() const override
+    {
+        return "verifymark";
+    }
+
+    ~DoVerifyMark() override = default;
+
+private:
+    void AddAnnotations(MIRModule &module, const Klass &klass,
+                        const std::vector<const VerifyPragmaInfo *> &pragmaInfoVec);
+};
+#endif
+} // namespace maple
+#endif // MAPLEALL_VERIFY_MARK_H
\ No newline at end of file
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/include/verify_pragma_info.h b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_pragma_info.h
new file mode 100644
index 0000000000000000000000000000000000000000..7810cc4f337b8af99e09150295c17b51905ad1c1
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/include/verify_pragma_info.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MAPLEIR_VERIFY_PRAGMA_INFO_H
+#define MAPLEIR_VERIFY_PRAGMA_INFO_H
+#include <string>
+#include <utility>
+
+namespace maple {
+enum PragmaInfoType { kThrowVerifyError, kAssignableCheck, kExtendFinalCheck, kOverrideFinalCheck };
+
+class VerifyPragmaInfo {
+public:
+    VerifyPragmaInfo() = default;
+    virtual ~VerifyPragmaInfo() = default;
+
+    virtual PragmaInfoType GetPragmaType() const = 0;
+    bool IsEqualTo(const VerifyPragmaInfo &pragmaInfo) const
+    {
+        return GetPragmaType() == pragmaInfo.GetPragmaType();
+    }
+    bool IsVerifyError() const
+    {
+        return GetPragmaType() == kThrowVerifyError;
+    }
+    bool IsAssignableCheck() const
+    {
+        return GetPragmaType() == kAssignableCheck;
+    }
+    bool IsExtendFinalCheck() const
+    {
+        return GetPragmaType() == kExtendFinalCheck;
+    }
+    bool IsOverrideFinalCheck() const
+    {
+        return GetPragmaType() == kOverrideFinalCheck;
+    }
+};
+
+class ThrowVerifyErrorPragma : public VerifyPragmaInfo {
+public:
+    explicit ThrowVerifyErrorPragma(std::string errorMessage)
+        : VerifyPragmaInfo(), errorMessage(std::move(errorMessage))
+    {
+    }
+    ~ThrowVerifyErrorPragma() = default;
+
+    PragmaInfoType GetPragmaType() const override
+    {
+        return kThrowVerifyError;
+    }
+
+    const std::string &GetMessage() const
+    {
+        return errorMessage;
+    }
+
+private:
+    std::string errorMessage;
+};
+
+class AssignableCheckPragma : public VerifyPragmaInfo {
+public:
+    AssignableCheckPragma(std::string fromType, std::string toType)
+        : VerifyPragmaInfo(), fromType(std::move(fromType)), toType(std::move(toType))
+    {
+    }
+    ~AssignableCheckPragma() = default;
+
+    PragmaInfoType GetPragmaType() const override
+    {
+        return kAssignableCheck;
+    }
+
+    bool IsEqualTo(const AssignableCheckPragma &pragma) const
+    {
+        return fromType == pragma.GetFromType() && toType == pragma.GetToType();
+    }
+
+    const std::string &GetFromType() const
+    {
+        return fromType;
+    }
+
+
const std::string &GetToType() const + { + return toType; + } + +private: + std::string fromType; + std::string toType; +}; + +class ExtendFinalCheckPragma : public VerifyPragmaInfo { +public: + ExtendFinalCheckPragma() : VerifyPragmaInfo() {} + ~ExtendFinalCheckPragma() = default; + + PragmaInfoType GetPragmaType() const override + { + return kExtendFinalCheck; + } +}; + +class OverrideFinalCheckPragma : public VerifyPragmaInfo { +public: + OverrideFinalCheckPragma() : VerifyPragmaInfo() {} + ~OverrideFinalCheckPragma() = default; + + PragmaInfoType GetPragmaType() const override + { + return kOverrideFinalCheck; + } +}; +} // namespace maple +#endif // MAPLEIR_VERIFY_PRAGMA_INFO_H diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_export.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_export.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ff851a44f5346f0a2556a0c31480aa2e31e10dc --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_export.cpp @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +#include "bin_mplt.h" +#include +#include + +using namespace std; +namespace maple { +void BinaryMplExport::OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString) +{ + if (!mod.IsWithDbgInfo()) { + Write(0); + return; + } + WriteNum(infoVector.size()); + for (uint32 i = 0; i < infoVector.size(); i++) { + OutputStr(infoVector[i].first); + WriteNum(infoVectorIsString[i] ? 
1 : 0); + if (!infoVectorIsString[i]) { + WriteNum(infoVector[i].second); + } else { + OutputStr(GStrIdx(infoVector[i].second)); + } + } +} + +void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) +{ + WriteNum(kBinFuncIdInfoStart); + WriteNum(func->GetPuidxOrigin()); // the funcid + OutputInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(func->GetFrameSize()); + } +} + +void BinaryMplExport::OutputBaseNode(const BaseNode *b) +{ + Write(static_cast(b->GetOpCode())); + Write(static_cast(b->GetPrimType())); +} + +void BinaryMplExport::OutputLocalSymbol(MIRSymbol *sym) +{ + std::unordered_map::iterator it = localSymMark.find(sym); + if (it != localSymMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinSymbol); + OutputStr(sym->GetNameStrIdx()); + WriteNum(sym->GetSKind()); + WriteNum(sym->GetStorageClass()); + size_t mark = localSymMark.size(); + localSymMark[sym] = mark; + OutputTypeAttrs(sym->GetAttrs()); + WriteNum(static_cast(sym->GetIsTmp())); + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + OutputSrcPos(sym->GetSrcPosition()); + } + OutputType(sym->GetTyIdx()); + if (sym->GetSKind() == kStPreg) { + OutputPreg(sym->GetPreg()); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + OutputConst(sym->GetKonst()); + } else if (sym->GetSKind() == kStFunc) { + OutputFuncViaSym(sym->GetFunction()->GetPuidx()); + } else { + CHECK_FATAL(false, "should not used"); + } +} + +void BinaryMplExport::OutputPreg(MIRPreg *preg) +{ + if (preg->GetPregNo() < 0) { + WriteNum(kBinSpecialReg); + Write(static_cast(-preg->GetPregNo())); + return; + } + std::unordered_map::iterator it = localPregMark.find(preg); + if (it != localPregMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinPreg); + Write(static_cast(preg->GetPrimType())); + size_t mark = localPregMark.size(); + localPregMark[preg] = mark; +} + +void BinaryMplExport::OutputLabel(LabelIdx lidx) +{ + std::unordered_map::iterator it = labelMark.find(lidx); + if (it != labelMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinLabel); + size_t mark = labelMark.size(); + labelMark[lidx] = mark; +} + +void BinaryMplExport::OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab) +{ + WriteNum(kBinTypenameStart); + WriteNum(static_cast(typeNameTab->Size())); + for (std::pair it : typeNameTab->GetGStrIdxToTyIdxMap()) { + OutputStr(it.first); + OutputType(it.second); + } +} + +void BinaryMplExport::OutputFormalsStIdx(MIRFunction *func) +{ + WriteNum(kBinFormalStart); + WriteNum(func->GetFormalDefVec().size()); + for (FormalDef formalDef : func->GetFormalDefVec()) { + OutputLocalSymbol(formalDef.formalSym); + } +} + +void BinaryMplExport::OutputAliasMap(MapleMap &aliasVarMap) +{ + WriteNum(kBinAliasMapStart); + WriteInt(static_cast(aliasVarMap.size())); + for (std::pair it : aliasVarMap) { + OutputStr(it.first); + OutputStr(it.second.mplStrIdx); + OutputType(it.second.tyIdx); + OutputStr(it.second.sigStrIdx); + } +} + +void BinaryMplExport::OutputFuncViaSym(PUIdx puIdx) +{ + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + OutputSymbol(funcSt); +} + +void BinaryMplExport::OutputExpression(BaseNode *e) +{ + OutputBaseNode(e); + switch (e->GetOpCode()) { + // leaf + case OP_constval: { + MIRConst *constVal = static_cast(e)->GetConstVal(); + OutputConst(constVal); + return; + } + 
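+        // Clarifying comment (added): leaf expressions such as the constval case above serialize
+        // their payload and return without visiting children; all other opcodes only write their
+        // extra fields in this switch and rely on the operand loop at the end of OutputExpression
+        // to recurse into e->Opnd(i).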
case OP_conststr: { + UStrIdx strIdx = static_cast(e)->GetStrIdx(); + OutputUsrStr(strIdx); + return; + } + case OP_addroflabel: { + AddroflabelNode *lNode = static_cast(e); + OutputLabel(lNode->GetOffset()); + return; + } + case OP_addroffunc: { + AddroffuncNode *addrNode = static_cast(e); + OutputFuncViaSym(addrNode->GetPUIdx()); + return; + } + case OP_sizeoftype: { + SizeoftypeNode *sot = static_cast(e); + OutputType(sot->GetTyIdx()); + return; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: + case OP_dreadoff: { + StIdx stIdx; + if (e->GetOpCode() == OP_addrof || e->GetOpCode() == OP_dread) { + AddrofNode *drNode = static_cast(e); + WriteNum(drNode->GetFieldID()); + stIdx = drNode->GetStIdx(); + } else { + DreadoffNode *droff = static_cast(e); + WriteNum(droff->offset); + stIdx = droff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + return; + } + case OP_regread: { + RegreadNode *regreadNode = static_cast(e); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(regreadNode->GetRegIdx()); + OutputPreg(preg); + return; + } + case OP_gcmalloc: + case OP_gcpermalloc: + case OP_stackmalloc: { + GCMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + return; + } + // unary + case OP_ceil: + case OP_cvt: + case OP_floor: + case OP_trunc: { + TypeCvtNode *typecvtNode = static_cast(e); + Write(static_cast(typecvtNode->FromType())); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(e); + OutputType(retypeNode->GetTyIdx()); + break; + } + case OP_iread: + case OP_iaddrof: { + IreadNode *irNode = static_cast(e); + OutputType(irNode->GetTyIdx()); + WriteNum(irNode->GetFieldID()); + break; + } + case OP_ireadoff: { + IreadoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_ireadfpoff: { + IreadFPoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_sext: + case OP_zext: + case OP_extractbits: { + ExtractbitsNode *extNode = static_cast(e); + Write(extNode->GetBitsOffset()); + Write(extNode->GetBitsSize()); + break; + } + case OP_depositbits: { + DepositbitsNode *dbNode = static_cast(e); + Write(dbNode->GetBitsOffset()); + Write(dbNode->GetBitsSize()); + break; + } + case OP_gcmallocjarray: + case OP_gcpermallocjarray: { + JarrayMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + break; + } + // binary + case OP_sub: + case OP_mul: + case OP_div: + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + case OP_add: { + break; + } + case OP_eq: + case OP_ne: + case OP_lt: + case OP_gt: + case OP_le: + case OP_ge: + case OP_cmpg: + case OP_cmpl: + case OP_cmp: { + CompareNode *cmpNode = static_cast(e); + Write(static_cast(cmpNode->GetOpndType())); + break; + } + case OP_resolveinterfacefunc: + case OP_resolvevirtualfunc: { + ResolveFuncNode *rsNode = static_cast(e); + OutputFuncViaSym(rsNode->GetPuIdx()); + break; + } + // ternary + case OP_select: { + break; + } + // nary + case OP_array: { + ArrayNode *arrNode = static_cast(e); + OutputType(arrNode->GetTyIdx()); + Write(static_cast(arrNode->GetBoundsCheck())); + WriteNum(static_cast(arrNode->NumOpnds())); + break; + } + case OP_intrinsicop: { + IntrinsicopNode *intrnNode = static_cast(e); + 
WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + case OP_intrinsicopwithtype: { + IntrinsicopNode *intrnNode = static_cast(e); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + default: + break; + } + for (uint32 i = 0; i < e->NumOpnds(); ++i) { + OutputExpression(e->Opnd(i)); + } +} + +static SrcPosition lastOutputSrcPosition; + +void BinaryMplExport::OutputSrcPos(const SrcPosition &pos) +{ + if (!mod.IsWithDbgInfo()) { + return; + } + if (pos.FileNum() == 0 || pos.LineNum() == 0) { // error case, so output 0 + WriteNum(lastOutputSrcPosition.RawData()); + WriteNum(lastOutputSrcPosition.LineNum()); + return; + } + WriteNum(pos.RawData()); + WriteNum(pos.LineNum()); + lastOutputSrcPosition = pos; +} + +void BinaryMplExport::OutputReturnValues(const CallReturnVector *retv) +{ + WriteNum(kBinReturnvals); + WriteNum(static_cast(retv->size())); + for (uint32 i = 0; i < retv->size(); i++) { + RegFieldPair rfp = (*retv)[i].second; + if (rfp.IsReg()) { + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rfp.GetPregIdx()); + OutputPreg(preg); + } else { + WriteNum(0); + WriteNum((rfp.GetFieldID())); + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol((*retv)[i].first)); + } + } +} + +void BinaryMplExport::OutputBlockNode(BlockNode *block) +{ + WriteNum(kBinNodeBlock); + if (!block->GetStmtNodes().empty()) { + OutputSrcPos(block->GetSrcPos()); + } else { + OutputSrcPos(SrcPosition()); // output 0 + } + int32 num = 0; + uint64 idx = buf.size(); + ExpandFourBuffSize(); // place holder, Fixup later + for (StmtNode *s = block->GetFirst(); s; s = s->GetNext()) { + bool doneWithOpnds = false; + OutputSrcPos(s->GetSrcPos()); + WriteNum(s->GetOpCode()); + switch (s->GetOpCode()) { + case OP_dassign: + case OP_dassignoff: { + StIdx stIdx; + if (s->GetOpCode() == OP_dassign) { + DassignNode *dass = static_cast(s); + WriteNum(dass->GetFieldID()); + stIdx = dass->GetStIdx(); + } else { + DassignoffNode *dassoff = static_cast(s); + WriteNum(dassoff->GetPrimType()); + WriteNum(dassoff->offset); + stIdx = dassoff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + break; + } + case OP_regassign: { + RegassignNode *rass = static_cast(s); + Write(static_cast(rass->GetPrimType())); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rass->GetRegIdx()); + OutputPreg(preg); + break; + } + case OP_iassign: { + IassignNode *iass = static_cast(s); + OutputType(iass->GetTyIdx()); + WriteNum(iass->GetFieldID()); + break; + } + case OP_iassignoff: { + IassignoffNode *iassoff = static_cast(s); + Write(static_cast(iassoff->GetPrimType())); + WriteNum(iassoff->GetOffset()); + break; + } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *iassfpoff = static_cast(s); + Write(static_cast(iassfpoff->GetPrimType())); + WriteNum(iassfpoff->GetOffset()); + break; + } + case OP_blkassignoff: { + BlkassignoffNode *bass = static_cast(s); + int32 offsetAlign = (bass->offset << 4) | bass->alignLog2; + WriteNum(offsetAlign); + WriteNum(bass->blockSize); + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); 
+ if (s->GetOpCode() == OP_polymorphiccall) { + OutputType(static_cast(callnode)->GetTyIdx()); + } + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_polymorphiccallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputType(callnode->GetTyIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallproto: + case OP_icall: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + OutputReturnValues(&icallnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccall: + case OP_xintrinsiccall: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_syncenter: + case OP_syncexit: + case OP_return: { + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_jscatch: + case OP_cppcatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_retsub: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstorestore: + case OP_membarstoreload: { + break; + } + case OP_eval: + case OP_throw: + case OP_free: + case OP_decref: + case OP_incref: + case OP_decrefreset: + CASE_OP_ASSERT_NONNULL + case OP_igoto: { + break; + } + case OP_label: { + LabelNode *lNode = static_cast(s); + OutputLabel(lNode->GetLabelIdx()); + break; + } + case OP_goto: + case OP_gosub: { + GotoNode *gtoNode = static_cast(s); + OutputLabel(gtoNode->GetOffset()); + break; + } + case OP_brfalse: + case OP_brtrue: { + CondGotoNode *cgotoNode = static_cast(s); + OutputLabel(cgotoNode->GetOffset()); + break; + } + case OP_switch: { + SwitchNode *swNode = static_cast(s); + OutputLabel(swNode->GetDefaultLabel()); + WriteNum(static_cast(swNode->GetSwitchTable().size())); + for (CasePair cpair : swNode->GetSwitchTable()) { + WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_rangegoto: { + RangeGotoNode *rgoto = static_cast(s); + WriteNum(rgoto->GetTagOffset()); + WriteNum(static_cast(rgoto->GetRangeGotoTable().size())); + for (SmallCasePair cpair : rgoto->GetRangeGotoTable()) { + 
WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_jstry: { + JsTryNode *tryNode = static_cast(s); + OutputLabel(tryNode->GetCatchOffset()); + OutputLabel(tryNode->GetFinallyOffset()); + break; + } + case OP_cpptry: + case OP_try: { + TryNode *tryNode = static_cast(s); + WriteNum(static_cast(tryNode->GetOffsetsCount())); + for (LabelIdx lidx : tryNode->GetOffsets()) { + OutputLabel(lidx); + } + break; + } + case OP_catch: { + CatchNode *catchNode = static_cast(s); + WriteNum(static_cast(catchNode->GetExceptionTyIdxVec().size())); + for (TyIdx tidx : catchNode->GetExceptionTyIdxVec()) { + OutputType(tidx); + } + break; + } + case OP_comment: { + string str(static_cast(s)->GetComment().c_str()); + WriteAsciiStr(str); + break; + } + case OP_dowhile: + case OP_while: { + WhileStmtNode *whileNode = static_cast(s); + OutputBlockNode(whileNode->GetBody()); + OutputExpression(whileNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_if: { + IfStmtNode *ifNode = static_cast(s); + bool hasElsePart = ifNode->GetElsePart() != nullptr; + WriteNum(static_cast(hasElsePart)); + OutputBlockNode(ifNode->GetThenPart()); + if (hasElsePart) { + OutputBlockNode(ifNode->GetElsePart()); + } + OutputExpression(ifNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_block: { + BlockNode *blockNode = static_cast(s); + OutputBlockNode(blockNode); + doneWithOpnds = true; + break; + } + case OP_asm: { + AsmNode *asmNode = static_cast(s); + WriteNum(asmNode->qualifiers); + string str(asmNode->asmString.c_str()); + WriteAsciiStr(str); + // the outputs + size_t count = asmNode->asmOutputs.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->outputConstraints[i]); + } + OutputReturnValues(&asmNode->asmOutputs); + // the clobber list + count = asmNode->clobberList.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->clobberList[i]); + } + // the labels + count = asmNode->gotoLabels.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputLabel(asmNode->gotoLabels[i]); + } + // the inputs + WriteNum(asmNode->NumOpnds()); + for (uint8 i = 0; i < asmNode->numOpnds; ++i) { + OutputUsrStr(asmNode->inputConstraints[i]); + } + break; + } + default: + CHECK_FATAL(false, "Unhandled opcode %d", s->GetOpCode()); + break; + } + num++; + if (!doneWithOpnds) { + for (uint32 i = 0; i < s->NumOpnds(); ++i) { + OutputExpression(s->Opnd(i)); + } + } + } + Fixup(idx, num); +} + +void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet) +{ + Fixup(contentIdx, static_cast(buf.size())); + WriteNum(kBinFunctionBodyStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); /// total size of this field to ~BIN_FUNCTIONBODY_START + uint64 outFunctionBodySizeIdx = buf.size(); + ExpandFourBuffSize(); /// size of outFunctionBody + int32 size = 0; + + if (not2mplt) { + for (MIRFunction *func : GetMIRModule().GetFunctionList()) { + curFunc = func; + if (func->GetAttr(FUNCATTR_optimized)) { + continue; + } + if (func->GetCodeMemPool() == nullptr || func->GetBody() == nullptr) { + continue; + } + if (dumpFuncSet != nullptr && !dumpFuncSet->empty()) { + // output only if this func matches any name in *dumpFuncSet + const std::string &name = func->GetName(); + bool matched = false; + for (std::string elem : *dumpFuncSet) { + if (name.find(elem.c_str()) != string::npos) { + matched = true; + break; + } + } + if (!matched) { + continue; + } 
+ } + localSymMark.clear(); + localSymMark[nullptr] = 0; + localPregMark.clear(); + localPregMark[nullptr] = 0; + labelMark.clear(); + labelMark[0] = 0; + OutputFunction(func->GetPuidx()); + CHECK_FATAL(func->GetBody() != nullptr, "WriteFunctionBodyField: no function body"); + OutputFuncIdInfo(func); + OutputLocalTypeNameTab(func->GetTypeNameTab()); + OutputFormalsStIdx(func); + if (mod.GetFlavor() < kMmpl) { + OutputAliasMap(func->GetAliasVarMap()); + } + lastOutputSrcPosition = SrcPosition(); + OutputBlockNode(func->GetBody()); + size++; + } + } + + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); + Fixup(outFunctionBodySizeIdx, size); + return; +} +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_import.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_import.cpp new file mode 100644 index 0000000000000000000000000000000000000000..89e89eec95e0ed829e4b6eb1593305003425d022 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_func_import.cpp @@ -0,0 +1,954 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +using namespace std; + +namespace maple { +constexpr uint32 kOffset4bit = 4; +void BinaryMplImport::ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString) +{ + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + GStrIdx gStrIdx = ImportStr(); + bool isstring = (ReadNum() != 0); + infoVectorIsString.push_back(isstring); + if (isstring) { + GStrIdx fieldval = ImportStr(); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval.GetIdx())); + } else { + auto fieldval = static_cast(ReadNum()); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval)); + } + } +} + +void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) +{ + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinFuncIdInfoStart, "kBinFuncIdInfoStart expected"); + func->SetPuidxOrigin(static_cast(ReadNum())); + ImportInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + func->SetFrameSize(static_cast(ReadNum())); + } +} + +void BinaryMplImport::ImportBaseNode(Opcode &o, PrimType &typ) +{ + o = static_cast(Read()); + typ = static_cast(Read()); +} + +MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func) +{ + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localSymTab.size(), "index out of bounds"); + return localSymTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol in ImportLocalSymbol()"); + MIRSymbol *sym = func->GetSymTab()->CreateSymbol(kScopeLocal); + localSymTab.push_back(sym); + sym->SetNameStrIdx(ImportStr()); + (void)func->GetSymTab()->AddToStringSymbolMap(*sym); + sym->SetSKind(static_cast(ReadNum())); + 
+
+MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return nullptr;
+    }
+    if (tag < 0) {
+        CHECK_FATAL(static_cast<size_t>(-tag) < localSymTab.size(), "index out of bounds");
+        return localSymTab.at(static_cast<size_t>(-tag));
+    }
+    CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol in ImportLocalSymbol()");
+    MIRSymbol *sym = func->GetSymTab()->CreateSymbol(kScopeLocal);
+    localSymTab.push_back(sym);
+    sym->SetNameStrIdx(ImportStr());
+    (void)func->GetSymTab()->AddToStringSymbolMap(*sym);
+    sym->SetSKind(static_cast<MIRSymKind>(ReadNum()));
+    sym->SetStorageClass(static_cast<MIRStorageClass>(ReadNum()));
+    sym->SetAttrs(ImportTypeAttrs());
+    sym->SetIsTmp(ReadNum() != 0);
+    if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) {
+        ImportSrcPos(sym->GetSrcPosition());
+    }
+    sym->SetTyIdx(ImportType());
+    if (sym->GetSKind() == kStPreg) {
+        PregIdx pregidx = ImportPreg(func);
+        MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(pregidx);
+        sym->SetPreg(preg);
+    } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) {
+        sym->SetKonst(ImportConst(func));
+    } else if (sym->GetSKind() == kStFunc) {
+        PUIdx puIdx = ImportFuncViaSym(func);
+        sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx));
+    }
+    return sym;
+}
+
+PregIdx BinaryMplImport::ImportPreg(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return 0;
+    }
+    if (tag == kBinSpecialReg) {
+        return -Read();
+    }
+    if (tag < 0) {
+        CHECK_FATAL(static_cast<size_t>(-tag) < localPregTab.size(), "index out of bounds");
+        return localPregTab.at(static_cast<size_t>(-tag));
+    }
+    CHECK_FATAL(tag == kBinPreg, "expecting kBinPreg in ImportPreg()");
+
+    PrimType primType = static_cast<PrimType>(Read());
+    PregIdx pidx = func->GetPregTab()->CreatePreg(primType);
+    localPregTab.push_back(pidx);
+    return pidx;
+}
+
+LabelIdx BinaryMplImport::ImportLabel(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return 0;
+    }
+    if (tag < 0) {
+        CHECK_FATAL(static_cast<size_t>(-tag) < localLabelTab.size(), "index out of bounds");
+        return localLabelTab.at(static_cast<size_t>(-tag));
+    }
+    CHECK_FATAL(tag == kBinLabel, "kBinLabel expected in ImportLabel()");
+
+    LabelIdx lidx = func->GetLabelTab()->CreateLabel();
+    localLabelTab.push_back(lidx);
+    return lidx;
+}
+
+void BinaryMplImport::ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab)
+{
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == kBinTypenameStart, "kBinTypenameStart expected in ImportLocalTypeNameTable()");
+    int64 size = ReadNum();
+    for (int64 i = 0; i < size; ++i) {
+        GStrIdx strIdx = ImportStr();
+        TyIdx tyIdx = ImportType();
+        typeNameTab->SetGStrIdxToTyIdx(strIdx, tyIdx);
+    }
+}
+
+void BinaryMplImport::ImportFormalsStIdx(MIRFunction *func)
+{
+    auto tag = ReadNum();
+    CHECK_FATAL(tag == kBinFormalStart, "kBinFormalStart expected in ImportFormalsStIdx()");
+    auto size = ReadNum();
+    for (int64 i = 0; i < size; ++i) {
+        func->GetFormalDefVec()[static_cast<size_t>(i)].formalSym = ImportLocalSymbol(func);
+    }
+}
+
+void BinaryMplImport::ImportAliasMap(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == kBinAliasMapStart, "kBinAliasMapStart expected in ImportAliasMap()");
+    int32 size = ReadInt();
+    for (int32 i = 0; i < size; ++i) {
+        MIRAliasVars aliasvars;
+        GStrIdx strIdx = ImportStr();
+        aliasvars.mplStrIdx = ImportStr();
+        aliasvars.tyIdx = ImportType();
+        (void)ImportStr();  // not assigning to mimic parser
+        func->GetAliasVarMap()[strIdx] = aliasvars;
+    }
+}
+
+PUIdx BinaryMplImport::ImportFuncViaSym(MIRFunction *func)
+{
+    MIRSymbol *sym = InSymbol(func);
+    MIRFunction *f = sym->GetFunction();
+    return f->GetPuidx();
+}
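+
+// Expression records are self-describing: one opcode byte and one PrimType
+// byte (ImportBaseNode), an opcode-specific payload, then the operand
+// expressions recursively, in exactly the order OutputExpression emitted
+// them. For example, an i32 add of a constant and a dread arrives as
+//   OP_add PTY_i32 [constval record] [dread record]
+// so a single recursive descent reconstructs the whole tree.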
+
+BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func)
+{
+    Opcode op;
+    PrimType typ;
+    ImportBaseNode(op, typ);
+    switch (op) {
+        // leaf
+        case OP_constval: {
+            MIRConst *constv = ImportConst(func);
+            ConstvalNode *constNode = mod.CurFuncCodeMemPool()->New<ConstvalNode>(constv);
+            constNode->SetPrimType(typ);
+            return constNode;
+        }
+        case OP_conststr: {
+            UStrIdx strIdx = ImportUsrStr();
+            ConststrNode *constNode = mod.CurFuncCodeMemPool()->New<ConststrNode>(typ, strIdx);
+            constNode->SetPrimType(typ);
+            return constNode;
+        }
+        case OP_addroflabel: {
+            AddroflabelNode *alabNode = mod.CurFuncCodeMemPool()->New<AddroflabelNode>();
+            alabNode->SetOffset(ImportLabel(func));
+            alabNode->SetPrimType(typ);
+            (void)func->GetLabelTab()->addrTakenLabels.insert(alabNode->GetOffset());
+            return alabNode;
+        }
+        case OP_addroffunc: {
+            PUIdx puIdx = ImportFuncViaSym(func);
+            MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx];
+            f->GetFuncSymbol()->SetAppearsInCode(true);
+            AddroffuncNode *addrNode = mod.CurFuncCodeMemPool()->New<AddroffuncNode>(typ, puIdx);
+            return addrNode;
+        }
+        case OP_sizeoftype: {
+            TyIdx tidx = ImportType();
+            SizeoftypeNode *sot = mod.CurFuncCodeMemPool()->New<SizeoftypeNode>(tidx);
+            return sot;
+        }
+        case OP_addrof:
+        case OP_addrofoff:
+        case OP_dread:
+        case OP_dreadoff: {
+            int32 num = static_cast<int32>(ReadNum());
+            StIdx stIdx;
+            stIdx.SetScope(static_cast<uint32>(ReadNum()));
+            MIRSymbol *sym = nullptr;
+            if (stIdx.Islocal()) {
+                sym = ImportLocalSymbol(func);
+                CHECK_FATAL(sym != nullptr, "null ptr check");
+            } else {
+                sym = InSymbol(nullptr);
+                CHECK_FATAL(sym != nullptr, "null ptr check");
+                if (op == OP_addrof) {
+                    sym->SetHasPotentialAssignment();
+                }
+            }
+            stIdx.SetIdx(sym->GetStIdx().Idx());
+            if (op == OP_addrof || op == OP_dread) {
+                AddrofNode *drNode = mod.CurFuncCodeMemPool()->New<AddrofNode>(op);
+                drNode->SetPrimType(typ);
+                drNode->SetStIdx(stIdx);
+                drNode->SetFieldID(num);
+                return drNode;
+            } else {
+                DreadoffNode *dreadoff = mod.CurFuncCodeMemPool()->New<DreadoffNode>(op);
+                dreadoff->SetPrimType(typ);
+                dreadoff->stIdx = stIdx;
+                dreadoff->offset = num;
+                return dreadoff;
+            }
+        }
+        case OP_regread: {
+            RegreadNode *regreadNode = mod.CurFuncCodeMemPool()->New<RegreadNode>();
+            regreadNode->SetRegIdx(ImportPreg(func));
+            regreadNode->SetPrimType(typ);
+            return regreadNode;
+        }
+        case OP_gcmalloc:
+        case OP_gcpermalloc:
+        case OP_stackmalloc: {
+            TyIdx tyIdx = ImportType();
+            GCMallocNode *gcNode = mod.CurFuncCodeMemPool()->New<GCMallocNode>(op, typ, tyIdx);
+            return gcNode;
+        }
+        // unary
+        case OP_abs:
+        case OP_bnot:
+        case OP_lnot:
+        case OP_neg:
+        case OP_recip:
+        case OP_sqrt:
+        case OP_alloca:
+        case OP_malloc: {
+            UnaryNode *unNode = mod.CurFuncCodeMemPool()->New<UnaryNode>(op, typ);
+            unNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return unNode;
+        }
+        case OP_ceil:
+        case OP_cvt:
+        case OP_floor:
+        case OP_trunc: {
+            TypeCvtNode *typecvtNode = mod.CurFuncCodeMemPool()->New<TypeCvtNode>(op, typ);
+            typecvtNode->SetFromType(static_cast<PrimType>(Read()));
+            typecvtNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return typecvtNode;
+        }
+        case OP_retype: {
+            RetypeNode *retypeNode = mod.CurFuncCodeMemPool()->New<RetypeNode>(typ);
+            retypeNode->SetTyIdx(ImportType());
+            retypeNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return retypeNode;
+        }
+        case OP_iread:
+        case OP_iaddrof: {
+            IreadNode *irNode = mod.CurFuncCodeMemPool()->New<IreadNode>(op, typ);
+            irNode->SetTyIdx(ImportType());
+            irNode->SetFieldID(static_cast<FieldID>(ReadNum()));
+            irNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return irNode;
+        }
+        case OP_ireadoff: {
+            int32 ofst = static_cast<int32>(ReadNum());
+            IreadoffNode *irNode = mod.CurFuncCodeMemPool()->New<IreadoffNode>(typ, ofst);
+            irNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return irNode;
+        }
+        case OP_ireadfpoff: {
+            int32 ofst = static_cast<int32>(ReadNum());
+            IreadFPoffNode *irNode = mod.CurFuncCodeMemPool()->New<IreadFPoffNode>(typ, ofst);
+            return irNode;
+        }
+        case OP_sext:
+        case OP_zext:
+        case OP_extractbits: {
+            ExtractbitsNode *extNode = mod.CurFuncCodeMemPool()->New<ExtractbitsNode>(op, typ);
+            extNode->SetBitsOffset(Read());
+            extNode->SetBitsSize(Read());
+            extNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return extNode;
+        }
+        case OP_depositbits: {
+            DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New<DepositbitsNode>(op, typ);
+            dbNode->SetBitsOffset(static_cast<uint8>(ReadNum()));
+            dbNode->SetBitsSize(static_cast<uint8>(ReadNum()));
+            dbNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            dbNode->SetOpnd(ImportExpression(func), kSecondOpnd);
+            return dbNode;
+        }
+        case OP_gcmallocjarray:
+        case OP_gcpermallocjarray: {
+            JarrayMallocNode *gcNode = mod.CurFuncCodeMemPool()->New<JarrayMallocNode>(op, typ);
+            gcNode->SetTyIdx(ImportType());
+            gcNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            return gcNode;
+        }
+        // binary
+        case OP_sub:
+        case OP_mul:
+        case OP_div:
+        case OP_rem:
+        case OP_ashr:
+        case OP_lshr:
+        case OP_shl:
+        case OP_max:
+        case OP_min:
+        case OP_band:
+        case OP_bior:
+        case OP_bxor:
+        case OP_cand:
+        case OP_cior:
+        case OP_land:
+        case OP_lior:
+        case OP_add: {
+            BinaryNode *binNode = mod.CurFuncCodeMemPool()->New<BinaryNode>(op, typ);
+            binNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            binNode->SetOpnd(ImportExpression(func), kSecondOpnd);
+            return binNode;
+        }
+        case OP_eq:
+        case OP_ne:
+        case OP_lt:
+        case OP_gt:
+        case OP_le:
+        case OP_ge:
+        case OP_cmpg:
+        case OP_cmpl:
+        case OP_cmp: {
+            CompareNode *cmpNode = mod.CurFuncCodeMemPool()->New<CompareNode>(op, typ);
+            cmpNode->SetOpndType(static_cast<PrimType>(Read()));
+            cmpNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            cmpNode->SetOpnd(ImportExpression(func), kSecondOpnd);
+            return cmpNode;
+        }
+        case OP_resolveinterfacefunc:
+        case OP_resolvevirtualfunc: {
+            ResolveFuncNode *rsNode = mod.CurFuncCodeMemPool()->New<ResolveFuncNode>(op, typ);
+            rsNode->SetPUIdx(ImportFuncViaSym(func));
+            rsNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            rsNode->SetOpnd(ImportExpression(func), kSecondOpnd);
+            return rsNode;
+        }
+        // ternary
+        case OP_select: {
+            TernaryNode *tNode = mod.CurFuncCodeMemPool()->New<TernaryNode>(op, typ);
+            tNode->SetOpnd(ImportExpression(func), kFirstOpnd);
+            tNode->SetOpnd(ImportExpression(func), kSecondOpnd);
+            tNode->SetOpnd(ImportExpression(func), kThirdOpnd);
+            return tNode;
+        }
+        // nary
+        case OP_array: {
+            TyIdx tidx = ImportType();
+            bool boundsCheck = static_cast<bool>(Read());
+            ArrayNode *arrNode =
+                mod.CurFuncCodeMemPool()->New<ArrayNode>(func->GetCodeMPAllocator(), typ, tidx, boundsCheck);
+            auto n = static_cast<uint32>(ReadNum());
+            for (uint32 i = 0; i < n; ++i) {
+                arrNode->GetNopnd().push_back(ImportExpression(func));
+            }
+            arrNode->SetNumOpnds(static_cast<uint8>(arrNode->GetNopnd().size()));
+            return arrNode;
+        }
+        case OP_intrinsicop: {
+            IntrinsicopNode *intrnNode =
+                mod.CurFuncCodeMemPool()->New<IntrinsicopNode>(func->GetCodeMPAllocator(), op, typ);
+            intrnNode->SetIntrinsic(static_cast<MIRIntrinsicID>(ReadNum()));
+            auto n = static_cast<uint32>(ReadNum());
+            for (uint32 i = 0; i < n; ++i) {
+                intrnNode->GetNopnd().push_back(ImportExpression(func));
+            }
+            intrnNode->SetNumOpnds(static_cast<uint8>(intrnNode->GetNopnd().size()));
+            return intrnNode;
+        }
+        case OP_intrinsicopwithtype: {
+            IntrinsicopNode *intrnNode =
+                mod.CurFuncCodeMemPool()->New<IntrinsicopNode>(func->GetCodeMPAllocator(), OP_intrinsicopwithtype, typ);
+            intrnNode->SetIntrinsic((MIRIntrinsicID)ReadNum());
+            intrnNode->SetTyIdx(ImportType());
+            auto n = static_cast<uint32>(ReadNum());
+            for (uint32 i = 0; i < n; ++i) {
+                intrnNode->GetNopnd().push_back(ImportExpression(func));
+            }
+            intrnNode->SetNumOpnds(static_cast<uint8>(intrnNode->GetNopnd().size()));
+            return intrnNode;
+        }
+        default:
+            CHECK_FATAL(false, "Unhandled op %d", op);
+            break;
+    }
+}
+
+void BinaryMplImport::ImportSrcPos(SrcPosition &pos)
+{
+    if (!mod.IsWithDbgInfo()) {
+        return;
+    }
+    pos.SetRawData(static_cast<uint32>(ReadNum()));
+    pos.SetLineNum(static_cast<uint32>(ReadNum()));
+}
+
+void BinaryMplImport::ImportReturnValues(MIRFunction *func, CallReturnVector *retv)
+{
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == kBinReturnvals, "expecting return values");
+    auto size = static_cast<uint32>(ReadNum());
+    for (uint32 i = 0; i < size; ++i) {
+        RegFieldPair rfp;
+        rfp.SetPregIdx(ImportPreg(func));
+        if (rfp.IsReg()) {
+            retv->push_back(std::make_pair(StIdx(), rfp));
+            continue;
+        }
+        rfp.SetFieldID(static_cast<FieldID>(ReadNum()));
+        MIRSymbol *lsym = ImportLocalSymbol(func);
+        CHECK_FATAL(lsym != nullptr, "null ptr check");
+        retv->push_back(std::make_pair(lsym->GetStIdx(), rfp));
+        if (lsym->GetName().find("L_STR") == 0) {
+            MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lsym->GetTyIdx());
+            CHECK_FATAL(ty->GetKind() == kTypePointer, "Pointer type expected for L_STR prefix");
+            MIRPtrType tempType(static_cast<MIRPtrType *>(ty)->GetPointedTyIdx(), PTY_ptr);
+            TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&tempType);
+            lsym->SetTyIdx(newTyidx);
+        }
+    }
+}
+
+BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    DEBUG_ASSERT(tag == kBinNodeBlock, "expecting a BlockNode");
+
+    BlockNode *block = func->GetCodeMemPool()->New<BlockNode>();
+    Opcode op;
+    uint8 numOpr;
+    ImportSrcPos(block->GetSrcPos());
+    int32 size = ReadInt();
+    for (int32 k = 0; k < size; ++k) {
+        SrcPosition thesrcPosition;
+        ImportSrcPos(thesrcPosition);
+        op = static_cast<Opcode>(ReadNum());
+        StmtNode *stmt = nullptr;
+        switch (op) {
+            case OP_dassign:
+            case OP_dassignoff: {
+                PrimType primType = PTY_void;
+                if (op == OP_dassignoff) {
+                    primType = static_cast<PrimType>(ReadNum());
+                }
+                int32 num = static_cast<int32>(ReadNum());
+                StIdx stIdx;
+                stIdx.SetScope(static_cast<uint32>(ReadNum()));
+                MIRSymbol *sym = nullptr;
+                if (stIdx.Islocal()) {
+                    sym = ImportLocalSymbol(func);
+                    CHECK_FATAL(sym != nullptr, "null ptr check");
+                } else {
+                    sym = InSymbol(nullptr);
+                    CHECK_FATAL(sym != nullptr, "null ptr check");
+                    sym->SetHasPotentialAssignment();
+                }
+                stIdx.SetIdx(sym->GetStIdx().Idx());
+                if (op == OP_dassign) {
+                    DassignNode *s = func->GetCodeMemPool()->New<DassignNode>();
+                    s->SetStIdx(stIdx);
+                    s->SetFieldID(num);
+                    s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                    stmt = s;
+                } else {
+                    DassignoffNode *s = func->GetCodeMemPool()->New<DassignoffNode>();
+                    s->SetPrimType(primType);
+                    s->stIdx = stIdx;
+                    s->offset = num;
+                    s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                    stmt = s;
+                }
+                break;
+            }
+            case OP_regassign: {
+                RegassignNode *s = func->GetCodeMemPool()->New<RegassignNode>();
+                s->SetPrimType(static_cast<PrimType>(Read()));
+                s->SetRegIdx(ImportPreg(func));
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_iassign: {
+                IassignNode *s = func->GetCodeMemPool()->New<IassignNode>();
+                s->SetTyIdx(ImportType());
+                s->SetFieldID(static_cast<FieldID>(ReadNum()));
+                s->SetAddrExpr(ImportExpression(func));
+                s->SetRHS(ImportExpression(func));
+                stmt = s;
+                break;
+            }
+            case OP_iassignoff: {
+                IassignoffNode *s = func->GetCodeMemPool()->New<IassignoffNode>();
+                s->SetPrimType((PrimType)Read());
+                s->SetOffset(static_cast<int32>(ReadNum()));
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                s->SetOpnd(ImportExpression(func), kSecondOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_iassignspoff:
+            case OP_iassignfpoff: {
+                IassignFPoffNode *s = func->GetCodeMemPool()->New<IassignFPoffNode>(op);
+                s->SetPrimType(static_cast<PrimType>(Read()));
+                s->SetOffset(static_cast<int32>(ReadNum()));
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
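+            // OP_blkassignoff packs two values into one number: the byte
+            // offset in the upper bits and log2(alignment) in the low 4 bits.
+            // E.g. offset 24 with 8-byte alignment is encoded as
+            // (24 << 4) | 3 == 387, decoded below as 387 >> 4 == 24 and
+            // 387 & 0xf == 3.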
+            case OP_blkassignoff: {
+                BlkassignoffNode *s = func->GetCodeMemPool()->New<BlkassignoffNode>();
+                int32 offsetAlign = static_cast<int32>(ReadNum());
+                s->offset = offsetAlign >> kOffset4bit;
+                s->alignLog2 = offsetAlign & 0xf;
+                s->blockSize = static_cast<int32>(ReadNum());
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                s->SetOpnd(ImportExpression(func), kSecondOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_call:
+            case OP_virtualcall:
+            case OP_virtualicall:
+            case OP_superclasscall:
+            case OP_interfacecall:
+            case OP_interfaceicall:
+            case OP_customcall: {
+                CallNode *s = func->GetCodeMemPool()->New<CallNode>(mod, op);
+                s->SetPUIdx(ImportFuncViaSym(func));
+                MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx());
+                f->GetFuncSymbol()->SetAppearsInCode(true);
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_callassigned:
+            case OP_virtualcallassigned:
+            case OP_virtualicallassigned:
+            case OP_superclasscallassigned:
+            case OP_interfacecallassigned:
+            case OP_interfaceicallassigned:
+            case OP_customcallassigned: {
+                CallNode *s = func->GetCodeMemPool()->New<CallNode>(mod, op);
+                s->SetPUIdx(ImportFuncViaSym(func));
+                MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx());
+                f->GetFuncSymbol()->SetAppearsInCode(true);
+                ImportReturnValues(func, &s->GetReturnVec());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                const auto &calleeName =
+                    GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx())->GetName();
+                if (calleeName == "setjmp") {
+                    func->SetHasSetjmp();
+                }
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_polymorphiccall: {
+                CallNode *s = func->GetCodeMemPool()->New<CallNode>(mod, op);
+                s->SetPUIdx(ImportFuncViaSym(func));
+                MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx());
+                f->GetFuncSymbol()->SetAppearsInCode(true);
+                s->SetTyIdx(ImportType());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_polymorphiccallassigned: {
+                CallNode *s = func->GetCodeMemPool()->New<CallNode>(mod, op);
+                s->SetPUIdx(ImportFuncViaSym(func));
+                MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx());
+                f->GetFuncSymbol()->SetAppearsInCode(true);
+                s->SetTyIdx(ImportType());
+                ImportReturnValues(func, &s->GetReturnVec());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_icallproto:
+            case OP_icall: {
+                IcallNode *s = func->GetCodeMemPool()->New<IcallNode>(mod, op);
+                s->SetRetTyIdx(ImportType());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_icallprotoassigned:
+            case OP_icallassigned: {
+                IcallNode *s = func->GetCodeMemPool()->New<IcallNode>(mod, op);
+                s->SetRetTyIdx(ImportType());
+                ImportReturnValues(func, &s->GetReturnVec());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_intrinsiccall:
+            case OP_xintrinsiccall: {
+                IntrinsiccallNode *s = func->GetCodeMemPool()->New<IntrinsiccallNode>(mod, op);
+                s->SetIntrinsic(static_cast<MIRIntrinsicID>(ReadNum()));
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_intrinsiccallassigned:
+            case OP_xintrinsiccallassigned: {
+                IntrinsiccallNode *s = func->GetCodeMemPool()->New<IntrinsiccallNode>(mod, op);
+                s->SetIntrinsic((MIRIntrinsicID)ReadNum());
+                ImportReturnValues(func, &s->GetReturnVec());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) {
+                    MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx());
+                    MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx());
+                    CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned");
+                    s->SetPrimType(rettype->GetPrimType());
+                }
+                stmt = s;
+                break;
+            }
+            case OP_intrinsiccallwithtype: {
+                IntrinsiccallNode *s = func->GetCodeMemPool()->New<IntrinsiccallNode>(mod, op);
+                s->SetIntrinsic((MIRIntrinsicID)ReadNum());
+                s->SetTyIdx(ImportType());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_intrinsiccallwithtypeassigned: {
+                IntrinsiccallNode *s = func->GetCodeMemPool()->New<IntrinsiccallNode>(mod, op);
+                s->SetIntrinsic((MIRIntrinsicID)ReadNum());
+                s->SetTyIdx(ImportType());
+                ImportReturnValues(func, &s->GetReturnVec());
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) {
+                    MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx());
+                    MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx());
+                    CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned");
+                    s->SetPrimType(rettype->GetPrimType());
+                }
+                stmt = s;
+                break;
+            }
+            case OP_syncenter:
+            case OP_syncexit:
+            case OP_return: {
+                NaryStmtNode *s = func->GetCodeMemPool()->New<NaryStmtNode>(mod, op);
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_jscatch:
+            case OP_cppcatch:
+            case OP_finally:
+            case OP_endtry:
+            case OP_cleanuptry:
+            case OP_retsub:
+            case OP_membaracquire:
+            case OP_membarrelease:
+            case OP_membarstorestore:
+            case OP_membarstoreload: {
+                stmt = mod.CurFuncCodeMemPool()->New<StmtNode>(op);
+                break;
+            }
+            case OP_eval:
+            case OP_throw:
+            case OP_free:
+            case OP_decref:
+            case OP_incref:
+            case OP_decrefreset:
+            CASE_OP_ASSERT_NONNULL
+            case OP_igoto: {
+                UnaryStmtNode *s = mod.CurFuncCodeMemPool()->New<UnaryStmtNode>(op);
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_label: {
+                LabelNode *s = mod.CurFuncCodeMemPool()->New<LabelNode>();
+                s->SetLabelIdx(ImportLabel(func));
+                stmt = s;
+                break;
+            }
+            case OP_goto:
+            case OP_gosub: {
+                GotoNode *s = mod.CurFuncCodeMemPool()->New<GotoNode>(op);
+                s->SetOffset(ImportLabel(func));
+                stmt = s;
+                break;
+            }
+            case OP_brfalse:
+            case OP_brtrue: {
+                CondGotoNode *s = mod.CurFuncCodeMemPool()->New<CondGotoNode>(op);
+                s->SetOffset(ImportLabel(func));
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
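+            // Branch targets can arrive before their OP_label statement:
+            // ImportLabel() allocates a fresh LabelIdx for the first kBinLabel
+            // record and returns the memoized index for every later (negative)
+            // reference, so a forward goto and its eventual label resolve to
+            // the same LabelIdx without a separate fixup pass.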
+            case OP_switch: {
+                SwitchNode *s = mod.CurFuncCodeMemPool()->New<SwitchNode>(mod);
+                s->SetDefaultLabel(ImportLabel(func));
+                auto tagSize = static_cast<uint32>(ReadNum());
+                for (uint32 i = 0; i < tagSize; ++i) {
+                    int64 casetag = ReadNum();
+                    LabelIdx lidx = ImportLabel(func);
+                    CasePair cpair = std::make_pair(casetag, lidx);
+                    s->GetSwitchTable().push_back(cpair);
+                }
+                s->SetSwitchOpnd(ImportExpression(func));
+                stmt = s;
+                break;
+            }
+            case OP_rangegoto: {
+                RangeGotoNode *s = mod.CurFuncCodeMemPool()->New<RangeGotoNode>(mod);
+                s->SetTagOffset(static_cast<int32>(ReadNum()));
+                uint32 tagSize = static_cast<uint32>(ReadNum());
+                for (uint32 i = 0; i < tagSize; ++i) {
+                    uint16 casetag = static_cast<uint16>(ReadNum());
+                    LabelIdx lidx = ImportLabel(func);
+                    s->AddRangeGoto(casetag, lidx);
+                }
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_jstry: {
+                JsTryNode *s = mod.CurFuncCodeMemPool()->New<JsTryNode>();
+                s->SetCatchOffset(ImportLabel(func));
+                s->SetFinallyOffset(ImportLabel(func));
+                stmt = s;
+                break;
+            }
+            case OP_cpptry:
+            case OP_try: {
+                TryNode *s = mod.CurFuncCodeMemPool()->New<TryNode>(mod);
+                auto numLabels = static_cast<uint32>(ReadNum());
+                for (uint32 i = 0; i < numLabels; ++i) {
+                    s->GetOffsets().push_back(ImportLabel(func));
+                }
+                stmt = s;
+                break;
+            }
+            case OP_catch: {
+                CatchNode *s = mod.CurFuncCodeMemPool()->New<CatchNode>(mod);
+                auto numTys = static_cast<uint32>(ReadNum());
+                for (uint32 i = 0; i < numTys; ++i) {
+                    s->PushBack(ImportType());
+                }
+                stmt = s;
+                break;
+            }
+            case OP_comment: {
+                CommentNode *s = mod.CurFuncCodeMemPool()->New<CommentNode>(mod);
+                string str;
+                ReadAsciiStr(str);
+                s->SetComment(str);
+                stmt = s;
+                break;
+            }
+            case OP_dowhile:
+            case OP_while: {
+                WhileStmtNode *s = mod.CurFuncCodeMemPool()->New<WhileStmtNode>(op);
+                s->SetBody(ImportBlockNode(func));
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_if: {
+                IfStmtNode *s = mod.CurFuncCodeMemPool()->New<IfStmtNode>();
+                bool hasElsePart = (static_cast<uint32>(ReadNum()) != kFirstOpnd);
+                s->SetThenPart(ImportBlockNode(func));
+                if (hasElsePart) {
+                    s->SetElsePart(ImportBlockNode(func));
+                    s->SetNumOpnds(kOperandNumTernary);
+                }
+                s->SetOpnd(ImportExpression(func), kFirstOpnd);
+                stmt = s;
+                break;
+            }
+            case OP_block: {
+                stmt = ImportBlockNode(func);
+                break;
+            }
+            case OP_asm: {
+                AsmNode *s = mod.CurFuncCodeMemPool()->New<AsmNode>(&mod.GetCurFuncCodeMPAllocator());
+                mod.CurFunction()->SetHasAsm();
+                s->qualifiers = static_cast<uint32>(ReadNum());
+                string str;
+                ReadAsciiStr(str);
+                s->asmString = str;
+                // the outputs
+                auto count = static_cast<size_t>(ReadNum());
+                UStrIdx strIdx;
+                for (size_t i = 0; i < count; ++i) {
+                    strIdx = ImportUsrStr();
+                    s->outputConstraints.push_back(strIdx);
+                }
+                ImportReturnValues(func, &s->asmOutputs);
+                // the clobber list
+                count = static_cast<size_t>(ReadNum());
+                for (size_t i = 0; i < count; ++i) {
+                    strIdx = ImportUsrStr();
+                    s->clobberList.push_back(strIdx);
+                }
+                // the labels
+                count = static_cast<size_t>(ReadNum());
+                for (size_t i = 0; i < count; ++i) {
+                    LabelIdx lidx = ImportLabel(func);
+                    s->gotoLabels.push_back(lidx);
+                }
+                // the inputs
+                numOpr = static_cast<uint8>(ReadNum());
+                s->SetNumOpnds(numOpr);
+                for (int32 i = 0; i < numOpr; ++i) {
+                    strIdx = ImportUsrStr();
+                    s->inputConstraints.push_back(strIdx);
+                    const std::string &inStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strIdx);
+                    if (inStr[0] == '+') {
+                        s->SetHasWriteInputs();
+                    }
+                }
+                for (int32 i = 0; i < numOpr; ++i) {
+                    s->GetNopnd().push_back(ImportExpression(func));
+                }
+                stmt = s;
+                break;
+            }
+            default:
+                CHECK_FATAL(false, "Unhandled opcode tag %d", tag);
+                break;
+        }
+        stmt->SetSrcPos(thesrcPosition);
+        block->AddStatement(stmt);
+    }
+    if (func != nullptr) {
+        func->SetBody(block);
+    }
+    return block;
+}
+
+void BinaryMplImport::ReadFunctionBodyField()
+{
+    (void)ReadInt(); /// skip total size
+    int32 size = ReadInt();
+    for (int64 i = 0; i < size; ++i) {
+        PUIdx puIdx = ImportFunction();
+        MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+        mod.SetCurFunction(fn);
+        fn->GetFuncSymbol()->SetAppearsInCode(true);
+        localSymTab.clear();
+        localSymTab.push_back(nullptr);
+        localPregTab.clear();
+        localPregTab.push_back(0);
+        localLabelTab.clear();
+        localLabelTab.push_back(0);
+
+        fn->AllocSymTab();
+        fn->AllocPregTab();
+        fn->AllocTypeNameTab();
+        fn->AllocLabelTab();
+
+        ImportFuncIdInfo(fn);
+        ImportLocalTypeNameTable(fn->GetTypeNameTab());
+        ImportFormalsStIdx(fn);
+        if (mod.GetFlavor() < kMmpl) {
+            ImportAliasMap(fn);
+        }
+        (void)ImportBlockNode(fn);
+        mod.AddFunction(fn);
+    }
+    return;
+}
+}  // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_export.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_export.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..94f8490a1e791b49a22500b3e81ff35b2060a059
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_export.cpp
@@ -0,0 +1,1416 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "bin_mpl_export.h"
+#include <sstream>
+#include <vector>
+#include "mir_function.h"
+#include "namemangler.h"
+#include "opcode_info.h"
+#include "mir_pragma.h"
+#include "bin_mplt.h"
+#include "factory.h"
+
+namespace {
+using namespace maple;
+/* Storage location of field */
+constexpr uint32 kFirstField = 0;
+constexpr uint32 kSecondField = 1;
+constexpr uint32 kThirdField = 2;
+constexpr uint32 kFourthField = 3;
+constexpr int32 kFourthFieldInt = 3;
+constexpr uint32 kFifthField = 4;
+constexpr int32 kSixthFieldInt = 5;
+
+using OutputConstFactory = FunctionFactory<MIRConstKind, void, MIRConst&, BinaryMplExport&>;
+using OutputTypeFactory = FunctionFactory<MIRTypeKind, void, const MIRType&, BinaryMplExport&>;
+
+void OutputConstInt(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstInt);
+    mplExport.OutputConstBase(constVal);
+    mplExport.WriteNum(static_cast<const MIRIntConst&>(constVal).GetExtValue());
+}
+
+void OutputConstAddrof(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    const MIRAddrofConst &addrof = static_cast<const MIRAddrofConst&>(constVal);
+    if (addrof.GetSymbolIndex().IsGlobal()) {
+        mplExport.WriteNum(kBinKindConstAddrof);
+    } else {
+        mplExport.WriteNum(kBinKindConstAddrofLocal);
+    }
+    mplExport.OutputConstBase(constVal);
+    if (addrof.GetSymbolIndex().IsGlobal()) {
+        mplExport.OutputSymbol(mplExport.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex()));
+    } else {
+        mplExport.OutputLocalSymbol(mplExport.curFunc->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex()));
+    }
+    mplExport.WriteNum(addrof.GetFieldID());
+    mplExport.WriteNum(addrof.GetOffset());
+}
+
+void OutputConstAddrofFunc(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstAddrofFunc);
+    mplExport.OutputConstBase(constVal);
+    const auto &newConst = static_cast<const MIRAddroffuncConst&>(constVal);
+    mplExport.OutputFunction(newConst.GetValue());
+}
+
+void OutputConstLbl(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstAddrofLabel);
+    mplExport.OutputConstBase(constVal);
+    const MIRLblConst &lblConst = static_cast<const MIRLblConst&>(constVal);
+    mplExport.OutputLabel(lblConst.GetValue());
+}
+
+void OutputConstStr(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstStr);
+    mplExport.OutputConstBase(constVal);
+    const auto &newConst = static_cast<const MIRStrConst&>(constVal);
+    mplExport.OutputUsrStr(newConst.GetValue());
+}
+
+void OutputConstStr16(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstStr16);
+    mplExport.OutputConstBase(constVal);
+    const auto &mirStr16 = static_cast<const MIRStr16Const&>(constVal);
+    std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16.GetValue());
+    std::string str;
+    (void)namemangler::UTF16ToUTF8(str, str16);
+    mplExport.WriteNum(str.length());
+    for (char c : str) {
+        mplExport.Write(static_cast<uint8>(c));
+    }
+}
+
+void OutputConstFloat(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstFloat);
+    mplExport.OutputConstBase(constVal);
+    const auto &newConst = static_cast<const MIRFloatConst&>(constVal);
+    mplExport.WriteNum(newConst.GetIntValue());
+}
+
+void OutputConstDouble(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstDouble);
+    mplExport.OutputConstBase(constVal);
+    const auto &newConst = static_cast<const MIRDoubleConst&>(constVal);
+    mplExport.WriteNum(newConst.GetIntValue());
+}
+
+void OutputConstAgg(const MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstAgg);
+    mplExport.OutputConstBase(constVal);
+    const auto &aggConst = static_cast<const MIRAggConst&>(constVal);
+    size_t size = aggConst.GetConstVec().size();
+    mplExport.WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        mplExport.WriteNum(aggConst.GetFieldIdItem(i));
+        mplExport.OutputConst(aggConst.GetConstVecItem(i));
+    }
+}
+
+void OutputConstSt(MIRConst &constVal, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindConstSt);
+    mplExport.OutputConstBase(constVal);
+    auto &stConst = static_cast<MIRStConst&>(constVal);
+    size_t size = stConst.GetStVec().size();
+    mplExport.WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        mplExport.OutputSymbol(stConst.GetStVecItem(i));
+    }
+    size = stConst.GetStOffsetVec().size();
+    mplExport.WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        mplExport.WriteNum(stConst.GetStOffsetVecItem(i));
+    }
+}
+
+static bool InitOutputConstFactory()
+{
+    RegisterFactoryFunction<OutputConstFactory>(kConstInt, OutputConstInt);
+    RegisterFactoryFunction<OutputConstFactory>(kConstAddrof, OutputConstAddrof);
+    RegisterFactoryFunction<OutputConstFactory>(kConstAddrofFunc, OutputConstAddrofFunc);
+    RegisterFactoryFunction<OutputConstFactory>(kConstLblConst, OutputConstLbl);
+    RegisterFactoryFunction<OutputConstFactory>(kConstStrConst, OutputConstStr);
+    RegisterFactoryFunction<OutputConstFactory>(kConstStr16Const, OutputConstStr16);
+    RegisterFactoryFunction<OutputConstFactory>(kConstFloatConst, OutputConstFloat);
+    RegisterFactoryFunction<OutputConstFactory>(kConstDoubleConst, OutputConstDouble);
+    RegisterFactoryFunction<OutputConstFactory>(kConstAggConst, OutputConstAgg);
+    RegisterFactoryFunction<OutputConstFactory>(kConstStConst, OutputConstSt);
+    return true;
+}
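+
+// Serialization of constants and types is table-driven: each MIRConstKind /
+// MIRTypeKind is registered once against its writer function, and
+// OutputConst() / OutputType() later look the handler up via
+// CreateProductFunction. A new kind therefore needs only a new handler plus
+// one RegisterFactoryFunction line; no central switch has to change.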
+
+void OutputTypeScalar(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindTypeScalar);
+    mplExport.OutputTypeBase(ty);
+}
+
+void OutputTypePointer(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRPtrType&>(ty);
+    mplExport.WriteNum(kBinKindTypePointer);
+    mplExport.OutputTypeBase(type);
+    mplExport.OutputTypeAttrs(type.GetTypeAttrs());
+    mplExport.OutputType(type.GetPointedTyIdx());
+}
+
+void OutputTypeByName(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    mplExport.WriteNum(kBinKindTypeByName);
+    mplExport.OutputTypeBase(ty);
+}
+
+void OutputTypeFArray(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRFarrayType&>(ty);
+    mplExport.WriteNum(kBinKindTypeFArray);
+    mplExport.OutputTypeBase(type);
+    mplExport.OutputType(type.GetElemTyIdx());
+}
+
+void OutputTypeJArray(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRJarrayType&>(ty);
+    mplExport.WriteNum(kBinKindTypeJarray);
+    mplExport.OutputTypeBase(type);
+    mplExport.OutputType(type.GetElemTyIdx());
+}
+
+void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRArrayType&>(ty);
+    mplExport.WriteNum(kBinKindTypeArray);
+    mplExport.OutputTypeBase(type);
+    mplExport.WriteNum(type.GetDim());
+    for (uint16 i = 0; i < type.GetDim(); ++i) {
+        mplExport.WriteNum(type.GetSizeArrayItem(i));
+    }
+    mplExport.OutputType(type.GetElemTyIdx());
+    mplExport.OutputTypeAttrs(type.GetTypeAttrs());
+}
+
+void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRFuncType&>(ty);
+    mplExport.WriteNum(kBinKindTypeFunction);
+    mplExport.OutputTypeBase(type);
+    mplExport.OutputType(type.GetRetTyIdx());
+    mplExport.WriteNum(type.funcAttrs.GetAttrFlag());
+    size_t size = type.GetParamTypeList().size();
+    mplExport.WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        mplExport.OutputType(type.GetNthParamType(i));
+    }
+    size = type.GetParamAttrsList().size();
+    mplExport.WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        mplExport.OutputTypeAttrs(type.GetNthParamAttrs(i));
+    }
+}
+
+void OutputTypeParam(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRTypeParam&>(ty);
+    mplExport.WriteNum(kBinKindTypeParam);
+    mplExport.OutputTypeBase(type);
+}
+
+void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRInstantVectorType&>(ty);
+    mplExport.WriteNum(kBinKindTypeInstantVector);
+    mplExport.OutputTypeBase(type);
+    mplExport.WriteNum(ty.GetKind());
+    mplExport.OutputTypePairs(type);
+}
+
+void OutputTypeGenericInstant(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRGenericInstantType&>(ty);
+    mplExport.WriteNum(kBinKindTypeGenericInstant);
+    mplExport.OutputTypeBase(type);
+    mplExport.OutputTypePairs(type);
+    mplExport.OutputType(type.GetGenericTyIdx());
+}
+
+void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRBitFieldType&>(ty);
+    mplExport.WriteNum(kBinKindTypeBitField);
+    mplExport.OutputTypeBase(type);
+    mplExport.WriteNum(type.GetFieldSize());
+}
+
+// for Struct/StructIncomplete/Union
+void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRStructType&>(ty);
+    mplExport.WriteNum(kBinKindTypeStruct);
+    mplExport.OutputTypeBase(type);
+    MIRTypeKind kind = ty.GetKind();
+    if (type.IsImported()) {
+        CHECK_FATAL(ty.GetKind() != kTypeUnion, "Must be.");
+        kind = kTypeStructIncomplete;
+    }
+    mplExport.WriteNum(kind);
+    mplExport.OutputTypeAttrs(type.GetTypeAttrs());
+    if (kind != kTypeStructIncomplete) {
+        mplExport.OutputStructTypeData(type);
+    }
+}
+
+void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRClassType&>(ty);
+    mplExport.WriteNum(kBinKindTypeClass);
+    mplExport.OutputTypeBase(type);
+    MIRTypeKind kind = ty.GetKind();
+    if (type.IsImported()) {
+        kind = kTypeClassIncomplete;
+    }
+    mplExport.WriteNum(kind);
+    if (kind != kTypeClassIncomplete) {
+        mplExport.OutputStructTypeData(type);
+        mplExport.OutputClassTypeData(type);
+    }
+}
+
+void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport)
+{
+    const auto &type = static_cast<const MIRInterfaceType&>(ty);
+    mplExport.WriteNum(kBinKindTypeInterface);
+    mplExport.OutputTypeBase(type);
+    MIRTypeKind kind = ty.GetKind();
+    if (type.IsImported()) {
+        kind = kTypeInterfaceIncomplete;
+    }
+    mplExport.WriteNum(kind);
+    if (kind != kTypeInterfaceIncomplete) {
+        mplExport.OutputStructTypeData(type);
+        mplExport.OutputInterfaceTypeData(type);
+    }
+}
+
+void OutputTypeConstString(const MIRType &ty, BinaryMplExport &)
+{
+    DEBUG_ASSERT(false, "Type's kind not yet implemented: %d", ty.GetKind());
+    (void)ty;
+}
+
+static bool InitOutputTypeFactory()
+{
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeScalar, OutputTypeScalar);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypePointer, OutputTypePointer);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeByName, OutputTypeByName);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeFArray, OutputTypeFArray);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeJArray, OutputTypeJArray);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeArray, OutputTypeArray);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeFunction, OutputTypeFunction);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeParam, OutputTypeParam);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeInstantVector, OutputTypeInstantVector);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeGenericInstant, OutputTypeGenericInstant);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeBitField, OutputTypeBitField);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeStruct, OutputTypeStruct);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeStructIncomplete, OutputTypeStruct);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeUnion, OutputTypeStruct);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeClass, OutputTypeClass);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeClassIncomplete, OutputTypeClass);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeInterface, OutputTypeInterface);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeInterfaceIncomplete, OutputTypeInterface);
+    RegisterFactoryFunction<OutputTypeFactory>(kTypeConstString, OutputTypeConstString);
+    return true;
+}
+}  // namespace
+
+namespace maple {
+int BinaryMplExport::typeMarkOffset = 0;
+
+BinaryMplExport::BinaryMplExport(MIRModule &md) : mod(md)
+{
+    bufI = 0;
+    Init();
+    (void)InitOutputConstFactory();
+    (void)InitOutputTypeFactory();
+    not2mplt = false;
+}
+
+uint8 BinaryMplExport::Read()
+{
+    CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplImport::Read()");
+    return buf[bufI++];
+}
+
+// Little endian
+int32 BinaryMplExport::ReadInt()
+{
+    uint32 x0 = static_cast<uint32>(Read());
+    uint32 x1 = static_cast<uint32>(Read());
+    uint32 x2 = static_cast<uint32>(Read());
+    uint32 x3 = static_cast<uint32>(Read());
+    int32 x = static_cast<int32>((((((x3 << 8) + x2) << 8) + x1) << 8) + x0);
+    return x;
+}
+
+void BinaryMplExport::Write(uint8 b)
+{
+    buf.push_back(b);
+}
+
+// Little endian
+void BinaryMplExport::WriteInt(int32 x)
+{
+    Write(static_cast<uint8>(static_cast<uint32>(x) & 0xFF));
+    Write(static_cast<uint8>((static_cast<uint32>(x) >> 8) & 0xFF));
+    Write(static_cast<uint8>((static_cast<uint32>(x) >> 16) & 0xFF));
+    Write(static_cast<uint8>((static_cast<uint32>(x) >> 24) & 0xFF));
+}
+
+void BinaryMplExport::ExpandFourBuffSize()
+{
+    WriteInt(0);
+}
+
+void BinaryMplExport::Fixup(size_t i, int32 x)
+{
+    constexpr int fixupCount = 4;
+    CHECK(i <= buf.size() - fixupCount, "Index out of bound in BinaryMplImport::Fixup()");
+    buf[i] = static_cast<uint8>(static_cast<uint32>(x) & 0xFF);
+    buf[i + 1] = static_cast<uint8>((static_cast<uint32>(x) >> 8) & 0xFF);
+    buf[i + 2] = static_cast<uint8>((static_cast<uint32>(x) >> 16) & 0xFF);
+    buf[i + 3] = static_cast<uint8>((static_cast<uint32>(x) >> 24) & 0xFF);
+}
+
+void BinaryMplExport::WriteInt64(int64 x)
+{
+    WriteInt(static_cast<int32>(static_cast<uint64>(x) & 0xFFFFFFFF));
+    WriteInt(static_cast<int32>((static_cast<uint64>(x) >> 32) & 0xFFFFFFFF));
+}
+
+// LEB128
+void BinaryMplExport::WriteNum(int64 x)
+{
+    while (x < -0x40 || x >= 0x40) {
+        Write(static_cast<uint8>((static_cast<uint64>(x) & 0x7F) + 0x80));
+        x = x >> 7;  // This is a compression scheme; do not cast int64 to uint64 before the shift.
+                     // If we did, a small negative number like -3 would occupy as many as 10 bytes
+                     // and we would lose the benefit of the compression.
+    }
+    Write(static_cast<uint8>(static_cast<uint64>(x) & 0x7F));
+}
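+
+// Worked example of the signed LEB128 encoding above:
+//   WriteNum(-3)  -> -3 already lies in [-0x40, 0x40), so a single byte 0x7D.
+//   WriteNum(300) -> 300 & 0x7F = 0x2C, +0x80 -> 0xAC; 300 >> 7 = 2 fits,
+//                    so the stream is 0xAC 0x02.
+// ReadNum() on the import side reverses this 7-bits-per-byte grouping.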
+
+void BinaryMplExport::WriteAsciiStr(const std::string &str)
+{
+    WriteNum(static_cast<int64>(str.size()));
+    for (size_t i = 0; i < str.size(); ++i) {
+        Write(static_cast<uint8>(str[i]));
+    }
+}
+
+void BinaryMplExport::DumpBuf(const std::string &name)
+{
+    FILE *f = fopen(name.c_str(), "wb");
+    if (f == nullptr) {
+        LogInfo::MapleLogger(kLlErr) << "Error while creating the binary file: " << name << '\n';
+        FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str());
+    }
+    size_t size = buf.size();
+    size_t k = fwrite(&buf[0], sizeof(uint8), size, f);
+    fclose(f);
+    if (k != size) {
+        LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n';
+    }
+}
+
+void BinaryMplExport::OutputConstBase(const MIRConst &constVal)
+{
+    WriteNum(constVal.GetKind());
+    OutputType(constVal.GetType().GetTypeIndex());
+}
+
+void BinaryMplExport::OutputConst(MIRConst *constVal)
+{
+    if (constVal == nullptr) {
+        WriteNum(0);
+    } else {
+        auto func = CreateProductFunction<OutputConstFactory>(constVal->GetKind());
+        if (func != nullptr) {
+            func(*constVal, *this);
+        }
+    }
+}
+
+void BinaryMplExport::OutputStr(const GStrIdx &gstr)
+{
+    if (gstr == 0u) {
+        WriteNum(0);
+        return;
+    }
+
+    auto it = gStrMark.find(gstr);
+    if (it != gStrMark.end()) {
+        WriteNum(-(it->second));
+        return;
+    }
+
+    size_t mark = gStrMark.size();
+    gStrMark[gstr] = mark;
+    WriteNum(kBinString);
+    DEBUG_ASSERT(GlobalTables::GetStrTable().StringTableSize() != 0, "Container check");
+    WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(gstr));
+}
+
+void BinaryMplExport::OutputUsrStr(UStrIdx ustr)
+{
+    if (ustr == 0u) {
+        WriteNum(0);
+        return;
+    }
+
+    auto it = uStrMark.find(ustr);
+    if (it != uStrMark.end()) {
+        WriteNum(-(it->second));
+        return;
+    }
+
+    size_t mark = uStrMark.size();
+    uStrMark[ustr] = mark;
+    WriteNum(kBinUsrString);
+    WriteAsciiStr(GlobalTables::GetUStrTable().GetStringFromStrIdx(ustr));
+}
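+
+// Strings use the same emit-once scheme as symbols: the first OutputStr of a
+// given GStrIdx writes a kBinString record with the ASCII payload and records
+// its mark; every later OutputStr of the same index writes only the negated
+// mark. E.g. the first string gets mark 1 (mark 0 is the null GStrIdx), so a
+// repeated reference costs one LEB128 number (-1) instead of the whole string.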
+
+void BinaryMplExport::OutputPragmaElement(const MIRPragmaElement &e)
+{
+    OutputStr(e.GetNameStrIdx());
+    OutputStr(e.GetTypeStrIdx());
+    WriteNum(e.GetType());
+
+    if (e.GetType() == kValueString || e.GetType() == kValueType || e.GetType() == kValueField ||
+        e.GetType() == kValueMethod || e.GetType() == kValueEnum) {
+        OutputStr(GStrIdx(e.GetI32Val()));
+    } else {
+        WriteInt64(e.GetU64Val());
+    }
+    size_t size = e.GetSubElemVec().size();
+    WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        OutputPragmaElement(*(e.GetSubElement(i)));
+    }
+}
+
+void BinaryMplExport::OutputPragma(const MIRPragma &p)
+{
+    WriteNum(p.GetKind());
+    WriteNum(p.GetVisibility());
+    OutputStr(p.GetStrIdx());
+    OutputType(p.GetTyIdx());
+    OutputType(p.GetTyIdxEx());
+    WriteNum(p.GetParamNum());
+    size_t size = p.GetElementVector().size();
+    WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        OutputPragmaElement(*(p.GetNthElement(i)));
+    }
+}
+
+void BinaryMplExport::OutputTypeBase(const MIRType &type)
+{
+    WriteNum(type.GetPrimType());
+    OutputStr(type.GetNameStrIdx());
+    WriteNum(type.IsNameIsLocal());
+}
+
+void BinaryMplExport::OutputFieldPair(const FieldPair &fp)
+{
+    OutputStr(fp.first);          // GStrIdx
+    OutputType(fp.second.first);  // TyIdx
+    FieldAttrs fa = fp.second.second;
+    WriteNum(fa.GetAttrFlag());
+    WriteNum(fa.GetAlignValue());
+    if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) &&
+        (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) {
+        const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fp.first);
+        MIRSymbol *fieldVar = mod.GetMIRBuilder()->GetGlobalDecl(fieldName);
+        if ((fieldVar != nullptr) && (fieldVar->GetKonst() != nullptr) &&
+            (fieldVar->GetKonst()->GetKind() == kConstStr16Const)) {
+            WriteNum(kBinInitConst);
+            OutputConst(fieldVar->GetKonst());
+        } else {
+            WriteNum(0);
+        }
+    }
+}
+
+void BinaryMplExport::OutputMethodPair(const MethodPair &memPool)
+{
+    // use GStrIdx instead, StIdx will be created by ImportMethodPair
+    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(memPool.first.Idx());
+    CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, can't get symbol! Check it!");
+    WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(funcSt->GetNameStrIdx()));
+    OutputType(memPool.second.first);               // TyIdx
+    WriteNum(memPool.second.second.GetAttrFlag());  // FuncAttrs
+}
+
+void BinaryMplExport::OutputFieldsOfStruct(const FieldVector &fields)
+{
+    WriteNum(fields.size());
+    for (const FieldPair &fp : fields) {
+        OutputFieldPair(fp);
+    }
+}
+
+void BinaryMplExport::OutputMethodsOfStruct(const MethodVector &methods)
+{
+    WriteNum(methods.size());
+    for (const MethodPair &memPool : methods) {
+        OutputMethodPair(memPool);
+    }
+}
+
+void BinaryMplExport::OutputStructTypeData(const MIRStructType &type)
+{
+    OutputFieldsOfStruct(type.GetFields());
+    OutputFieldsOfStruct(type.GetStaticFields());
+    OutputFieldsOfStruct(type.GetParentFields());
+    OutputMethodsOfStruct(type.GetMethods());
+}
+
+void BinaryMplExport::OutputImplementedInterfaces(const std::vector<TyIdx> &interfaces)
+{
+    WriteNum(interfaces.size());
+    for (const TyIdx &tyIdx : interfaces) {
+        OutputType(tyIdx);
+    }
+}
+
+void BinaryMplExport::OutputInfoIsString(const std::vector<bool> &infoIsString)
+{
+    WriteNum(infoIsString.size());
+    for (bool isString : infoIsString) {
+        WriteNum(static_cast<int64>(isString));
+    }
+}
+
+void BinaryMplExport::OutputInfo(const std::vector<MIRInfoPair> &info, const std::vector<bool> &infoIsString)
+{
+    size_t size = info.size();
+    WriteNum(size);
+    for (size_t i = 0; i < size; ++i) {
+        OutputStr(info[i].first);  // GStrIdx
+        if (infoIsString[i]) {
+            OutputStr(GStrIdx(info[i].second));
+        } else {
+            WriteNum(info[i].second);
+        }
+    }
+}
+
+void BinaryMplExport::OutputPragmaVec(const std::vector<MIRPragma*> &pragmaVec)
+{
+    WriteNum(pragmaVec.size());
+    for (MIRPragma *pragma : pragmaVec) {
+        OutputPragma(*pragma);
+    }
+}
+
+void BinaryMplExport::OutputClassTypeData(const MIRClassType &type)
+{
+    OutputType(type.GetParentTyIdx());
+    OutputImplementedInterfaces(type.GetInterfaceImplemented());
+    OutputInfoIsString(type.GetInfoIsString());
+    if (!inIPA) {
+        OutputInfo(type.GetInfo(), type.GetInfoIsString());
+        OutputPragmaVec(type.GetPragmaVec());
+    }
+}
+
+void BinaryMplExport::OutputInterfaceTypeData(const MIRInterfaceType &type)
+{
+    OutputImplementedInterfaces(type.GetParentsTyIdx());
+    OutputInfoIsString(type.GetInfoIsString());
+    if (!inIPA) {
+        OutputInfo(type.GetInfo(), type.GetInfoIsString());
+        OutputPragmaVec(type.GetPragmaVec());
+    }
+}
+
+void BinaryMplExport::Init()
+{
+    BinaryMplExport::typeMarkOffset = 0;
+    gStrMark.clear();
+    uStrMark.clear();
+    symMark.clear();
+    funcMark.clear();
+    typMark.clear();
+    gStrMark[GStrIdx(0)] = 0;
+    uStrMark[UStrIdx(0)] = 0;
+    symMark[nullptr] = 0;
+    funcMark[nullptr] = 0;
+    eaNodeMark[nullptr] = 0;
+    curFunc = nullptr;
+    for (uint32 pti = static_cast<uint32>(PTY_begin); pti < static_cast<uint32>(PTY_end); ++pti) {
+        typMark[GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(pti))] = pti;
+    }
+}
+
+void BinaryMplExport::OutputSymbol(MIRSymbol *sym)
+{
+    if (sym == nullptr) {
+        WriteNum(0);
+        return;
+    }
+
+    std::unordered_map<const MIRSymbol *, int64>::iterator it = symMark.find(sym);
+    if (it != symMark.end()) {
+        WriteNum(-(it->second));
+        return;
+    }
+
+    WriteNum(kBinSymbol);
+    WriteNum(sym->GetScopeIdx());
+    OutputStr(sym->GetNameStrIdx());
+    OutputUsrStr(sym->sectionAttr);
+    OutputUsrStr(sym->GetAsmAttr());
+    WriteNum(sym->GetSKind());
+    WriteNum(sym->GetStorageClass());
+    size_t mark = symMark.size();
+    symMark[sym] = mark;
+    OutputTypeAttrs(sym->GetAttrs());
+    WriteNum(sym->GetIsTmp() ? 1 : 0);
+    if (sym->GetSKind() == kStPreg) {
+        WriteNum(sym->GetPreg()->GetPregNo());
+    } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) {
+        if (sym->GetKonst() != nullptr) {
+            sym->GetKonst()->SetType(*sym->GetType());
+        }
+        OutputConst(sym->GetKonst());
+    } else if (sym->GetSKind() == kStFunc) {
+        OutputFunction(sym->GetFunction()->GetPuidx());
+    } else if (sym->GetSKind() == kStJavaClass || sym->GetSKind() == kStJavaInterface) {
+    } else {
+        CHECK_FATAL(false, "should not used");
+    }
+    if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) {
+        OutputSrcPos(sym->GetSrcPosition());
+    }
+    OutputType(sym->GetTyIdx());
+}
+
+void BinaryMplExport::OutputFunction(PUIdx puIdx)
+{
+    if (puIdx == 0) {
+        WriteNum(0);
+        mod.SetCurFunction(nullptr);
+        return;
+    }
+    MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    CHECK_FATAL(func != nullptr, "Cannot get MIRFunction.");
+    auto it = funcMark.find(func);
+    if (it != funcMark.end()) {
+        WriteNum(-it->second);
+        mod.SetCurFunction(func);
+        return;
+    }
+    size_t mark = funcMark.size();
+    funcMark[func] = mark;
+    MIRFunction *savedFunc = mod.CurFunction();
+    mod.SetCurFunction(func);
+
+    WriteNum(kBinFunction);
+    MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx());
+    CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, cannot get symbol! Check it!");
+    OutputSymbol(funcSt);
+    OutputType(func->GetMIRFuncType()->GetTypeIndex());
+    WriteNum(func->GetFuncAttrs().GetAttrFlag());
+
+    auto &attributes = func->GetFuncAttrs();
+    if (attributes.GetAttr(FUNCATTR_constructor_priority)) {
+        WriteNum(attributes.GetConstructorPriority());
+    }
+
+    if (attributes.GetAttr(FUNCATTR_destructor_priority)) {
+        WriteNum(attributes.GetDestructorPriority());
+    }
+
+    WriteNum(func->GetFlag());
+    OutputType(func->GetClassTyIdx());
+    // output formal parameter information
+    WriteNum(static_cast<int64>(func->GetFormalDefVec().size()));
+    for (FormalDef formalDef : func->GetFormalDefVec()) {
+        OutputStr(formalDef.formalStrIdx);
+        OutputType(formalDef.formalTyIdx);
+        WriteNum(static_cast<int64>(formalDef.formalAttrs.GetAttrFlag()));
+    }
+    // store Side Effect for each func
+    if (func2SEMap) {
+        uint32 isSee = func->IsIpaSeen() == true ? 1 : 0;
+        uint32 isPure = func->IsPure() == true ? 1 : 0;
+        uint32 noDefArg = func->IsNoDefArgEffect() == true ? 1 : 0;
+        uint32 noDef = func->IsNoDefEffect() == true ? 1 : 0;
+        uint32 noRetGlobal = func->IsNoRetGlobal() == true ? 1 : 0;
+        uint32 noThr = func->IsNoThrowException() == true ? 1 : 0;
+        uint32 noRetArg = func->IsNoRetArg() == true ? 1 : 0;
+        uint32 noPriDef = func->IsNoPrivateDefEffect() == true ? 1 : 0;
+        uint32 i = 0;
+        uint8 se = noThr << i++;
+        se |= noRetGlobal << i++;
+        se |= noDef << i++;
+        se |= noDefArg << i++;
+        se |= isPure << i++;
+        se |= isSee << i++;
+        se |= noRetArg << i++;
+        se |= noPriDef << i;
+        if ((*func2SEMap).find(func->GetNameStrIdx()) == (*func2SEMap).end()) {
+            (*func2SEMap)[func->GetNameStrIdx()] = se;
+        } else if ((*func2SEMap)[func->GetNameStrIdx()] != se) {
+            FATAL(kLncFatal, "It is a bug.");
+        }
+    }
+    mod.SetCurFunction(savedFunc);
+}
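+
+// The side-effect summary is a single byte with one flag per bit, in the
+// order assembled above: bit0 noThr, bit1 noRetGlobal, bit2 noDef,
+// bit3 noDefArg, bit4 isPure, bit5 isSee, bit6 noRetArg, bit7 noPriDef.
+// E.g. a pure function that cannot throw stores 0x11 (bit0 | bit4); the
+// kPureFunc mask tested in WriteSeField() below picks out the isPure bit.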
+
+void BinaryMplExport::WriteStrField(uint64 contentIdx)
+{
+    Fixup(contentIdx, buf.size());
+    WriteNum(kBinStrStart);
+    size_t totalSizeIdx = buf.size();
+    ExpandFourBuffSize();  // total size of this field to ~BIN_STR_START
+    size_t outStrSizeIdx = buf.size();
+    ExpandFourBuffSize();  // size of OutputStr
+
+    int32 size = 0;
+    for (const auto &entity : GlobalTables::GetConstPool().GetConstU16StringPool()) {
+        MIRSymbol *sym = entity.second;
+        if (sym->IsLiteral()) {
+            OutputStr(sym->GetNameStrIdx());
+            ++size;
+        }
+    }
+    Fixup(totalSizeIdx, buf.size() - totalSizeIdx);
+    Fixup(outStrSizeIdx, size);
+    WriteNum(~kBinStrStart);
+}
+
+void BinaryMplExport::WriteHeaderField(uint64 contentIdx)
+{
+    Fixup(contentIdx, buf.size());
+    WriteNum(kBinHeaderStart);
+    size_t totalSizeIdx = buf.size();
+    ExpandFourBuffSize();  // total size of this field to ~BIN_IMPORT_START
+    WriteNum(mod.GetFlavor());
+    WriteNum(mod.GetSrcLang());
+    WriteNum(mod.GetID());
+    if (mod.GetFlavor() == kFlavorLmbc) {
+        WriteNum(mod.GetGlobalMemSize());
+        WriteNum(mod.IsWithDbgInfo());
+    }
+    WriteNum(mod.GetNumFuncs());
+    WriteAsciiStr(mod.GetEntryFuncName());
+    OutputInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString());
+
+    if (mod.IsWithDbgInfo()) {
+        WriteNum(static_cast<int64>(mod.GetSrcFileInfo().size()));
+        for (uint32 i = 0; i < mod.GetSrcFileInfo().size(); i++) {
+            OutputStr(mod.GetSrcFileInfo()[i].first);
+            WriteNum(mod.GetSrcFileInfo()[i].second);
+        }
+    } else {
+        Write(0);
+    }
+
+    WriteNum(static_cast<int64>(mod.GetImportFiles().size()));
+    for (GStrIdx strIdx : mod.GetImportFiles()) {
+        OutputStr(strIdx);
+    }
+
+    WriteNum(static_cast<int64>(mod.GetAsmDecls().size()));
+    for (MapleString mapleStr : mod.GetAsmDecls()) {
+        std::string str(mapleStr.c_str());
+        WriteAsciiStr(str);
+    }
+
+    Fixup(totalSizeIdx, buf.size() - totalSizeIdx);
+    WriteNum(~kBinHeaderStart);
+    return;
+}
+
+void BinaryMplExport::WriteTypeField(uint64 contentIdx, bool useClassList)
+{
+    Fixup(contentIdx, buf.size());
+    WriteNum(kBinTypeStart);
+    size_t totalSizeIdx = buf.size();
+    ExpandFourBuffSize();  // total size of this field to ~BIN_TYPE_START
+    size_t outTypeSizeIdx = buf.size();
+    ExpandFourBuffSize();  // size of OutputType
+    int32 size = 0;
+    if (useClassList) {
+        for (uint32 tyIdx : mod.GetClassList()) {
+            TyIdx curTyidx(tyIdx);
+            MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyidx);
+            CHECK_FATAL(type != nullptr, "Pointer type is nullptr, cannot get type, check it!");
+            if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) {
+                auto *structType = static_cast<MIRStructType *>(type);
+                // skip imported class/interface and incomplete types
+                if (!structType->IsImported() && !structType->IsIncomplete()) {
+                    OutputType(curTyidx);
+                    ++size;
+                }
+            }
+        }
+    } else {
+        uint32 idx = GlobalTables::GetTypeTable().lastDefaultTyIdx.GetIdx();
+        for (idx = idx + 1; idx < GlobalTables::GetTypeTable().GetTypeTableSize(); idx++) {
+            OutputType(TyIdx(idx));
+            size++;
+        }
+    }
+    Fixup(totalSizeIdx, buf.size() - totalSizeIdx);
+    Fixup(outTypeSizeIdx, size);
+    WriteNum(~kBinTypeStart);
+}
+
+void BinaryMplExport::OutputCallInfo(CallInfo &callInfo)
+{
+    auto it = callInfoMark.find(callInfo.GetID());
+    if (it != callInfoMark.end()) {
+        WriteNum(-(it->second));
+        return;
+    }
+    WriteNum(kBinCallinfo);
+    size_t mark = callInfoMark.size();
+    callInfoMark[callInfo.GetID()] = mark;
+    WriteNum(callInfo.GetCallType());  // call type
+    WriteInt(callInfo.GetLoopDepth());
+    WriteInt(callInfo.GetID());
+    callInfo.AreAllArgsLocal() ? Write(1) : Write(0);  // All args are local variables or not.
+    OutputSymbol(callInfo.GetFunc()->GetFuncSymbol());
+}
+
+void BinaryMplExport::WriteCgField(uint64 contentIdx, const CallGraph *cg)
+{
+    if (contentIdx != 0) {
+        Fixup(contentIdx, buf.size());
+    }
+    WriteNum(kBinCgStart);
+    size_t totalSizeIdx = buf.size();
+    ExpandFourBuffSize();  // total size of this field to ~BIN_CG_START
+    size_t outcgSizeIdx = buf.size();
+    ExpandFourBuffSize();  // size of OutCG
+    int32 size = 0;
+    if (cg != nullptr) {
+        for (auto entry : cg->GetNodesMap()) {
+            MIRSymbol *methodSym = entry.first->GetFuncSymbol();
+            WriteNum(kStartMethod);
+            OutputSymbol(methodSym);
+            size_t targetTyIdx = buf.size();
+            ExpandFourBuffSize();
+            int32 targSize = 0;
+            callInfoMark.clear();
+            callInfoMark[0xffffffff] = 0;
+            for (const auto &callSite : entry.second->GetCallee()) {
+                OutputCallInfo(*(callSite.first));
+                ++targSize;
+            }
+            Fixup(targetTyIdx, targSize);
+            WriteNum(~kStartMethod);
+            ++size;
+        }
+    }
+
+    DEBUG_ASSERT((buf.size() - totalSizeIdx) <= 0xffffffff, "Integer overflow.");
+    Fixup(totalSizeIdx, buf.size() - totalSizeIdx);
+    Fixup(outcgSizeIdx, size);
+    WriteNum(~kBinCgStart);
+}
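+
+// Call-graph layout: each method is framed as
+//   kStartMethod [method symbol] [callee-count: 4-byte slot] [call infos...] ~kStartMethod
+// with the callee count patched via Fixup() once the call infos are written.
+// callInfoMark is reset per method (0xffffffff reserves mark 0), so repeated
+// call sites within one method collapse to negative back-references.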
+
+void BinaryMplExport::WriteSeField()
+{
+    DEBUG_ASSERT(func2SEMap != nullptr, "Expecting a func2SE map");
+    WriteNum(kBinSeStart);
+    size_t totalSizeIdx = buf.size();
+    ExpandFourBuffSize();  // total size of this field to ~BIN_SYM_START
+    size_t outseSizeIdx = buf.size();
+    ExpandFourBuffSize();  // size of OutSym
+    int32 size = 0;
+
+    for (const auto &func2SE : *func2SEMap) {
+        uint8 se = func2SE.second;
+        if (static_cast<bool>(se)) {
+            OutputStr(func2SE.first);
+            Write(se);
+            if ((se & kPureFunc) == kPureFunc) {
+                const std::string &funcStr = GlobalTables::GetStrTable().GetStringFromStrIdx(func2SE.first);
+                auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(
+                    GlobalTables::GetStrTable().GetStrIdxFromName(funcStr));
+                MIRFunction *func = (funcSymbol != nullptr)
+                                        ? GetMIRModule().GetMIRBuilder()->GetFunctionFromSymbol(*funcSymbol)
+                                        : nullptr;
+                OutputType(func->GetReturnTyIdx());
+            }
+            ++size;
+        }
+    }
+    Fixup(totalSizeIdx, buf.size() - totalSizeIdx);
+    Fixup(outseSizeIdx, size);
+    WriteNum(~kBinSeStart);
+}
+
+void BinaryMplExport::OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart)
+{
+    if (firstPart) {
+        WriteNum(node.eaStatus);
+        WriteInt(static_cast<int32>(node.id));
+    } else {
+        // in and out set in base node is not necessary to be outed
+        // start to out point-to set
+        size_t outP2SizeIdx = buf.size();
+        WriteInt(0);
+        uint32 size = 0;
+        for (EACGBaseNode *outNode : node.GetPointsToSet()) {
+            OutEaCgNode(*outNode);
+            ++size;
+        }
+        Fixup(outP2SizeIdx, size);
+        // start to out in set
+        outP2SizeIdx = buf.size();
+        WriteInt(0);
+        size = 0;
+        for (EACGBaseNode *outNode : node.GetInSet()) {
+            OutEaCgNode(*outNode);
+            ++size;
+        }
+        Fixup(outP2SizeIdx, size);
+        // start to out out set
+        outP2SizeIdx = buf.size();
+        WriteInt(0);
+        size = 0;
+        for (EACGBaseNode *outNode : node.GetOutSet()) {
+            OutEaCgNode(*outNode);
+            ++size;
+        }
+        Fixup(outP2SizeIdx, size);
+    }
+}
+
+void BinaryMplExport::OutEaCgObjNode(EACGObjectNode &obj)
+{
+    Write(uint8(obj.isPhantom));
+    size_t outFieldSizeIdx = buf.size();
+    WriteInt(0);
+    uint32 size = 0;
+    for (const auto &fieldNodePair : obj.fieldNodes) {
+        EACGBaseNode *fieldNode = fieldNodePair.second;
+        DEBUG_ASSERT(fieldNodePair.first == static_cast<EACGFieldNode *>(fieldNode)->GetFieldID(), "Must be.");
+        OutEaCgNode(*fieldNode);
+        ++size;
+    }
+    Fixup(outFieldSizeIdx, size);
+    // start to out point by
+    outFieldSizeIdx = buf.size();
+    WriteInt(0);
+    size = 0;
+    for (EACGBaseNode *node : obj.pointsBy) {
+        OutEaCgNode(*node);
+        ++size;
+    }
+    Fixup(outFieldSizeIdx, size);
+}
+
+void BinaryMplExport::OutEaCgRefNode(const EACGRefNode &ref)
+{
+    Write(uint8(ref.isStaticField));
+}
+
+void BinaryMplExport::OutEaCgFieldNode(EACGFieldNode &field)
+{
+    WriteInt(field.GetFieldID());
+    int32 size = 0;
+    size_t outFieldSizeIdx = buf.size();
+    WriteInt(0);
+    for (EACGBaseNode *obj : field.belongsTo) {
+        OutEaCgNode(*obj);
+        ++size;
+    }
+    Fixup(outFieldSizeIdx, size);
+    Write(uint8(field.isPhantom));
+}
+
+void BinaryMplExport::OutEaCgActNode(const EACGActualNode &act)
+{
+    Write(uint8(act.isPhantom));
+    Write(uint8(act.isReturn));
+    Write(act.argIdx);
+    WriteInt(act.callSiteInfo);
+}
+
+void BinaryMplExport::OutEaCgNode(EACGBaseNode &node)
+{
+    auto it = eaNodeMark.find(&node);
+    if (it != eaNodeMark.end()) {
+        WriteNum(-it->second);
+        return;
+    }
+    size_t mark = eaNodeMark.size();
+    eaNodeMark[&node] = mark;
+    WriteNum(kBinEaCgNode);
+    WriteNum(node.kind);
+    OutEaCgBaseNode(node, true);
+    if (node.IsActualNode()) {
+        WriteNum(kBinEaCgActNode);
+        OutEaCgActNode(static_cast<EACGActualNode&>(node));
+    } else if (node.IsFieldNode()) {
+        WriteNum(kBinEaCgFieldNode);
+        OutEaCgFieldNode(static_cast<EACGFieldNode&>(node));
+    } else if (node.IsObjectNode()) {
+        WriteNum(kBinEaCgObjNode);
+        OutEaCgObjNode(static_cast<EACGObjectNode&>(node));
+    } else if (node.IsReferenceNode()) {
+        WriteNum(kBinEaCgRefNode);
+        OutEaCgRefNode(static_cast<EACGRefNode&>(node));
+    } else {
+        DEBUG_ASSERT(false, "Must be.");
+    }
+    OutEaCgBaseNode(node, false);
+    WriteNum(~kBinEaCgNode);
+}
+
+void BinaryMplExport::WriteEaField(const CallGraph &cg)
+{
+    WriteNum(kBinEaStart);
+    uint64 totalSizeIdx = buf.size();
+    WriteInt(0);
+    uint64 outeaSizeIdx = buf.size();
+    WriteInt(0);
+    int32 size = 0;
+    for (auto cgNodePair : cg.GetNodesMap()) {
+        MIRFunction *func = cgNodePair.first;
+        if (func->GetEACG() == nullptr) {
+            continue;
+        }
EAConnectionGraph *eacg = func->GetEACG(); + DEBUG_ASSERT(eacg != nullptr, "Must be."); + OutputStr(eacg->GetFuncNameStrIdx()); + WriteInt(eacg->GetNodes().size()); + OutEaCgNode(*eacg->GetGlobalObject()); + uint64 outFunceaIdx = buf.size(); + WriteInt(0); + size_t funceaSize = 0; + for (EACGBaseNode *node : eacg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++funceaSize; + } + Fixup(outFunceaIdx, funceaSize); + ++size; + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outeaSizeIdx, size); + WriteNum(~kBinEaStart); +} + +void BinaryMplExport::WriteEaCgField(EAConnectionGraph *eaCg) +{ + if (eaCg == nullptr) { + WriteNum(~kBinEaCgStart); + return; + } + WriteNum(kBinEaCgStart); + size_t totalSizeIdx = buf.size(); + WriteInt(0); + // out this function's arg list + OutputStr(eaCg->GetFuncNameStrIdx()); + WriteInt(eaCg->GetNodes().size()); + OutEaCgNode(*eaCg->GetGlobalObject()); + size_t outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t argNodeSize = 0; + for (EACGBaseNode *node : eaCg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++argNodeSize; + } + Fixup(outNodeSizeIdx, argNodeSize); + // out this function's call site's arg list + outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t callSiteSize = 0; + for (auto nodePair : eaCg->GetCallSite2Nodes()) { + uint32 id = nodePair.first; + MapleVector *calleeArgNode = nodePair.second; + WriteInt(id); + size_t outCalleeArgSizeIdx = buf.size(); + WriteInt(0); + size_t calleeArgSize = 0; + for (EACGBaseNode *node : *calleeArgNode) { + OutEaCgNode(*node); + ++calleeArgSize; + } + Fixup(outCalleeArgSizeIdx, calleeArgSize); + ++callSiteSize; + } + Fixup(outNodeSizeIdx, callSiteSize); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinEaCgStart); +} + +void BinaryMplExport::WriteSymField(uint64 contentIdx) +{ + Fixup(contentIdx, buf.size()); + WriteNum(kBinSymStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + uint64 outsymSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutSym + int32 size = 0; + + if (not2mplt) { + for (auto sit = GetMIRModule().GetSymbolDefOrder().begin(); sit != GetMIRModule().GetSymbolDefOrder().end(); + ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx()); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + // Verify: all wpofake variables should have been deleted from globaltable + DEBUG_ASSERT(!(s->IsWpoFakeParm() || s->IsWpoFakeRet()) || s->IsDeleted(), "wpofake var not deleted"); + MIRStorageClass storageClass = s->GetStorageClass(); + MIRSymKind sKind = s->GetSKind(); + if (s->IsDeleted() || storageClass == kScUnused || (s->GetIsImported() && !s->GetAppearsInCode()) || + (sKind == kStFunc && (storageClass == kScExtern || !s->GetAppearsInCode()))) { + continue; + } + OutputSymbol(s); + size++; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outsymSizeIdx, size); + WriteNum(~kBinSymStart); + return; +} + +void BinaryMplExport::WriteContentField4mplt(int fieldNum, uint64 *fieldStartP) +{ + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinStrStart); + fieldStartP[0] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinTypeStart); + fieldStartP[1] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinCgStart); + fieldStartP[2] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + 
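+    // Each field reserves a 4-byte placeholder (ExpandFourBuffSize) that Fixup back-patches
+    // once the real size or offset is known, and every field is closed with the bitwise-NOT
+    // of its start marker so the importer can verify the framing.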
WriteNum(~kBinContentStart); +} + +void BinaryMplExport::WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP) +{ + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP) +{ + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinStrStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinTypeStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kFourthField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kFifthField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::Export(const std::string &fname, std::unordered_set *dumpFuncSet) +{ + uint64 fieldStartPoint[5]; + if (!not2mplt) { + WriteInt(kMpltMagicNumber); + WriteContentField4mplt(kFourthFieldInt, fieldStartPoint); + WriteStrField(fieldStartPoint[kFirstField]); + WriteTypeField(fieldStartPoint[kSecondField]); + WriteCgField(fieldStartPoint[kThirdField], nullptr); + importFileName = fname; + } else { + WriteInt(kMpltMagicNumber + 0x10); + if (mod.IsJavaModule()) { + WriteContentField4nonmplt(kFourthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteSymField(fieldStartPoint[kSecondField]); + WriteFunctionBodyField(fieldStartPoint[kThirdField], dumpFuncSet); + } else { + WriteContentField4nonJava(kSixthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteSymField(fieldStartPoint[kFourthField]); + WriteFunctionBodyField(fieldStartPoint[kFifthField], dumpFuncSet); + } + } + WriteNum(kBinFinish); + DumpBuf(fname); +} + +void BinaryMplExport::AppendAt(const std::string &name, int32 offset) +{ + FILE *f = fopen(name.c_str(), "r+b"); + if (f == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Error while opening the binary file: " << name << '\n'; + FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str()); + } + int seekRet = fseek(f, static_cast(offset), SEEK_SET); + CHECK_FATAL(seekRet == 0, "Call fseek failed."); + size_t size = buf.size(); + size_t k = fwrite(&buf[0], sizeof(uint8), size, f); + fclose(f); + if (k != size) { + LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n'; + } +} + +void BinaryMplExport::OutputTypePairs(const MIRInstantVectorType &type) +{ + size_t size = type.GetInstantVec().size(); + WriteNum(size); + for (const TypePair &typePair : type.GetInstantVec()) { + OutputType(typePair.first); + OutputType(typePair.second); + } 
+} + +void BinaryMplExport::OutputTypeAttrs(const TypeAttrs &ta) +{ + WriteNum(ta.GetAttrFlag()); + WriteNum(ta.GetAlignValue()); + WriteNum(ta.GetPack()); +} + +void BinaryMplExport::OutputType(TyIdx tyIdx) +{ + if (tyIdx == 0u) { + WriteNum(0); + return; + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty != nullptr, "If gets nulltype, should have been returned!"); + auto it = typMark.find(ty); + if (it != typMark.end()) { + if (ty->GetKind() != kTypeFunction) { + WriteNum(-(it->second)); + return; + } + ++BinaryMplExport::typeMarkOffset; + } else { + size_t mark = typMark.size() + BinaryMplExport::typeMarkOffset; + typMark[ty] = mark; + } + + auto func = CreateProductFunction(ty->GetKind()); + if (func != nullptr) { + func(*ty, *this); + } else { + DEBUG_ASSERT(false, "Type's kind not yet implemented: %d", ty->GetKind()); + } +} + +void UpdateMplt::UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg) +{ + BinaryMplImport &binImport = binMplt.GetBinImport(); + BinaryMplExport &binExport = binMplt.GetBinExport(); + binImport.SetBufI(0); + if (binImport.IsBufEmpty() || binImport.ReadInt() != kMpltMagicNumber) { + INFO(kLncInfo, " This Module depends on nothing"); + return; + } + int64 cgStart = binImport.GetContent(kBinCgStart); + DEBUG_ASSERT(cgStart != 0, "Should be updated in import processing."); + binImport.SetBufI(cgStart); + [[maybe_unused]] int64 checkReadNum = binImport.ReadNum(); + DEBUG_ASSERT(checkReadNum == kBinCgStart, "Should be cg start point."); + int32 totalSize = binImport.ReadInt(); + constexpr int32 headLen = 4; + binImport.SetBufI(binImport.GetBufI() + totalSize - headLen); + checkReadNum = binImport.ReadNum(); + DEBUG_ASSERT(checkReadNum == ~kBinCgStart, "Should be end of cg."); + binExport.Init(); + std::map tmp; + binExport.func2SEMap = &tmp; + binExport.inIPA = true; + binExport.WriteCgField(0, &cg); + binExport.Init(); + binExport.WriteSeField(); + binExport.eaNodeMark.clear(); + binExport.eaNodeMark[nullptr] = 0; + binExport.gStrMark.clear(); + binExport.gStrMark[GStrIdx(0)] = 0; + binExport.WriteEaField(cg); + binExport.WriteNum(kBinFinish); + std::string filename(binMplt.GetImportFileName()); + binExport.AppendAt(filename, cgStart); +} + +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_import.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_import.cpp new file mode 100644 index 0000000000000000000000000000000000000000..31946dc91f9732489848148d0a87e4a26245ec39 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/bin_mpl_import.cpp @@ -0,0 +1,1734 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "bin_mpl_import.h"
+#include <sstream>
+#include <vector>
+#include <unordered_set>
+#include <limits>
+#include "bin_mplt.h"
+#include "mir_function.h"
+#include "namemangler.h"
+#include "opcode_info.h"
+#include "mir_pragma.h"
+#include "mir_builder.h"
+
+namespace maple {
+uint8 BinaryMplImport::Read()
+{
+    CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplImport::Read()");
+    return buf[bufI++];
+}
+
+// Little endian
+int32 BinaryMplImport::ReadInt()
+{
+    uint32 x0 = static_cast<uint32>(Read());
+    uint32 x1 = static_cast<uint32>(Read());
+    uint32 x2 = static_cast<uint32>(Read());
+    uint32 x3 = static_cast<uint32>(Read());
+    return (((((x3 << 8u) + x2) << 8u) + x1) << 8u) + x0;
+}
+
+int64 BinaryMplImport::ReadInt64()
+{
+    // casts to avoid sign extension
+    uint32 x0 = static_cast<uint32>(ReadInt());
+    uint64 x1 = static_cast<uint32>(ReadInt());
+    return static_cast<int64>((x1 << 32) + x0);
+}
+
+// LEB128
+int64 BinaryMplImport::ReadNum()
+{
+    uint64 n = 0;
+    int64 y = 0;
+    uint64 b = static_cast<uint64>(Read());
+    while (b >= 0x80) {
+        y += ((b - 0x80) << n);
+        n += 7;
+        b = static_cast<uint64>(Read());
+    }
+    b = (b & 0x3F) - (b & 0x40);
+    return y + (b << n);
+}
+
+void BinaryMplImport::ReadAsciiStr(std::string &str)
+{
+    int64 n = ReadNum();
+    for (int64 i = 0; i < n; i++) {
+        uint8 ch = Read();
+        str.push_back(static_cast<char>(ch));
+    }
+}
+
+void BinaryMplImport::ReadFileAt(const std::string &name, int32 offset)
+{
+    FILE *f = fopen(name.c_str(), "rb");
+    CHECK_FATAL(f != nullptr, "Error while reading the binary file: %s", name.c_str());
+
+    int seekRet = fseek(f, 0, SEEK_END);
+    CHECK_FATAL(seekRet == 0, "call fseek failed");
+
+    long size = ftell(f);
+    size -= offset;
+
+    CHECK_FATAL(size >= 0, "should not be negative");
+
+    seekRet = fseek(f, offset, SEEK_SET);
+    CHECK_FATAL(seekRet == 0, "call fseek failed");
+    buf.resize(size);
+
+    size_t result = fread(&buf[0], sizeof(uint8), static_cast<size_t>(size), f);
+    fclose(f);
+    CHECK_FATAL(result == static_cast<size_t>(size), "Error while reading the binary file: %s", name.c_str());
+}
+
+void BinaryMplImport::ImportConstBase(MIRConstKind &kind, MIRTypePtr &type)
+{
+    kind = static_cast<MIRConstKind>(ReadNum());
+    TyIdx tyidx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava();
+    type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx);
+}
+
+MIRConst *BinaryMplImport::ImportConst(MIRFunction *func)
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return nullptr;
+    }
+
+    MIRConstKind kind;
+    MIRType *type = nullptr;
+    MemPool *memPool = mod.GetMemPool();
+
+    ImportConstBase(kind, type);
+    switch (tag) {
+        case kBinKindConstInt:
+            return GlobalTables::GetIntConstTable().GetOrCreateIntConst(ReadNum(), *type);
+        case kBinKindConstAddrof: {
+            MIRSymbol *sym = InSymbol(func);
+            CHECK_FATAL(sym != nullptr, "null ptr check");
+            FieldID fi = ReadNum();
+            int32 ofst = static_cast<int32>(ReadNum());
+            // do not use "type"; instead, get exprTy from sym
+            TyIdx ptyIdx = sym->GetTyIdx();
+            MIRPtrType ptrType(ptyIdx, (mod.IsJavaModule() ? PTY_ref : GetExactPtrPrimType()));
+            ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType);
+            MIRType *exprTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx);
+            return memPool->New<MIRAddrofConst>(sym->GetStIdx(), fi, *exprTy, ofst);
+        }
+        case kBinKindConstAddrofLocal: {
+            MIRSymbol *sym = ImportLocalSymbol(func);
+            FieldID fi = static_cast<FieldID>(ReadNum());
+            int32 ofst = static_cast<int32>(ReadNum());
+            return memPool->New<MIRAddrofConst>(sym->GetStIdx(), fi, *type, ofst);
+        }
+        case kBinKindConstAddrofFunc: {
+            PUIdx puIdx = ImportFunction();
+            MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx];
+            f->GetFuncSymbol()->SetAppearsInCode(true);
+            mod.SetCurFunction(func);
+            return memPool->New<MIRAddroffuncConst>(puIdx, *type);
+        }
+        case kBinKindConstAddrofLabel: {
+            LabelIdx lidx = ImportLabel(func);
+            PUIdx puIdx = func->GetPuidx();
+            MIRLblConst *lblConst = memPool->New<MIRLblConst>(lidx, puIdx, *type);
+            (void)func->GetLabelTab()->addrTakenLabels.insert(lidx);
+            return lblConst;
+        }
+        case kBinKindConstStr: {
+            UStrIdx ustr = ImportUsrStr();
+            return memPool->New<MIRStrConst>(ustr, *type);
+        }
+        case kBinKindConstStr16: {
+            Conststr16Node *cs;
+            cs = memPool->New<Conststr16Node>();
+            cs->SetPrimType(type->GetPrimType());
+            int64 len = ReadNum();
+            std::ostringstream ostr;
+            for (int64 i = 0; i < len; ++i) {
+                ostr << Read();
+            }
+            std::u16string str16;
+            (void)namemangler::UTF8ToUTF16(str16, ostr.str());
+            cs->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16));
+            return memPool->New<MIRStr16Const>(cs->GetStrIdx(), *type);
+        }
+        case kBinKindConstFloat: {
+            union {
+                float fvalue;
+                int32 ivalue;
+            } value;
+
+            value.ivalue = ReadNum();
+            return GlobalTables::GetFpConstTable().GetOrCreateFloatConst(value.fvalue);
+        }
+        case kBinKindConstDouble: {
+            union {
+                double dvalue;
+                int64 ivalue;
+            } value;
+
+            value.ivalue = ReadNum();
+            return GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(value.dvalue);
+        }
+        case kBinKindConstAgg: {
+            MIRAggConst *aggConst = mod.GetMemPool()->New<MIRAggConst>(mod, *type);
+            int64 size = ReadNum();
+            for (int64 i = 0; i < size; ++i) {
+                auto fieldId = static_cast<uint32>(ReadNum());
+                auto fieldConst = ImportConst(func);
+                aggConst->AddItem(fieldConst, fieldId);
+            }
+            return aggConst;
+        }
+        case kBinKindConstSt: {
+            MIRStConst *stConst = mod.GetMemPool()->New<MIRStConst>(mod, *type);
+            int64 size = ReadNum();
+            for (int64 i = 0; i < size; ++i) {
+                stConst->PushbackSymbolToSt(InSymbol(func));
+            }
+            size = ReadNum();
+            for (int64 i = 0; i < size; ++i) {
+                stConst->PushbackOffsetToSt(ReadNum());
+            }
+            return stConst;
+        }
+        default:
+            CHECK_FATAL(false, "Unhandled const type");
+    }
+}
+
+GStrIdx BinaryMplImport::ImportStr()
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return GStrIdx(0);
+    }
+    if (tag < 0) {
+        CHECK_FATAL(-tag < static_cast<int64>(gStrTab.size()), "index out of range in BinaryMplt::ImportStr");
+        return gStrTab[-tag];
+    }
+    CHECK_FATAL(tag == kBinString, "expecting kBinString");
+    std::string str;
+    ReadAsciiStr(str);
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str);
+    gStrTab.push_back(strIdx);
+    return strIdx;
+}
+
+UStrIdx BinaryMplImport::ImportUsrStr()
+{
+    int64 tag = ReadNum();
+    if (tag == 0) {
+        return UStrIdx(0);
+    }
+    if (tag < 0) {
+        CHECK_FATAL(-tag < static_cast<int64>(uStrTab.size()), "index out of range in BinaryMplt::InUsrStr");
+        return uStrTab[-tag];
+    }
+    CHECK_FATAL(tag == kBinUsrString, "expecting kBinUsrString");
+    std::string str;
+    ReadAsciiStr(str);
+    UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str);
+    uStrTab.push_back(strIdx);
+    return strIdx;
+}
+
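ImportStr and ImportUsrStr above mirror the exporter's memoization protocol (OutputStr and gStrMark on the export side): the first occurrence of a value is written under a positive tag and appended to a side table, every later occurrence is encoded as the negative index of its table entry, and 0 means "none". A minimal self-contained sketch of the round trip — made-up names and a plain int64 stream instead of the real LEB128 WriteNum/ReadNum encoding, so not the Maple API:

// Toy version of the tag/back-reference scheme; kTagString stands in for kBinString.
#include <cstdint>
#include <string>
#include <vector>

constexpr int64_t kTagString = 5;

struct ToyExporter {
    std::vector<int64_t> out;       // stand-in for the byte buffer `buf`
    std::vector<std::string> seen;  // export-side memo table (role of gStrMark)

    void WriteStr(const std::string &s)
    {
        for (size_t i = 0; i < seen.size(); ++i) {
            if (seen[i] == s) {
                out.push_back(-static_cast<int64_t>(i + 1));  // back-reference
                return;
            }
        }
        seen.push_back(s);          // first occurrence: tag, length, payload
        out.push_back(kTagString);
        out.push_back(static_cast<int64_t>(s.size()));
        for (char c : s) {
            out.push_back(c);
        }
    }
};

struct ToyImporter {
    const std::vector<int64_t> &in;
    size_t pos = 0;
    std::vector<std::string> table;  // import-side table (role of gStrTab); the real
                                     // tables reserve a dummy slot 0 so -tag indexes directly

    std::string ReadStr()
    {
        int64_t tag = in[pos++];
        if (tag < 0) {
            return table[static_cast<size_t>(-tag) - 1];  // resolve back-reference
        }
        // a real importer would CHECK_FATAL(tag == kTagString, ...) here
        int64_t n = in[pos++];
        std::string s;
        for (int64_t i = 0; i < n; ++i) {
            s.push_back(static_cast<char>(in[pos++]));
        }
        table.push_back(s);          // grow the table in lockstep with the exporter
        return s;
    }
};

int main()
{
    ToyExporter w;
    w.WriteStr("maple");
    w.WriteStr("maple");             // second write emits only the back-reference -1
    ToyImporter r{w.out};
    return r.ReadStr() == r.ReadStr() ? 0 : 1;  // both reads yield "maple"
}

The same convention backs the other side tables in this file — typTab, funcTab, symTab, callInfoTab, eaCgTab — which is why Reset() pushes a dummy entry at index 0 of each table and the exporter seeds entries such as callInfoMark[0xffffffff] = 0 and eaNodeMark[nullptr] = 0 before writing.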
+MIRPragmaElement *BinaryMplImport::ImportPragmaElement() +{ + MIRPragmaElement *element = mod.GetPragmaMemPool()->New(mod); + element->SetNameStrIdx(ImportStr()); + element->SetTypeStrIdx(ImportStr()); + element->SetType(static_cast(ReadNum())); + if (element->GetType() == kValueString || element->GetType() == kValueType || element->GetType() == kValueField || + element->GetType() == kValueMethod || element->GetType() == kValueEnum) { + element->SetI32Val(static_cast(ImportStr())); + } else { + element->SetU64Val(static_cast(ReadInt64())); + } + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + element->SubElemVecPushBack(ImportPragmaElement()); + } + return element; +} + +MIRPragma *BinaryMplImport::ImportPragma() +{ + MIRPragma *p = mod.GetPragmaMemPool()->New(mod); + p->SetKind(static_cast(ReadNum())); + p->SetVisibility(ReadNum()); + p->SetStrIdx(ImportStr()); + if (mod.IsJavaModule()) { + p->SetTyIdx(ImportType()); + p->SetTyIdxEx(ImportType()); + } else { + p->SetTyIdx(ImportTypeNonJava()); + p->SetTyIdxEx(ImportTypeNonJava()); + } + p->SetParamNum(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + p->PushElementVector(ImportPragmaElement()); + } + return p; +} + +void BinaryMplImport::ImportFieldPair(FieldPair &fp) +{ + fp.first = ImportStr(); + fp.second.first = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + fp.second.second.SetAttrFlag(ReadNum()); + fp.second.second.SetAlignValue(ReadNum()); + FieldAttrs fa = fp.second.second; + if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && + (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { + int64 tag = ReadNum(); + if (tag == kBinInitConst) { + GlobalTables::GetConstPool().InsertConstPool(fp.first, ImportConst(nullptr)); + } + } +} + +void BinaryMplImport::ImportMethodPair(MethodPair &memPool) +{ + std::string funcName; + ReadAsciiStr(funcName); + TyIdx funcTyIdx = ImportType(); + int64 x = ReadNum(); + CHECK_FATAL(x >= 0, "ReadNum error, x: %d", x); + auto attrFlag = static_cast(x); + + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *prevFuncSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + MIRSymbol *funcSt = nullptr; + MIRFunction *fn = nullptr; + + if (prevFuncSt != nullptr && (prevFuncSt->GetStorageClass() == kScText && prevFuncSt->GetSKind() == kStFunc)) { + funcSt = prevFuncSt; + fn = funcSt->GetFunction(); + } else { + funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + funcSt->SetTyIdx(funcTyIdx); + funcSt->SetIsImported(imported); + funcSt->SetIsImportedDecl(imported); + methodSymbols.push_back(funcSt); + + fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = static_cast(funcSt->GetType()); + fn->SetMIRFuncType(funcType); + fn->SetFileIndex(0); + fn->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + fn->SetFuncAttrs(attrFlag); + } + memPool.first.SetFullIdx(funcSt->GetStIdx().FullIdx()); + memPool.second.first.reset(funcTyIdx); + memPool.second.second.SetAttrFlag(attrFlag); +} + +void BinaryMplImport::UpdateMethodSymbols() +{ + for (auto sym : methodSymbols) { + MIRFunction *fn = sym->GetFunction(); + CHECK_FATAL(fn != nullptr, "fn is 
null"); + auto *funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx())); + fn->SetMIRFuncType(funcType); + fn->SetReturnStruct(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx())); + if (fn->GetFormalDefVec().size() != 0) { + continue; // already updated in ImportFunction() + } + for (size_t i = 0; i < funcType->GetParamTypeList().size(); ++i) { + FormalDef formalDef(nullptr, funcType->GetParamTypeList()[i], funcType->GetParamAttrsList()[i]); + fn->GetFormalDefVec().push_back(formalDef); + } + } +} + +void BinaryMplImport::ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize) +{ + int64 size = ReadNum(); + int64 initSize = fields.size() + methodSize; + for (int64 i = 0; i < size; ++i) { + FieldPair fp; + ImportFieldPair(fp); + if (initSize == 0) { + fields.push_back(fp); + } + } +} + +void BinaryMplImport::ImportMethodsOfStructType(MethodVector &methods) +{ + int64 size = ReadNum(); + bool isEmpty = methods.empty(); + for (int64 i = 0; i < size; ++i) { + MethodPair memPool; + ImportMethodPair(memPool); + if (isEmpty) { + methods.push_back(memPool); + } + } +} + +void BinaryMplImport::ImportStructTypeData(MIRStructType &type) +{ + uint32 methodSize = type.GetMethods().size(); + ImportFieldsOfStructType(type.GetFields(), methodSize); + ImportFieldsOfStructType(type.GetStaticFields(), methodSize); + ImportFieldsOfStructType(type.GetParentFields(), methodSize); + ImportMethodsOfStructType(type.GetMethods()); + type.SetIsImported(imported); +} + +void BinaryMplImport::ImportInterfacesOfClassType(std::vector &interfaces) +{ + int64 size = ReadNum(); + bool isEmpty = interfaces.empty(); + for (int64 i = 0; i < size; ++i) { + TyIdx idx = ImportType(); + if (isEmpty) { + interfaces.push_back(idx); + } + } +} + +void BinaryMplImport::ImportInfoIsStringOfStructType(MIRStructType &type) +{ + int64 size = ReadNum(); + bool isEmpty = type.GetInfoIsString().empty(); + + for (int64 i = 0; i < size; ++i) { + auto isString = static_cast(ReadNum()); + + if (isEmpty) { + type.PushbackIsString(isString); + } + } +} + +void BinaryMplImport::ImportInfoOfStructType(MIRStructType &type) +{ + uint64 size = static_cast(ReadNum()); + bool isEmpty = type.GetInfo().empty(); + for (size_t i = 0; i < size; ++i) { + GStrIdx idx = ImportStr(); + int64 x = (type.GetInfoIsStringElemt(i)) ? static_cast(ImportStr()) : ReadNum(); + CHECK_FATAL(x >= 0, "ReadNum nagative, x: %d", x); + CHECK_FATAL(x <= std::numeric_limits::max(), "ReadNum too large, x: %d", x); + if (isEmpty) { + type.PushbackMIRInfo(MIRInfoPair(idx, static_cast(x))); + } + } +} + +void BinaryMplImport::ImportPragmaOfStructType(MIRStructType &type) +{ + int64 size = ReadNum(); + bool isEmpty = type.GetPragmaVec().empty(); + for (int64 i = 0; i < size; ++i) { + MIRPragma *pragma = ImportPragma(); + if (isEmpty) { + type.PushbackPragma(pragma); + } + } +} + +void BinaryMplImport::SetClassTyidxOfMethods(MIRStructType &type) +{ + if (type.GetTypeIndex() != 0u) { + // set up classTyIdx for methods + for (size_t i = 0; i < type.GetMethods().size(); ++i) { + StIdx stidx = type.GetMethodsElement(i).first; + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + CHECK_FATAL(st != nullptr, "st is null"); + CHECK_FATAL(st->GetSKind() == kStFunc, "unexpected st->sKind"); + st->GetFunction()->SetClassTyIdx(type.GetTypeIndex()); + } + } +} + +void BinaryMplImport::ImportClassTypeData(MIRClassType &type) +{ + TyIdx tempType = ImportType(); + // Keep the parent_tyidx we first met. 
+ if (type.GetParentTyIdx() == 0u) { + type.SetParentTyIdx(tempType); + } + ImportInterfacesOfClassType(type.GetInterfaceImplemented()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::ImportInterfaceTypeData(MIRInterfaceType &type) +{ + ImportInterfacesOfClassType(type.GetParentsTyIdx()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::Reset() +{ + buf.clear(); + bufI = 0; + gStrTab.clear(); + uStrTab.clear(); + typTab.clear(); + funcTab.clear(); + symTab.clear(); + methodSymbols.clear(); + definedLabels.clear(); + gStrTab.push_back(GStrIdx(0)); // Dummy + uStrTab.push_back(UStrIdx(0)); // Dummy + symTab.push_back(nullptr); // Dummy + funcTab.push_back(nullptr); // Dummy + eaCgTab.push_back(nullptr); + for (int32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { + typTab.push_back(TyIdx(pti)); + } +} + +TypeAttrs BinaryMplImport::ImportTypeAttrs() +{ + TypeAttrs ta; + ta.SetAttrFlag(static_cast(ReadNum())); + ta.SetAlignValue(static_cast(ReadNum())); + ta.SetPack(static_cast(ReadNum())); + return ta; +} + +void BinaryMplImport::ImportTypePairs(std::vector &insVecType) +{ + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + TyIdx t0 = ImportType(); + TyIdx t1 = ImportType(); + TypePair tp(t0, t1); + insVecType.push_back(tp); + } +} + +void BinaryMplImport::CompleteAggInfo(TyIdx tyIdx) +{ + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type != nullptr, "MIRType is null"); + if (type->GetKind() == kTypeInterface) { + auto *interfaceType = static_cast(type); + ImportStructTypeData(*interfaceType); + ImportInterfaceTypeData(*interfaceType); + } else if (type->GetKind() == kTypeClass) { + auto *classType = static_cast(type); + ImportStructTypeData(*classType); + ImportClassTypeData(*classType); + } else if (type->GetKind() == kTypeStruct || type->GetKind() == kTypeUnion) { + auto *structType = static_cast(type); + ImportStructTypeData(*structType); + } else { + ERR(kLncErr, "in BinaryMplImport::CompleteAggInfo, MIRType error"); + } +} + +inline static bool IsIncomplete(const MIRType &type) +{ + return (type.GetKind() == kTypeInterfaceIncomplete || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeStructIncomplete); +} + +TyIdx BinaryMplImport::ImportType(bool forPointedType) +{ + int64 tag = ReadNum(); + static MIRType *typeNeedsComplete = nullptr; + static int ptrLev = 0; + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab.at(static_cast(-tag)); + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetTypeAttrs(ImportTypeAttrs()); + ++ptrLev; + type.SetPointedTyIdx(ImportType(true)); + --ptrLev; + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + if (typeNeedsComplete != nullptr && ptrLev == 0) { + TyIdx tyIdxNeedsComplete = typeNeedsComplete->GetTypeIndex(); + typeNeedsComplete = 
nullptr; + CompleteAggInfo(tyIdxNeedsComplete); + } + return origType->GetTypeIndex(); + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemTyIdx(ImportType(forPointedType)); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetRetTyIdx(ImportType()); + type.funcAttrs.SetAttrFlag(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportType()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeParam: { + MIRTypeParam type(strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + return origType->GetTypeIndex(); + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + origType->SetGenericTyIdx(ImportType()); + return origType->GetTypeIndex(); + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRStructType &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeStructIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + } + 
} + return origType.GetTypeIndex(); + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeClassIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportClassTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeInterfaceIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportInterfaceTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +TyIdx BinaryMplImport::ImportTypeNonJava() +{ + int64 tag = ReadNum(); + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab[static_cast(-tag)]; + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + TyIdx tyIdxUsed(GlobalTables::GetTypeTable().GetTypeTableSize()); + if (tag != kBinKindTypeScalar) { + GlobalTables::GetTypeTable().PushNull(); + typTab.push_back(tyIdxUsed); + } + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + type.SetPointedTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + type.SetElemTyIdx(ImportTypeNonJava()); + type.SetTypeAttrs(ImportTypeAttrs()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetRetTyIdx(ImportTypeNonJava()); + type.funcAttrs.SetAttrFlag(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportTypeNonJava()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeParam: { + MIRTypeParam type(strIdx); + 
type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + type.SetGenericTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + if (kind != kTypeStructIncomplete) { + ImportStructTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeClassIncomplete) { + ImportStructTypeData(type); + ImportClassTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeInterfaceIncomplete) { + ImportStructTypeData(type); + ImportInterfaceTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +void BinaryMplImport::ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal) +{ + primType = static_cast(ReadNum()); + strIdx = ImportStr(); + nameIsLocal = ReadNum(); +} + +inline static bool IsObject(const MIRType &type) +{ + return (type.GetKind() == kTypeClass || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeInterface || type.GetKind() == kTypeInterfaceIncomplete); +} + +MIRType &BinaryMplImport::InsertInTypeTables(MIRType &type) +{ + MIRType *resultTypePtr = &type; + TyIdx prevTyIdx = mod.GetTypeNameTab()->GetTyIdxFromGStrIdx(type.GetNameStrIdx()); + if (prevTyIdx != 0u && !type.IsNameIsLocal()) { + MIRType *prevType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!prevType->IsMIRTypeByName() && + ((IsIncomplete(*prevType) && IsIncomplete(type)) || (!IsIncomplete(*prevType) && !IsIncomplete(type)) || + (!IsIncomplete(*prevType) && IsIncomplete(type)))) { + resultTypePtr = prevType->CopyMIRTypeNode(); + if (resultTypePtr->GetKind() == kTypeStruct || resultTypePtr->GetKind() == kTypeUnion || + resultTypePtr->GetKind() == kTypeStructIncomplete) { + tmpStruct.push_back(static_cast(resultTypePtr)); + } else if (resultTypePtr->GetKind() == kTypeClass || resultTypePtr->GetKind() == kTypeClassIncomplete) { + tmpClass.push_back(static_cast(resultTypePtr)); + } else 
if (resultTypePtr->GetKind() == kTypeInterface || + resultTypePtr->GetKind() == kTypeInterfaceIncomplete) { + tmpInterface.push_back(static_cast(resultTypePtr)); + } + } else { + // New definition wins + type.SetTypeIndex(prevTyIdx); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().empty() == false, "container check"); + GlobalTables::GetTypeTable().SetTypeWithTyIdx(prevTyIdx, *type.CopyMIRTypeNode()); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(resultTypePtr->GetNameStrIdx(), + resultTypePtr->GetTypeIndex()); + } + } + } else { + // New type, no previous definition or anonymous type + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&type); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (tyIdx + 1 == GlobalTables::GetTypeTable().GetTypeTable().size() && !resultTypePtr->IsNameIsLocal()) { + GStrIdx stridx = resultTypePtr->GetNameStrIdx(); + if (stridx != 0) { + mod.GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdx); + mod.PushbackTypeDefOrder(stridx); + if (IsObject(*resultTypePtr)) { + mod.AddClass(tyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdx); + } + } + } + } + } + return *resultTypePtr; +} + +void BinaryMplImport::SetupEHRootType() +{ + // setup eh root type with most recent Ljava_2Flang_2FObject_3B + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangObjectStr); + if (gStrIdx == 0u) { + return; + } + + TyIdx tyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx); + if (tyIdx != 0u) { + mod.SetThrowableTyIdx(tyIdx); + } +} + +MIRSymbol *BinaryMplImport::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, MIRStorageClass sclass, + MIRFunction *func, uint8 scpID) +{ + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (st != nullptr && st->GetStorageClass() == sclass && st->GetSKind() == mclass && scpID == kScopeGlobal) { + return st; + } + return mirBuilder.CreateSymbol(tyIdx, strIdx, mclass, sclass, func, scpID); +} + +MIRSymbol *BinaryMplImport::InSymbol(MIRFunction *func) +{ + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < symTab.size(), "index out of bounds"); + return symTab.at(-tag); + } else { + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol"); + int64 scope = ReadNum(); + GStrIdx stridx = ImportStr(); + UStrIdx secAttr = ImportUsrStr(); + UStrIdx asmAttr = ImportUsrStr(); + auto skind = static_cast(ReadNum()); + auto sclass = static_cast(ReadNum()); + TyIdx tyTmp(0); + MIRSymbol *sym = GetOrCreateSymbol(tyTmp, stridx, skind, sclass, func, scope); + if (secAttr != 0) { + sym->sectionAttr = secAttr; + } + if (asmAttr != 0) { + sym->SetAsmAttr(asmAttr); + } + symTab.push_back(sym); + sym->SetAttrs(ImportTypeAttrs()); + sym->SetIsTmp(ReadNum() != 0); + sym->SetIsImported(imported); + uint32 thepregno = 0; + if (skind == kStPreg) { + CHECK_FATAL(scope == kScopeLocal && func != nullptr, "Expecting kScopeLocal"); + thepregno = static_cast(ReadNum()); + } else if (skind == kStConst || skind == kStVar) { + sym->SetKonst(ImportConst(func)); + } else if (skind == kStFunc) { + PUIdx puidx = ImportFunction(); + mod.SetCurFunction(func); + if (puidx != 0) { + sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx)); + } + } + if (skind == kStVar || skind == kStFunc) 
{ + ImportSrcPos(sym->GetSrcPosition()); + } + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + sym->SetTyIdx(tyIdx); + if (skind == kStPreg) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + PregIdx pregidx = func->GetPregTab()->EnterPregNo(thepregno, mirType->GetPrimType(), mirType); + MIRPregTable *pregTab = func->GetPregTab(); + MIRPreg *preg = pregTab->PregFromPregIdx(pregidx); + preg->SetPrimType(mirType->GetPrimType()); + sym->SetPreg(preg); + } + return sym; + } +} + +PUIdx BinaryMplImport::ImportFunction() +{ + int64 tag = ReadNum(); + if (tag == 0) { + mod.SetCurFunction(nullptr); + return 0; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) <= funcTab.size(), "index out of bounds"); + if (static_cast(-tag) == funcTab.size()) { // function was exported before its symbol + return static_cast(0); + } + PUIdx puIdx = funcTab[static_cast(-tag)]->GetPuidx(); + mod.SetCurFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); + return puIdx; + } + CHECK_FATAL(tag == kBinFunction, "expecting kBinFunction"); + MIRSymbol *funcSt = InSymbol(nullptr); + CHECK_FATAL(funcSt != nullptr, "null ptr check"); + MIRFunction *func = nullptr; + if (funcSt->GetFunction() == nullptr) { + maple::MIRBuilder builder(&mod); + func = builder.CreateFunction(funcSt->GetStIdx()); + funcTab.push_back(func); + } else { + func = funcSt->GetFunction(); + funcTab.push_back(func); + } + funcSt->SetFunction(func); + methodSymbols.push_back(funcSt); + if (mod.IsJavaModule()) { + func->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + } + TyIdx funcTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->SetMIRFuncType(static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx))); + + func->SetStIdx(funcSt->GetStIdx()); + if (!inCG) { + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + if (!func->IsDirty()) { + func->SetDirty(true); + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + FuncAttrs tmp; + tmp.SetAttrFlag(ReadNum()); + if (func->IsNoDefArgEffect() != tmp.GetAttr(FUNCATTR_nodefargeffect)) { + tmp.SetAttr(FUNCATTR_nodefargeffect, true); + } + if (func->IsNoDefEffect() != tmp.GetAttr(FUNCATTR_nodefeffect)) { + tmp.SetAttr(FUNCATTR_nodefeffect, true); + } + if (func->IsNoRetGlobal() != tmp.GetAttr(FUNCATTR_noretglobal)) { + tmp.SetAttr(FUNCATTR_noretglobal, true); + } + if (func->IsNoThrowException() != tmp.GetAttr(FUNCATTR_nothrow_exception)) { + tmp.SetAttr(FUNCATTR_nothrow_exception, true); + } + if (func->IsIpaSeen() != tmp.GetAttr(FUNCATTR_ipaseen)) { + tmp.SetAttr(FUNCATTR_ipaseen); + } + if (func->IsPure() != tmp.GetAttr(FUNCATTR_pure)) { + tmp.SetAttr(FUNCATTR_pure, true); + } + if (func->IsNoRetArg() != tmp.GetAttr(FUNCATTR_noretarg)) { + tmp.SetAttr(FUNCATTR_noretarg, true); + } + if (func->IsNoPrivateDefEffect() != tmp.GetAttr(FUNCATTR_noprivate_defeffect)) { + tmp.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + func->SetFuncAttrs(tmp); + } + } + + auto &attributes = func->GetFuncAttrs(); + if (attributes.GetAttr(FUNCATTR_constructor_priority)) { + attributes.SetConstructorPriority(static_cast(ReadNum())); + } + if (attributes.GetAttr(FUNCATTR_destructor_priority)) { + attributes.SetDestructorPriority(static_cast(ReadNum())); + } + + func->SetFlag(ReadNum()); + if (mod.IsJavaModule()) { + (void)ImportType(); // not set the field to mimic parser + } else { + (void)ImportTypeNonJava(); // not set the field to mimic parser + } + size_t size = 
static_cast(ReadNum()); + if (func->GetFormalDefVec().size() == 0) { + for (size_t i = 0; i < size; i++) { + GStrIdx strIdx = ImportStr(); + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + FormalDef formalDef(strIdx, nullptr, tyIdx, TypeAttrs()); + formalDef.formalAttrs.SetAttrFlag(static_cast(ReadNum())); + func->GetFormalDefVec().push_back(formalDef); + } + } else { + CHECK_FATAL(func->GetFormalDefVec().size() >= size, "ImportFunction: inconsistent number of formals"); + for (size_t i = 0; i < size; i++) { + func->GetFormalDefVec()[i].formalStrIdx = ImportStr(); + func->GetFormalDefVec()[i].formalTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->GetFormalDefVec()[i].formalAttrs.SetAttrFlag(static_cast(ReadNum())); + } + } + + mod.SetCurFunction(func); + return func->GetPuidx(); +} + +inline void BinaryMplImport::SkipTotalSize() +{ + ReadInt(); +} + +void BinaryMplImport::ReadStrField() +{ + SkipTotalSize(); + + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + GStrIdx stridx = ImportStr(); + GlobalTables::GetConstPool().PutLiteralNameAsImported(stridx); + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinStrStart, "pattern mismatch in Read STR"); +} + +void BinaryMplImport::ReadHeaderField() +{ + SkipTotalSize(); + mod.SetFlavor(static_cast(ReadNum())); + mod.SetSrcLang(static_cast(ReadNum())); + mod.SetID(static_cast(ReadNum())); + if (mod.GetFlavor() == kFlavorLmbc) { + mod.SetGlobalMemSize(static_cast(ReadNum())); + mod.SetWithDbgInfo(static_cast(ReadNum())); + } + mod.SetNumFuncs(static_cast(ReadNum())); + std::string inStr; + ReadAsciiStr(inStr); + mod.SetEntryFuncName(inStr); + ImportInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); + + int32 size = static_cast(ReadNum()); + MIRInfoPair infopair; + for (int32 i = 0; i < size; i++) { + infopair.first = ImportStr(); + infopair.second = static_cast(ReadNum()); + mod.PushbackFileInfo(infopair); + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + GStrIdx gStrIdx = ImportStr(); + mod.GetImportFiles().push_back(gStrIdx); + std::string importfilename = GlobalTables::GetStrTable().GetStringFromStrIdx(gStrIdx); + // record the imported file for later reading summary info, if exists + mod.PushbackImportedMplt(importfilename); + BinaryMplt *binMplt = new BinaryMplt(mod); + binMplt->GetBinImport().imported = true; + + INFO(kLncInfo, "importing %s", importfilename.c_str()); + if (!binMplt->GetBinImport().Import(importfilename, false)) { // not a binary mplt + FATAL(kLncFatal, "cannot open binary MPLT file: %s\n", importfilename.c_str()); + } else { + INFO(kLncInfo, "finished import of %s", importfilename.c_str()); + } + if (i == 0) { + binMplt->SetImportFileName(importfilename); + mod.SetBinMplt(binMplt); + } else { + delete binMplt; + } + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + std::string str; + ReadAsciiStr(str); + mod.GetAsmDecls().emplace_back(MapleString(str, mod.GetMemPool())); + } + + int32 tag = static_cast(ReadNum()); + CHECK_FATAL(tag == ~kBinHeaderStart, "pattern mismatch in Read Import"); + return; +} + +void BinaryMplImport::ReadTypeField() +{ + SkipTotalSize(); + + int32 size = ReadInt(); + if (mod.IsJavaModule()) { + for (int64 i = 0; i < size; ++i) { + ImportType(); + } + } else { + for (int64 i = 0; i < size; ++i) { + (void)ImportTypeNonJava(); + } + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinTypeStart, "pattern mismatch in Read TYPE"); +} + +CallInfo 
*BinaryMplImport::ImportCallInfo() +{ + int64 tag = ReadNum(); + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < callInfoTab.size(), "index out of bounds"); + return callInfoTab.at(-tag); + } + CHECK_FATAL(tag == kBinCallinfo, "expecting kBinCallinfo"); + CallType ctype = static_cast(ReadNum()); // call type + uint32 loopDepth = static_cast(ReadInt()); + uint32 id = static_cast(ReadInt()); + bool argLocal = Read() == 1; + MIRSymbol *funcSym = InSymbol(nullptr); + CHECK_FATAL(funcSym != nullptr, "func_sym is null in BinaryMplImport::InCallInfo"); + CallInfo *ret = mod.GetMemPool()->New(ctype, *funcSym->GetFunction(), static_cast(nullptr), + loopDepth, id, argLocal); + callInfoTab.push_back(ret); + return ret; +} + +void BinaryMplImport::MergeDuplicated(PUIdx methodPuidx, std::vector &targetSet, + std::vector &newSet) +{ + if (targetSet.empty()) { + (void)targetSet.insert(targetSet.begin(), newSet.begin(), newSet.end()); + std::unordered_set tmp; + mod.AddValueToMethod2TargetHash(methodPuidx, tmp); + for (size_t i = 0; i < newSet.size(); ++i) { + mod.InsertTargetHash(methodPuidx, newSet[i]->GetID()); + } + } else { + for (size_t i = 0; i < newSet.size(); ++i) { + CallInfo *newItem = newSet[i]; + if (!mod.HasTargetHash(methodPuidx, newItem->GetID())) { + targetSet.push_back(newItem); + mod.InsertTargetHash(methodPuidx, newItem->GetID()); + } + } + } +} + +void BinaryMplImport::ReadCgField() +{ + SkipTotalSize(); + + int32 size = ReadInt(); + int64 tag = 0; + + for (int i = 0; i < size; ++i) { + tag = ReadNum(); + CHECK_FATAL(tag == kStartMethod, " should be start point of method"); + MIRSymbol *tmpInSymbol = InSymbol(nullptr); + CHECK_FATAL(tmpInSymbol != nullptr, "null ptr check"); + PUIdx methodPuidx = tmpInSymbol->GetFunction()->GetPuidx(); + CHECK_FATAL(methodPuidx, "should not be 0"); + if (mod.GetMethod2TargetMap().find(methodPuidx) == mod.GetMethod2TargetMap().end()) { + std::vector targetSetTmp; + mod.AddMemToMethod2TargetMap(methodPuidx, targetSetTmp); + } + int32 targSize = ReadInt(); + std::vector targetSet; + callInfoTab.clear(); + callInfoTab.push_back(nullptr); + for (int32 j = 0; j < targSize; ++j) { + CallInfo *callInfo = ImportCallInfo(); + targetSet.push_back(callInfo); + } + MergeDuplicated(methodPuidx, mod.GetMemFromMethod2TargetMap(methodPuidx), targetSet); + tag = ReadNum(); + CHECK_FATAL(tag == ~kStartMethod, " should be start point of method"); + } + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinCgStart, "pattern mismatch in Read CG"); +} + +void BinaryMplImport::ReadEaField() +{ + ReadInt(); + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + int nodesSize = ReadInt(); + EAConnectionGraph *newEaCg = + mod.GetMemPool()->New(&mod, &mod.GetMPAllocator(), funcName, true); + newEaCg->ResizeNodes(nodesSize, nullptr); + InEaCgNode(*newEaCg); + int eaSize = ReadInt(); + for (int j = 0; j < eaSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->funcArgNodes.push_back(node); + } + mod.SetEAConnectionGraph(funcName, newEaCg); + } + CHECK_FATAL(ReadNum() == ~kBinEaStart, "pattern mismatch in Read EA"); +} + +void BinaryMplImport::ReadSeField() +{ + SkipTotalSize(); + + int32 size = ReadInt(); +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "SE SIZE : " << size << '\n'; +#endif + for (int32 i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + uint8 specialEffect = Read(); + TyIdx tyIdx = kInitTyIdx; + if ((specialEffect & kPureFunc) == kPureFunc) { + tyIdx = ImportType(); + } + const std::string &funcStr = 
GlobalTables::GetStrTable().GetStringFromStrIdx(funcName); + if (funcStr == "Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V") { + specialEffect = 0; + } + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr)); + MIRFunction *func = funcSymbol != nullptr ? mirBuilder.GetFunctionFromSymbol(*funcSymbol) : nullptr; + if (func != nullptr) { + func->SetAttrsFromSe(specialEffect); + } else if ((specialEffect & kPureFunc) == kPureFunc) { + func = mirBuilder.GetOrCreateFunction(funcStr, tyIdx); + func->SetAttrsFromSe(specialEffect); + } + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSeStart, "pattern mismatch in Read TYPE"); +} + +void BinaryMplImport::InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart) +{ + if (firstPart) { + base.SetEAStatus(static_cast(ReadNum())); + base.SetID(ReadInt()); + } else { + // start to in points to + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + CHECK_FATAL(point2Node->IsObjectNode(), "must be"); + (void)base.pointsTo.insert(static_cast(point2Node)); + } + // start to in in + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + base.InsertInSet(point2Node); + } + // start to in out + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + base.InsertOutSet(point2Node); + } + } +} + +void BinaryMplImport::InEaCgActNode(EACGActualNode &actual) +{ + actual.isPhantom = Read() == 1; + actual.isReturn = Read() == 1; + actual.argIdx = Read(); + actual.callSiteInfo = static_cast(ReadInt()); +} + +void BinaryMplImport::InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg) +{ + field.SetFieldID(ReadInt()); + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *node = &InEaCgNode(newEaCg); + CHECK_FATAL(node->IsObjectNode(), "must be"); + (void)field.belongsTo.insert(static_cast(node)); + } + field.isPhantom = Read() == 1; +} + +void BinaryMplImport::InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg) +{ + Read(); + obj.isPhantom = true; + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *node = &InEaCgNode(newEaCg); + CHECK_FATAL(node->IsFieldNode(), "must be"); + auto *field = static_cast(node); + obj.fieldNodes[static_cast(field)->GetFieldID()] = field; + } + // start to in point by + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + (void)obj.pointsBy.insert(point2Node); + } +} + +void BinaryMplImport::InEaCgRefNode(EACGRefNode &ref) +{ + ref.isStaticField = Read() == 1 ? 
true : false;
+}
+
+EACGBaseNode &BinaryMplImport::InEaCgNode(EAConnectionGraph &newEaCg)
+{
+    int64 tag = ReadNum();
+    if (tag < 0) {
+        CHECK_FATAL(static_cast<size_t>(-tag) < eaCgTab.size(), "index out of bounds");
+        return *eaCgTab[-tag];
+    }
+    CHECK_FATAL(tag == kBinEaCgNode, "must be");
+    NodeKind kind = static_cast<NodeKind>(ReadNum());
+    EACGBaseNode *node = nullptr;
+    switch (kind) {
+        case kObejectNode:
+            node = new EACGObjectNode(&mod, &mod.GetMPAllocator(), &newEaCg);
+            break;
+        case kReferenceNode:
+            node = new EACGRefNode(&mod, &mod.GetMPAllocator(), &newEaCg);
+            break;
+        case kFieldNode:
+            node = new EACGFieldNode(&mod, &mod.GetMPAllocator(), &newEaCg);
+            break;
+        case kActualNode:
+            node = new EACGActualNode(&mod, &mod.GetMPAllocator(), &newEaCg);
+            break;
+        default:
+            CHECK_FATAL(false, "impossible");
+    }
+    node->SetEACG(&newEaCg);
+    eaCgTab.push_back(node);
+    InEaCgBaseNode(*node, newEaCg, true);
+    newEaCg.SetNodeAt(node->id - 1, node);
+    if (node->IsActualNode()) {
+        CHECK_FATAL(ReadNum() == kBinEaCgActNode, "must be");
+        InEaCgActNode(static_cast<EACGActualNode &>(*node));
+    } else if (node->IsFieldNode()) {
+        CHECK_FATAL(ReadNum() == kBinEaCgFieldNode, "must be");
+        InEaCgFieldNode(static_cast<EACGFieldNode &>(*node), newEaCg);
+    } else if (node->IsObjectNode()) {
+        CHECK_FATAL(ReadNum() == kBinEaCgObjNode, "must be");
+        InEaCgObjNode(static_cast<EACGObjectNode &>(*node), newEaCg);
+    } else if (node->IsReferenceNode()) {
+        CHECK_FATAL(ReadNum() == kBinEaCgRefNode, "must be");
+        InEaCgRefNode(static_cast<EACGRefNode &>(*node));
+    }
+    InEaCgBaseNode(*node, newEaCg, false);
+    CHECK_FATAL(ReadNum() == ~kBinEaCgNode, "must be");
+    return *node;
+}
+
+EAConnectionGraph *BinaryMplImport::ReadEaCgField()
+{
+    if (ReadNum() == ~kBinEaCgStart) {
+        return nullptr;
+    }
+    ReadInt();
+    GStrIdx funcStr = ImportStr();
+    int nodesSize = ReadInt();
+    EAConnectionGraph *newEaCg = mod.GetMemPool()->New<EAConnectionGraph>(&mod, &mod.GetMPAllocator(), funcStr, true);
+    newEaCg->ResizeNodes(nodesSize, nullptr);
+    InEaCgNode(*newEaCg);
+    CHECK_FATAL(newEaCg->GetNode(0)->IsObjectNode(), "must be");
+    CHECK_FATAL(newEaCg->GetNode(1)->IsReferenceNode(), "must be");
+    CHECK_FATAL(newEaCg->GetNode(2)->IsFieldNode(), "must be");
+    newEaCg->globalField = static_cast<EACGFieldNode *>(newEaCg->GetNode(2));
+    newEaCg->globalObj = static_cast<EACGObjectNode *>(newEaCg->GetNode(0));
+    newEaCg->globalRef = static_cast<EACGRefNode *>(newEaCg->GetNode(1));
+    CHECK_FATAL(newEaCg->globalField && newEaCg->globalObj && newEaCg->globalRef, "must be");
+    int32 nodeSize = ReadInt();
+    for (int j = 0; j < nodeSize; ++j) {
+        EACGBaseNode *node = &InEaCgNode(*newEaCg);
+        newEaCg->funcArgNodes.push_back(node);
+    }
+
+    int32 callSitesize = ReadInt();
+    for (int i = 0; i < callSitesize; ++i) {
+        uint32 id = static_cast<uint32>(ReadInt());
+        newEaCg->callSite2Nodes[id] =
+            mod.GetMemPool()->New<MapleVector<EACGBaseNode *>>(mod.GetMPAllocator().Adapter());
+        int32 calleeArgSize = ReadInt();
+        for (int j = 0; j < calleeArgSize; ++j) {
+            EACGBaseNode *node = &InEaCgNode(*newEaCg);
+            newEaCg->callSite2Nodes[id]->push_back(node);
+        }
+    }
+
+#ifdef DEBUG
+    for (EACGBaseNode *node : newEaCg->GetNodes()) {
+        if (node == nullptr) {
+            continue;
+        }
+        node->CheckAllConnectionInNodes();
+    }
+#endif
+    CHECK_FATAL(ReadNum() == ~kBinEaCgStart, "pattern mismatch in Read EACG");
+    return newEaCg;
+}
+
+void BinaryMplImport::ReadSymField()
+{
+    SkipTotalSize();
+    int32 size = ReadInt();
+    for (int64 i = 0; i < size; i++) {
+        (void)InSymbol(nullptr);
+    }
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == ~kBinSymStart, "pattern mismatch in Read SYM");
+    return;
+}
+
+void BinaryMplImport::ReadSymTabField()
+{
+    SkipTotalSize();
+
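+    // The symbol-table field carries only names; each entry is read into a temporary
+    // string and discarded.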
+
+void BinaryMplImport::ReadSymTabField()
+{
+    SkipTotalSize();
+    int32 size = ReadInt();
+    for (int64 i = 0; i < size; i++) {
+        std::string str;
+        ReadAsciiStr(str);
+    }
+    int64 tag = ReadNum();
+    CHECK_FATAL(tag == ~kBinSymTabStart, "pattern mismatch in Read TYPE");
+    return;
+}
+
+void BinaryMplImport::ReadContentField()
+{
+    SkipTotalSize();
+
+    int32 size = ReadInt();
+    int64 item;
+    int32 offset;
+    for (int32 i = 0; i < size; ++i) {
+        item = ReadNum();
+        offset = ReadInt();
+        content[item] = offset;
+    }
+    CHECK_FATAL(ReadNum() == ~kBinContentStart, "pattern mismatch in Read CONTENT");
+}
+
+void BinaryMplImport::Jump2NextField()
+{
+    uint32 totalSize = static_cast<uint32>(ReadInt());
+    bufI += (totalSize - sizeof(uint32));
+    ReadNum();  // skip end tag for this field
+}
+
+bool BinaryMplImport::ImportForSrcLang(const std::string &fname, MIRSrcLang &srcLang)
+{
+    Reset();
+    ReadFileAt(fname, 0);
+    int32 magic = ReadInt();
+    if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) {
+        buf.clear();
+        return false;
+    }
+    importingFromMplt = kMpltMagicNumber == magic;
+    int64 fieldID = ReadNum();
+    while (fieldID != kBinFinish) {
+        switch (fieldID) {
+            case kBinHeaderStart: {
+                SkipTotalSize();
+                (void)ReadNum();  // skip flavor
+                srcLang = static_cast<MIRSrcLang>(ReadNum());
+                return true;
+            }
+            default: {
+                Jump2NextField();
+                break;
+            }
+        }
+        fieldID = ReadNum();
+    }
+    return false;
+}
+
+bool BinaryMplImport::Import(const std::string &fname, bool readSymbols, bool readSe)
+{
+    Reset();
+    ReadFileAt(fname, 0);
+    int32 magic = ReadInt();
+    if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) {
+        buf.clear();
+        return false;
+    }
+    importingFromMplt = kMpltMagicNumber == magic;
+    int64 fieldID = ReadNum();
+    if (readSe) {
+        while (fieldID != kBinFinish) {
+            if (fieldID == kBinSeStart) {
+#ifdef MPLT_DEBUG
+                LogInfo::MapleLogger() << "read SE of : " << fname << '\n';
+#endif
+                BinaryMplImport tmp(mod);
+                tmp.Reset();
+                tmp.buf = buf;
+                tmp.bufI = bufI;
+                tmp.importFileName = fname;
+                tmp.ReadSeField();
+                Jump2NextField();
+            } else if (fieldID == kBinEaStart) {
+                BinaryMplImport tmp(mod);
+                tmp.Reset();
+                tmp.buf = buf;
+                tmp.bufI = bufI;
+                tmp.importFileName = fname;
+                tmp.ReadEaField();
+                Jump2NextField();
+            } else {
+                Jump2NextField();
+            }
+            fieldID = ReadNum();
+        }
+        return true;
+    }
+    while (fieldID != kBinFinish) {
+        switch (fieldID) {
+            case kBinContentStart: {
+                ReadContentField();
+                break;
+            }
+            case kBinStrStart: {
+                ReadStrField();
+                break;
+            }
+            case kBinHeaderStart: {
+                ReadHeaderField();
+                break;
+            }
+            case kBinTypeStart: {
+                ReadTypeField();
+                break;
+            }
+            case kBinSymStart: {
+                if (readSymbols) {
+                    ReadSymField();
+                } else {
+                    Jump2NextField();
+                }
+                break;
+            }
+            case kBinSymTabStart: {
+                ReadSymTabField();
+                break;
+            }
+            case kBinCgStart: {
+                if (readSymbols) {
+#ifdef MPLT_DEBUG
+                    LogInfo::MapleLogger() << "read CG of : " << fname << '\n';
+#endif
+                    BinaryMplImport tmp(mod);
+                    tmp.Reset();
+                    tmp.inIPA = true;
+                    tmp.buf = buf;
+                    tmp.bufI = bufI;
+                    tmp.importFileName = fname;
+                    tmp.ReadCgField();
+                    tmp.UpdateMethodSymbols();
+                    Jump2NextField();
+                } else {
+                    Jump2NextField();
+                }
+                break;
+            }
+            case kBinSeStart: {
+                Jump2NextField();
+                break;
+            }
+            case kBinEaStart: {
+                if (readSymbols) {
+#ifdef MPLT_DEBUG
+                    LogInfo::MapleLogger() << "read EA of : " << fname << '\n';
+#endif
+                    BinaryMplImport tmp(mod);
+                    tmp.Reset();
+                    tmp.buf = buf;
+                    tmp.bufI = bufI;
+                    tmp.importFileName = fname;
+                    tmp.ReadEaField();
+                    Jump2NextField();
+                } else {
+                    Jump2NextField();
+                }
+                break;
+            }
+            case kBinFunctionBodyStart: {
+                ReadFunctionBodyField();
+                break;
+            }
+            default:
+                CHECK_FATAL(false, "should not run here");
+        }
+        fieldID = ReadNum();
+    }
+    UpdateMethodSymbols();
+    SetupEHRootType();
+    return true;
+}
+}  // namespace maple
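Jump2NextField works because every field is emitted as a length-prefixed record, and, as the subtraction suggests, the 4-byte size word counts itself: after the size has been consumed, the cursor advances by totalSize - sizeof(uint32) and lands on the field's end tag. A sketch under that assumption (Cursor is a stand-in for the importer's buf/bufI pair, not the real class):

```cpp
// Sketch: skipping a length-prefixed field whose size word includes itself.
#include <cstddef>
#include <cstdint>

struct Cursor {
    const uint8_t *buf;
    size_t bufI = 0;  // mirrors BinaryMplImport::bufI

    uint32_t ReadInt()
    {
        uint32_t v = 0;
        for (int i = 0; i < 4; ++i) {  // little-endian, 4 bytes
            v |= static_cast<uint32_t>(buf[bufI++]) << (8 * i);
        }
        return v;
    }

    void Jump2NextField()
    {
        uint32_t totalSize = ReadInt();        // counts the 4 bytes just read
        bufI += totalSize - sizeof(uint32_t);  // land right on the end tag
        // the real code then calls ReadNum() once to consume the end tag
    }
};
```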
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9d368e5c5c361cb4d9799d849c37d5fb9891dea3
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info.cpp
@@ -0,0 +1,1524 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "debug_info.h"
+#include "mir_builder.h"
+#include "printing.h"
+#include "maple_string.h"
+#include "global_tables.h"
+#include "mir_type.h"
+#include <cstring>
+#include "securec.h"
+#include "mpl_logging.h"
+#include "version.h"
+
+namespace maple {
+extern const char *GetDwTagName(unsigned n);
+extern const char *GetDwFormName(unsigned n);
+extern const char *GetDwAtName(unsigned n);
+extern const char *GetDwOpName(unsigned n);
+extern const char *GetDwAteName(unsigned n);
+extern const char *GetDwCfaName(unsigned n);
+extern DwAte GetAteFromPTY(PrimType pty);
+
+constexpr uint32 kIndx2 = 2;
+constexpr uint32 kStructDBGSize = 8888;
+
+// DBGDie methods
+DBGDie::DBGDie(MIRModule *m, DwTag tag)
+    : module(m),
+      tag(tag),
+      id(m->GetDbgInfo()->GetMaxId()),
+      withChildren(false),
+      sibling(nullptr),
+      firstChild(nullptr),
+      abbrevId(0),
+      tyIdx(0),
+      offset(0),
+      size(0),
+      attrVec(m->GetMPAllocator().Adapter()),
+      subDieVec(m->GetMPAllocator().Adapter())
+{
+    if (module->GetDbgInfo()->GetParentDieSize()) {
+        parent = module->GetDbgInfo()->GetParentDie();
+    } else {
+        parent = nullptr;
+    }
+    m->GetDbgInfo()->SetIdDieMap(m->GetDbgInfo()->GetIncMaxId(), this);
+    attrVec.clear();
+    subDieVec.clear();
+}
+
+void DBGDie::ResetParentDie()
+{
+    module->GetDbgInfo()->ResetParentDie();
+}
+
+DBGDieAttr *DBGDie::AddAttr(DwAt at, DwForm form, uint64 val)
+{
+    // collect strps which need label
+    if (form == DW_FORM_strp) {
+        module->GetDbgInfo()->AddStrps(static_cast<uint32>(val));
+    }
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, val);
+    AddAttr(attr);
+    return attr;
+}
+
+DBGDieAttr *DBGDie::AddSimpLocAttr(DwAt at, DwForm form, uint64 val)
+{
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_fbreg);
+    if (val != kDbgDefaultVal) {
+        p->AddSimpLocOpnd(val);
+    }
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
+
+DBGDieAttr *DBGDie::AddGlobalLocAttr(DwAt at, DwForm form, uint64 val)
+{
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_addr);
+    p->SetGvarStridx(static_cast<int>(val));
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
+
+DBGDieAttr *DBGDie::AddFrmBaseAttr(DwAt at, DwForm form)
+{
+    DBGExprLoc *p = module->GetMemPool()->New<DBGExprLoc>(module, DW_OP_call_frame_cfa);
+    DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast<uint64>(p));
+    AddAttr(attr);
+    return attr;
+}
+
+DBGExprLoc *DBGDie::GetExprLoc()
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == DW_AT_location) {
+            return it->GetPtr();
+        }
+    }
+    return nullptr;
+}
+
+bool DBGDie::SetAttr(DwAt attr, uint64 val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetU(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, int val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetI(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, uint32 val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetId(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, int64 val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetJ(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, float val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetF(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, double val)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetD(val);
+            return true;
+        }
+    }
+    return false;
+}
+
+bool DBGDie::SetAttr(DwAt attr, DBGExprLoc *ptr)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr) {
+            it->SetPtr(ptr);
+            return true;
+        }
+    }
+    return false;
+}
+
+void DBGDie::AddAttr(DBGDieAttr *attr)
+{
+    for (auto it : attrVec) {
+        if (it->GetDwAt() == attr->GetDwAt()) {
+            return;
+        }
+    }
+    attrVec.push_back(attr);
+}
+
+void DBGDie::AddSubVec(DBGDie *die)
+{
+    if (!die) {
+        return;
+    }
+    for (auto it : subDieVec) {
+        if (it->GetId() == die->GetId()) {
+            return;
+        }
+    }
+    subDieVec.push_back(die);
+    die->parent = this;
+}
+
+// DBGAbbrevEntry methods
+DBGAbbrevEntry::DBGAbbrevEntry(MIRModule *m, DBGDie *die) : attrPairs(m->GetMPAllocator().Adapter())
+{
+    tag = die->GetTag();
+    abbrevId = 0;
+    withChildren = die->GetWithChildren();
+    for (auto it : die->GetAttrVec()) {
+        attrPairs.push_back(it->GetDwAt());
+        attrPairs.push_back(it->GetDwForm());
+    }
+}
+
+bool DBGAbbrevEntry::Equalto(DBGAbbrevEntry *entry)
+{
+    if (attrPairs.size() != entry->attrPairs.size()) {
+        return false;
+    }
+    if (withChildren != entry->GetWithChildren()) {
+        return false;
+    }
+    for (uint32 i = 0; i < attrPairs.size(); i++) {
+        if (attrPairs[i] != entry->attrPairs[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+// DebugInfo methods
+void DebugInfo::Init()
+{
+    mplSrcIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(module->GetFileName());
+    compUnit = module->GetMemPool()->New<DBGDie>(module, DW_TAG_compile_unit);
+    module->SetWithDbgInfo(true);
+    ResetParentDie();
+    if (module->GetSrcLang() == kSrcLangC) {
+        varPtrPrefix = "";
+    }
+}
+
+void DebugInfo::SetupCU()
+{
+    compUnit->SetWithChildren(true);
+    /* Add the Producer (Compiler) Information */
+    const char *producer = strdup((std::string("Maple Version ") + Version::GetVersionStr()).c_str());
+    GStrIdx strIdx = module->GetMIRBuilder()->GetOrCreateStringIndex(producer);
+    free(const_cast<char *>(producer));  // strdup allocates with malloc, so release with free, not delete
+    producer = nullptr;
+    compUnit->AddAttr(DW_AT_producer, DW_FORM_strp, strIdx.GetIdx());
+
+    /* Source Language */
+    compUnit->AddAttr(DW_AT_language, DW_FORM_data4, DW_LANG_C99);
+
+    /* Add the compiled source file information */
+    compUnit->AddAttr(DW_AT_name, DW_FORM_strp, mplSrcIdx.GetIdx());
+    strIdx = module->GetMIRBuilder()->GetOrCreateStringIndex("/to/be/done/current/path");
+    compUnit->AddAttr(DW_AT_comp_dir, DW_FORM_strp, strIdx.GetIdx());
+
+    compUnit->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal);
+    compUnit->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal);
+
+    compUnit->AddAttr(DW_AT_stmt_list, DW_FORM_sec_offset, kDbgDefaultVal);
+}
+
+void DebugInfo::AddScopeDie(MIRScope *scope)
+{
+    if (!scope->NeedEmitAliasInfo()) {
+        return;
+    }
+
+    if (scope->GetLevel() != 0) {
+        DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_lexical_block);
+        die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal);
+        die->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal);
+
+        // add die to parent
+        GetParentDie()->AddSubVec(die);
+
+        PushParentDie(die);
+    }
+
+    // process aliasVarMap
+    AddAliasDies(scope->GetAliasVarMap());
+
+    if (scope->GetSubScopes().size() > 0) {
+        // process subScopes
+        for (auto it : scope->GetSubScopes()) {
+            AddScopeDie(it);
+        }
+    }
+
+    if (scope->GetLevel() != 0) {
+        PopParentDie();
+    }
+}
+
+void DebugInfo::AddAliasDies(MapleMap<GStrIdx, MIRAliasVars> &aliasMap)
+{
+    MIRFunction *func = GetCurFunction();
+    for (auto &i : aliasMap) {
+        // maple var
+        MIRSymbol *var = nullptr;
+        if (i.second.isLocal) {
+            var = func->GetSymTab()->GetSymbolFromStrIdx(i.second.mplStrIdx);
+        } else {
+            var = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(i.second.mplStrIdx);
+        }
+        DEBUG_ASSERT(var, "can not find symbol");
+
+        // create alias die using maple var except name
+        DBGDie *vdie = CreateVarDie(var, i.first);
+
+        GetParentDie()->AddSubVec(vdie);
+
+        // add alias var name to debug_str section
+        strps.insert(i.first.GetIdx());
+    }
+}
+
+void DebugInfo::Finish()
+{
+    SetupCU();
+    FillTypeAttrWithDieId();
+    // build tree from root DIE compUnit
+    BuildDieTree();
+    BuildAbbrev();
+    ComputeSizeAndOffsets();
+}
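AddScopeDie mirrors source nesting: every non-root scope becomes a DW_TAG_lexical_block child of the current parent DIE, and the PushParentDie/PopParentDie pair brackets the recursion so children attach to the right block. A standalone sketch of that bracketing, with minimal stand-in types rather than the real classes:

```cpp
// Sketch: how nested source scopes map onto nested lexical_block DIEs.
//
//   void f() {          // DW_TAG_subprogram
//       int a;          //   DW_TAG_variable (a)
//       {               //   DW_TAG_lexical_block
//           int b;      //     DW_TAG_variable (b)
//       }
//   }
#include <vector>

struct Die {
    std::vector<Die *> kids;
};

struct DieStack {
    std::vector<Die *> stack;
    void Push(Die *d) { stack.push_back(d); }
    void Pop() { stack.pop_back(); }
    Die *Top() { return stack.back(); }
};

struct ScopeGuard {  // enter a lexical block, leave on destruction
    DieStack &s;
    ScopeGuard(DieStack &s, Die *block) : s(s)
    {
        s.Top()->kids.push_back(block);  // attach to current parent
        s.Push(block);                   // block becomes the new parent
    }
    ~ScopeGuard() { s.Pop(); }
};
```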
+
+void DebugInfo::BuildDebugInfo()
+{
+    DEBUG_ASSERT(module->GetDbgInfo(), "null dbgInfo");
+
+    Init();
+
+    // container types
+    for (auto it : module->GetTypeNameTab()->GetGStrIdxToTyIdxMap()) {
+        GStrIdx strIdx = it.first;
+        TyIdx tyIdx = it.second;
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx());
+
+        switch (type->GetKind()) {
+            case kTypeClass:
+            case kTypeClassIncomplete:
+            case kTypeInterface:
+            case kTypeInterfaceIncomplete:
+            case kTypeStruct:
+            case kTypeStructIncomplete:
+            case kTypeUnion: {
+                (void)GetOrCreateStructTypeDie(type);
+                break;
+            }
+            default:
+                LogInfo::MapleLogger() << "named type "
+                                       << GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx).c_str() << "\n";
+                break;
+        }
+    }
+
+    for (size_t i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) {
+        MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast<uint32>(i));
+        if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused ||
+            mirSymbol->GetStorageClass() == kScExtern) {
+            continue;
+        }
+        if (module->IsCModule() && mirSymbol->IsGlobal() && mirSymbol->IsVar()) {
+            DBGDie *vdie = CreateVarDie(mirSymbol);
+            compUnit->AddSubVec(vdie);
+        }
+    }
+
+    // setup debug info for functions
+    for (auto func : GlobalTables::GetFunctionTable().GetFuncTable()) {
+        // the first one in funcTable is nullptr
+        if (!func) {
+            continue;
+        }
+        SetCurFunction(func);
+        // function decl
+        if (stridxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == stridxDieIdMap.end()) {
+            DBGDie *fdie = GetOrCreateFuncDeclDie(func);
+            if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) {
+                compUnit->AddSubVec(fdie);
+            }
+        }
+        // function def
+        if (funcDefStrIdxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == funcDefStrIdxDieIdMap.end()) {
+            DBGDie *fdie = GetOrCreateFuncDefDie(func, 0);
+            if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) {
+                compUnit->AddSubVec(fdie);
+            }
+        }
+    }
+
+    // finalize debug info
+    Finish();
+}
+
+DBGDieAttr *DebugInfo::CreateAttr(DwAt at, DwForm form, uint64 val)
+{
+    DBGDieAttr *attr = module->GetMemPool()->New<DBGDieAttr>(kDwAt);
+    attr->SetDwAt(at);
+    attr->SetDwForm(form);
+    attr->SetU(val);
+    return attr;
+}
+
+void DebugInfo::SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die)
+{
+    (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()] = die->GetId();
+}
+
+DBGDie *DebugInfo::GetLocalDie(MIRFunction *func, GStrIdx strIdx)
+{
+    uint32 id = (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()];
+    return idDieMap[id];
+}
+
+void DebugInfo::SetLocalDie(GStrIdx strIdx, const DBGDie *die)
+{
+    (funcLstrIdxDieIdMap[GetCurFunction()])[strIdx.GetIdx()] = die->GetId();
+}
+
+DBGDie *DebugInfo::GetLocalDie(GStrIdx strIdx)
+{
+    uint32 id = (funcLstrIdxDieIdMap[GetCurFunction()])[strIdx.GetIdx()];
+    return idDieMap[id];
+}
+
+void DebugInfo::SetLabelIdx(MIRFunction *func, GStrIdx strIdx, LabelIdx labidx)
+{
+    (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()] = labidx;
+}
+
+LabelIdx DebugInfo::GetLabelIdx(MIRFunction *func, GStrIdx strIdx)
+{
+    LabelIdx labidx = (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()];
+    return labidx;
+}
+
+void DebugInfo::SetLabelIdx(GStrIdx strIdx, LabelIdx labidx)
+{
+    (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()] = labidx;
+}
+
+LabelIdx DebugInfo::GetLabelIdx(GStrIdx strIdx)
+{
+    LabelIdx labidx = (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()];
+    return labidx;
+}
+
+DBGDie *DebugInfo::CreateFormalParaDie(MIRFunction *func, MIRType *type, MIRSymbol *sym)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_formal_parameter);
+
+    (void)GetOrCreateTypeDie(type);
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+
+    /* var Name */
+    if (sym) {
+        die->AddAttr(DW_AT_name, DW_FORM_strp, sym->GetNameStrIdx().GetIdx());
+        die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum());
+        die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum());
+        die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column());
+        die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, kDbgDefaultVal);
+        SetLocalDie(func, sym->GetNameStrIdx(), die);
+    }
+    return die;
+}
+
+DBGDie *DebugInfo::GetOrCreateLabelDie(LabelIdx labid)
+{
+    MIRFunction *func = GetCurFunction();
+    CHECK(labid < func->GetLabelTab()->GetLabelTableSize(), "index out of range in DebugInfo::GetOrCreateLabelDie");
+    GStrIdx strid = func->GetLabelTab()->GetSymbolFromStIdx(labid);
+    if ((funcLstrIdxDieIdMap[func]).size() &&
+        (funcLstrIdxDieIdMap[func]).find(strid.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) {
+        return GetLocalDie(strid);
+    }
+
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_label);
+    die->AddAttr(DW_AT_name, DW_FORM_strp, strid.GetIdx());
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lexer->GetLineNum());
+    die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal);
+    GetParentDie()->AddSubVec(die);
+    SetLocalDie(strid, die);
+    SetLabelIdx(strid, labid);
+    return die;
+}
+
+DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym)
+{
+    // filter vtab
+    if (sym->GetName().find(VTAB_PREFIX_STR) == 0) {
+        return nullptr;
+    }
+
+    if (sym->GetName().find(GCTIB_PREFIX_STR) == 0) {
+        return nullptr;
+    }
+
+    if (sym->GetStorageClass() == kScFormal) {
+        return nullptr;
+    }
+
+    bool isLocal = sym->IsLocal();
+    GStrIdx strIdx = sym->GetNameStrIdx();
+
+    if (isLocal) {
+        MIRFunction *func = GetCurFunction();
+        if ((funcLstrIdxDieIdMap[func]).size() &&
+            (funcLstrIdxDieIdMap[func]).find(strIdx.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) {
+            return GetLocalDie(strIdx);
+        }
+    } else {
+        if (stridxDieIdMap.find(strIdx.GetIdx()) != stridxDieIdMap.end()) {
+            uint32 id = stridxDieIdMap[strIdx.GetIdx()];
+            return idDieMap[id];
+        }
+    }
+
+    DBGDie *die = CreateVarDie(sym, strIdx);
+
+    GetParentDie()->AddSubVec(die);
+    if (isLocal) {
+        SetLocalDie(strIdx, die);
+    } else {
+        stridxDieIdMap[strIdx.GetIdx()] = die->GetId();
+    }
+
+    return die;
+}
+
+DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym, GStrIdx strIdx)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_variable);
+
+    /* var Name */
+    die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx());
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum());
+    die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column());
+
+    bool isLocal = sym->IsLocal();
+    if (isLocal) {
+        die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, kDbgDefaultVal);
+    } else {
+        // global var just use its name as address in .s
+        uint64 idx = strIdx.GetIdx();
+        if ((sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo()) || sym->IsStatic()) {
+            std::string ptrName = varPtrPrefix + sym->GetName();
+            idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(ptrName).GetIdx();
+        }
+        die->AddGlobalLocAttr(DW_AT_location, DW_FORM_exprloc, idx);
+    }
+
+    MIRType *type = sym->GetType();
+    (void)GetOrCreateTypeDie(type);
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+
+    return die;
+}
+
+DBGDie *DebugInfo::GetOrCreateFuncDeclDie(MIRFunction *func)
+{
+    uint32 funcnameidx = func->GetNameStrIdx().GetIdx();
+    if (stridxDieIdMap.find(funcnameidx) != stridxDieIdMap.end()) {
+        uint32 id = stridxDieIdMap[funcnameidx];
+        return idDieMap[id];
+    }
+
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_subprogram);
+    stridxDieIdMap[funcnameidx] = die->GetId();
+
+    die->AddAttr(DW_AT_external, DW_FORM_flag_present, 1);
+
+    // Function Name
+    MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx());
+
+    die->AddAttr(DW_AT_name, DW_FORM_strp, funcnameidx);
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum());
+    die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column());
+
+    // Attributes for DW_AT_accessibility
+    uint32 access = 0;
+    if (func->IsPublic()) {
+        access = DW_ACCESS_public;
+    } else if (func->IsPrivate()) {
+        access = DW_ACCESS_private;
+    } else if (func->IsProtected()) {
+        access = DW_ACCESS_protected;
+    }
+    if (access) {
+        die->AddAttr(DW_AT_accessibility, DW_FORM_data4, access);
+    }
+
+    die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal);
+
+    PushParentDie(die);
+
+    // formal parameter
+    for (uint32 i = 0; i < func->GetFormalCount(); i++) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFormalDefAt(i).formalTyIdx);
+        DBGDie *param = CreateFormalParaDie(func, type, nullptr);
+        die->AddSubVec(param);
+    }
+
+    PopParentDie();
+
+    return die;
+}
+
+bool LIsCompilerGenerated(const MIRFunction *func)
+{
+    return ((func->GetName().c_str())[0] != 'L');
+}
+
+DBGDie *DebugInfo::GetOrCreateFuncDefDie(MIRFunction *func, uint32 lnum)
+{
+    uint32 funcnameidx = func->GetNameStrIdx().GetIdx();
+    if (funcDefStrIdxDieIdMap.find(funcnameidx) != funcDefStrIdxDieIdMap.end()) {
+        uint32 id = funcDefStrIdxDieIdMap[funcnameidx];
+        return idDieMap[id];
+    }
+
+    DBGDie *funcdecldie = GetOrCreateFuncDeclDie(func);
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_subprogram);
+    // update funcDefStrIdxDieIdMap and leave stridxDieIdMap for the func decl
+    funcDefStrIdxDieIdMap[funcnameidx] = die->GetId();
+
+    die->AddAttr(DW_AT_specification, DW_FORM_ref4, funcdecldie->GetId());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lnum);
+
+    if (!func->IsReturnVoid()) {
+        auto returnType = func->GetReturnType();
+        (void)GetOrCreateTypeDie(returnType);
+        die->AddAttr(DW_AT_type, DW_FORM_ref4, returnType->GetTypeIndex().GetIdx());
+    }
+
+    die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal);
+    die->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal);
+    die->AddFrmBaseAttr(DW_AT_frame_base, DW_FORM_exprloc);
+    if (!func->IsStatic() && !LIsCompilerGenerated(func)) {
+        die->AddAttr(DW_AT_object_pointer, DW_FORM_ref4, kDbgDefaultVal);
+    }
+    die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal);
+
+    PushParentDie(die);
+
+    // formal parameter
+    for (uint32 i = 0; i < func->GetFormalCount(); i++) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(func->GetFormalDefAt(i).formalTyIdx);
+        DBGDie *pdie = CreateFormalParaDie(func, type, func->GetFormalDefAt(i).formalSym);
+        die->AddSubVec(pdie);
+    }
+
+    if (func->GetSymTab()) {
+        // local variables, start from 1
+        for (uint32 i = 1; i < func->GetSymTab()->GetSymbolTableSize(); i++) {
+            MIRSymbol *var = func->GetSymTab()->GetSymbolFromStIdx(i);
+            DBGDie *vdie = CreateVarDie(var);
+            die->AddSubVec(vdie);
+        }
+    }
+
+    // add scope die
+    AddScopeDie(func->GetScope());
+
+    PopParentDie();
+
+    return die;
+}
+
+DBGDie *DebugInfo::GetOrCreatePrimTypeDie(MIRType *ty)
+{
+    PrimType pty = ty->GetPrimType();
+    uint32 tid = static_cast<uint32>(pty);
+    if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) {
+        uint32 id = tyIdxDieIdMap[tid];
+        return idDieMap[id];
+    }
+
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_base_type);
+    die->SetTyIdx(static_cast<uint32>(pty));
+
+    if (ty->GetNameStrIdx().GetIdx() == 0) {
+        const char *name = GetPrimTypeName(ty->GetPrimType());
+        std::string pname = std::string(name);
+        GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(pname);
+        ty->SetNameStrIdx(strIdx);
+    }
+
+    die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(pty));
+    die->AddAttr(DW_AT_encoding, DW_FORM_data4, GetAteFromPTY(pty));
+    die->AddAttr(DW_AT_name, DW_FORM_strp, ty->GetNameStrIdx().GetIdx());
+
+    compUnit->AddSubVec(die);
+    tyIdxDieIdMap[static_cast<uint32>(pty)] = die->GetId();
+    return die;
+}
+
+DBGDie *DebugInfo::CreatePointedFuncTypeDie(MIRFuncType *ftype)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_subroutine_type);
+
+    die->AddAttr(DW_AT_prototyped, DW_FORM_data4, static_cast<uint32>(ftype->GetParamTypeList().size() > 0));
+    MIRType *rtype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftype->GetRetTyIdx());
+    (void)GetOrCreateTypeDie(rtype);
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, ftype->GetRetTyIdx().GetIdx());
+
+    compUnit->AddSubVec(die);
+
+    for (uint32 i = 0; i < ftype->GetParamTypeList().size(); i++) {
+        DBGDie *paramdie = module->GetMemPool()->New<DBGDie>(module, DW_TAG_formal_parameter);
+        MIRType *ptype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ftype->GetNthParamType(i));
+        (void)GetOrCreateTypeDie(ptype);
+        paramdie->AddAttr(DW_AT_type, DW_FORM_ref4, ftype->GetNthParamType(i).GetIdx());
+        die->AddSubVec(paramdie);
+    }
+
+    tyIdxDieIdMap[ftype->GetTypeIndex().GetIdx()] = die->GetId();
+    return die;
+}
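GetOrCreateFuncDefDie deliberately keeps two maps: stridxDieIdMap records the declaration DIE and funcDefStrIdxDieIdMap the definition DIE, and the definition points back at its declaration through DW_AT_specification. The two-map pattern reduced to a sketch, with stand-in types rather than the real classes:

```cpp
// Sketch: one DIE per declaration, one per definition, linked by a
// "specification" edge, the same split the decl/def maps above maintain.
#include <cstdint>
#include <unordered_map>

struct DemoDie {
    uint32_t id = 0;
    uint32_t specificationOf = 0;  // 0 = none; else id of the decl DIE
};

std::unordered_map<uint32_t, DemoDie> dieById;
std::unordered_map<uint32_t, uint32_t> declByName;  // name str idx -> decl DIE id
std::unordered_map<uint32_t, uint32_t> defByName;   // name str idx -> def DIE id

DemoDie &GetOrCreateDef(uint32_t nameIdx, uint32_t nextId)
{
    auto found = defByName.find(nameIdx);
    if (found != defByName.end()) {
        return dieById[found->second];  // definition already materialized
    }
    DemoDie def {nextId, declByName[nameIdx]};  // def references its decl
    defByName[nameIdx] = def.id;
    return dieById[def.id] = def;
}
```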
+
+DBGDie *DebugInfo::GetOrCreateTypeDie(MIRType *type)
+{
+    if (type == nullptr) {
+        return nullptr;
+    }
+
+    uint32 tid = type->GetTypeIndex().GetIdx();
+    if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) {
+        uint32 id = tyIdxDieIdMap[tid];
+        return idDieMap[id];
+    }
+
+    uint32 sid = type->GetNameStrIdx().GetIdx();
+    if (sid && stridxDieIdMap.find(sid) != stridxDieIdMap.end()) {
+        uint32 id = stridxDieIdMap[sid];
+        return idDieMap[id];
+    }
+
+    if (type->GetTypeIndex() == static_cast<uint32>(type->GetPrimType())) {
+        return GetOrCreatePrimTypeDie(type);
+    }
+
+    DBGDie *die = nullptr;
+    switch (type->GetKind()) {
+        case kTypePointer: {
+            MIRPtrType *ptype = static_cast<MIRPtrType *>(type);
+            die = GetOrCreatePointTypeDie(ptype);
+            break;
+        }
+        case kTypeFunction: {
+            MIRFuncType *ftype = static_cast<MIRFuncType *>(type);
+            die = CreatePointedFuncTypeDie(ftype);
+            break;
+        }
+        case kTypeArray:
+        case kTypeFArray:
+        case kTypeJArray: {
+            MIRArrayType *atype = static_cast<MIRArrayType *>(type);
+            die = GetOrCreateArrayTypeDie(atype);
+            break;
+        }
+        case kTypeUnion:
+        case kTypeStruct:
+        case kTypeStructIncomplete:
+        case kTypeClass:
+        case kTypeClassIncomplete:
+        case kTypeInterface:
+        case kTypeInterfaceIncomplete: {
+            die = GetOrCreateStructTypeDie(type);
+            break;
+        }
+        case kTypeBitField:
+            break;
+        default:
+            CHECK_FATAL(false, "TODO: support type");
+            break;
+    }
+
+    return die;
+}
+
+DBGDie *DebugInfo::GetOrCreatePointTypeDie(const MIRPtrType *ptrtype)
+{
+    uint32 tid = ptrtype->GetTypeIndex().GetIdx();
+    if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) {
+        uint32 id = tyIdxDieIdMap[tid];
+        return idDieMap[id];
+    }
+
+    MIRType *type = ptrtype->GetPointedType();
+    // for <* void>
+    if ((type != nullptr) && (type->GetPrimType() == PTY_void || type->GetKind() == kTypeFunction)) {
+        DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_pointer_type);
+        die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize);
+        if (type->GetKind() == kTypeFunction) {
+            DBGDie *pdie = GetOrCreateTypeDie(type);
+            die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+            tyIdxDieIdMap[type->GetTypeIndex().GetIdx()] = pdie->GetId();
+        }
+        tyIdxDieIdMap[ptrtype->GetTypeIndex().GetIdx()] = die->GetId();
+        compUnit->AddSubVec(die);
+        return die;
+    }
+
+    (void)GetOrCreateTypeDie(type);
+    if (typeDefTyIdxMap.find(type->GetTypeIndex().GetIdx()) != typeDefTyIdxMap.end()) {
+        uint32 tyIdx = typeDefTyIdxMap[type->GetTypeIndex().GetIdx()];
+        if (pointedPointerMap.find(tyIdx) != pointedPointerMap.end()) {
+            uint32 tyid = pointedPointerMap[tyIdx];
+            if (tyIdxDieIdMap.find(tyid) != tyIdxDieIdMap.end()) {
+                uint32 dieid = tyIdxDieIdMap[tyid];
+                DBGDie *die = idDieMap[dieid];
+                return die;
+            }
+        }
+    }
+
+    // update incomplete type from stridxDieIdMap to tyIdxDieIdMap
+    MIRStructType *stype = static_cast<MIRStructType *>(type);
+    if ((stype != nullptr) && stype->IsIncomplete()) {
+        uint32 sid = stype->GetNameStrIdx().GetIdx();
+        if (stridxDieIdMap.find(sid) != stridxDieIdMap.end()) {
+            uint32 dieid = stridxDieIdMap[sid];
+            if (dieid) {
+                tyIdxDieIdMap[stype->GetTypeIndex().GetIdx()] = dieid;
+            }
+        }
+    }
+
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_pointer_type);
+    die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize);
+    // fill with type idx instead of typedie->id to avoid nullptr typedie of
+    // forward reference of class types
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+    tyIdxDieIdMap[ptrtype->GetTypeIndex().GetIdx()] = die->GetId();
+
+    compUnit->AddSubVec(die);
+
+    return die;
+}
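Several of these creators deliberately write a type index, not a DIE id, into DW_AT_type, because the referenced type DIE may not exist yet when class types forward-reference each other; FillTypeAttrWithDieId later rewrites those attributes once every DIE has been created. The shape of that two-phase fixup, reduced to a sketch with stand-in structures:

```cpp
// Sketch: store a stable key (type index) first, patch it to the real target
// DIE id in a second pass once all DIEs exist.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct Attr {
    bool isTypeRef;
    uint32_t value;  // phase 1: type index; phase 2: DIE id
};

void PatchTypeRefs(std::vector<Attr> &attrs,
                   const std::unordered_map<uint32_t, uint32_t> &tyIdxToDieId)
{
    for (Attr &a : attrs) {
        if (!a.isTypeRef) {
            continue;
        }
        auto it = tyIdxToDieId.find(a.value);
        if (it != tyIdxToDieId.end()) {
            a.value = it->second;  // now a real DIE reference
        }
    }
}
```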
+
+DBGDie *DebugInfo::GetOrCreateArrayTypeDie(const MIRArrayType *arraytype)
+{
+    uint32 tid = arraytype->GetTypeIndex().GetIdx();
+    if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) {
+        uint32 id = tyIdxDieIdMap[tid];
+        return idDieMap[id];
+    }
+
+    MIRType *type = arraytype->GetElemType();
+    (void)GetOrCreateTypeDie(type);
+
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_array_type);
+    die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize);
+    // fill with type idx instead of typedie->id to avoid nullptr typedie of
+    // forward reference of class types
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+    tyIdxDieIdMap[arraytype->GetTypeIndex().GetIdx()] = die->GetId();
+
+    compUnit->AddSubVec(die);
+
+    // maple uses array of 1D array to represent 2D array
+    // so only one DW_TAG_subrange_type entry is needed
+    DBGDie *rangedie = module->GetMemPool()->New<DBGDie>(module, DW_TAG_subrange_type);
+    (void)GetOrCreatePrimTypeDie(GlobalTables::GetTypeTable().GetUInt32());
+    rangedie->AddAttr(DW_AT_type, DW_FORM_ref4, PTY_u32);
+    rangedie->AddAttr(DW_AT_upper_bound, DW_FORM_data4, arraytype->GetSizeArrayItem(0));
+
+    die->AddSubVec(rangedie);
+
+    return die;
+}
+
+DBGDie *DebugInfo::CreateFieldDie(maple::FieldPair pair, uint32 lnum)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_member);
+    die->AddAttr(DW_AT_name, DW_FORM_strp, pair.first.GetIdx());
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lnum);
+
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pair.second.first);
+    (void)GetOrCreateTypeDie(type);
+    // fill with type idx instead of typedie->id to avoid nullptr typedie of
+    // forward reference of class types
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, type->GetTypeIndex().GetIdx());
+
+    die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal);
+
+    return die;
+}
+
+DBGDie *DebugInfo::CreateBitfieldDie(const MIRBitFieldType *type, GStrIdx sidx, uint32 prevBits)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_member);
+
+    die->AddAttr(DW_AT_name, DW_FORM_strp, sidx.GetIdx());
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, 0);
+
+    MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(type->GetPrimType());
+    (void)GetOrCreateTypeDie(ty);
+    die->AddAttr(DW_AT_type, DW_FORM_ref4, ty->GetTypeIndex().GetIdx());
+
+    die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(type->GetPrimType()));
+    die->AddAttr(DW_AT_bit_size, DW_FORM_data4, type->GetFieldSize());
+    die->AddAttr(DW_AT_bit_offset, DW_FORM_data4,
+                 GetPrimTypeSize(type->GetPrimType()) * k8BitSize - type->GetFieldSize() - prevBits);
+    die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, 0);
+
+    return die;
+}
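The DW_AT_bit_offset computation in CreateBitfieldDie counts from the most significant end of the storage unit: byte size times eight, minus the field width, minus the bits already consumed by earlier fields. A worked example (assuming the DWARF2-style convention the formula implies):

```cpp
// Worked example of the CreateBitfieldDie offset formula:
//   bit_offset = byte_size * 8 - field_size - prev_bits
//
// struct S { unsigned a : 3; unsigned b : 5; };  (32-bit storage unit)
//   a: 32 - 3 - 0 = 29   -> occupies the 3 bits nearest the MSB end
//   b: 32 - 5 - 3 = 24   -> the next 5 bits down
#include <cassert>
#include <cstdint>

uint32_t DwarfBitOffset(uint32_t byteSize, uint32_t fieldSize, uint32_t prevBits)
{
    return byteSize * 8 - fieldSize - prevBits;
}

int main()
{
    assert(DwarfBitOffset(4, 3, 0) == 29);  // field a
    assert(DwarfBitOffset(4, 5, 3) == 24);  // field b
    return 0;
}
```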
+
+DBGDie *DebugInfo::GetOrCreateStructTypeDie(const MIRType *type)
+{
+    DEBUG_ASSERT(type, "null structure type");
+    GStrIdx strIdx = type->GetNameStrIdx();
+    DEBUG_ASSERT(strIdx.GetIdx(), "structure type missing name");
+
+    if (tyIdxDieIdMap.find(type->GetTypeIndex().GetIdx()) != tyIdxDieIdMap.end()) {
+        uint32 id = tyIdxDieIdMap[type->GetTypeIndex().GetIdx()];
+        return idDieMap[id];
+    }
+
+    DBGDie *die = nullptr;
+    switch (type->GetKind()) {
+        case kTypeClass:
+        case kTypeClassIncomplete: {
+            const MIRClassType *classtype = static_cast<const MIRClassType *>(type);
+            die = CreateClassTypeDie(strIdx, classtype);
+            break;
+        }
+        case kTypeInterface:
+        case kTypeInterfaceIncomplete: {
+            const MIRInterfaceType *interfacetype = static_cast<const MIRInterfaceType *>(type);
+            die = CreateInterfaceTypeDie(strIdx, interfacetype);
+            break;
+        }
+        case kTypeStruct:
+        case kTypeStructIncomplete:
+        case kTypeUnion: {
+            const MIRStructType *stype = static_cast<const MIRStructType *>(type);
+            die = CreateStructTypeDie(strIdx, stype, false);
+            break;
+        }
+        default:
+            LogInfo::MapleLogger() << "named type " << GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx).c_str()
+                                   << "\n";
+            break;
+    }
+
+    GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(strIdx, type->GetTypeIndex());
+    return die;
+}
+
+// shared between struct and union
+DBGDie *DebugInfo::CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structtype, bool update)
+{
+    DBGDie *die = nullptr;
+
+    if (update) {
+        uint32 id = tyIdxDieIdMap[structtype->GetTypeIndex().GetIdx()];
+        die = idDieMap[id];
+        DEBUG_ASSERT(die, "update type die not exist");
+    } else {
+        DwTag tag = structtype->GetKind() == kTypeStruct ? DW_TAG_structure_type : DW_TAG_union_type;
+        die = module->GetMemPool()->New<DBGDie>(module, tag);
+        tyIdxDieIdMap[structtype->GetTypeIndex().GetIdx()] = die->GetId();
+    }
+
+    if (strIdx.GetIdx()) {
+        stridxDieIdMap[strIdx.GetIdx()] = die->GetId();
+    }
+
+    compUnit->AddSubVec(die);
+
+    die->AddAttr(DW_AT_decl_line, DW_FORM_data4, kStructDBGSize);
+    die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx());
+    die->AddAttr(DW_AT_byte_size, DW_FORM_data4, kDbgDefaultVal);
+    die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+
+    PushParentDie(die);
+
+    // fields
+    uint32 prevBits = 0;
+    for (size_t i = 0; i < structtype->GetFieldsSize(); i++) {
+        MIRType *ety = structtype->GetElemType(static_cast<uint32>(i));
+        FieldPair fp = structtype->GetFieldsElemt(i);
+        if (ety->IsMIRBitFieldType()) {
+            MIRBitFieldType *bfty = static_cast<MIRBitFieldType *>(ety);
+            DBGDie *bfdie = CreateBitfieldDie(bfty, fp.first, prevBits);
+            prevBits += bfty->GetFieldSize();
+            die->AddSubVec(bfdie);
+        } else {
+            prevBits = 0;
+            DBGDie *fdie = CreateFieldDie(fp, 0);
+            die->AddSubVec(fdie);
+        }
+    }
+
+    // parentFields
+    for (size_t i = 0; i < structtype->GetParentFieldsSize(); i++) {
+        FieldPair fp = structtype->GetParentFieldsElemt(i);
+        DBGDie *fdie = CreateFieldDie(fp, 0);
+        die->AddSubVec(fdie);
+    }
+
+    // member functions decl
+    for (auto fp : structtype->GetMethods()) {
+        MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(fp.first.Idx());
+        DEBUG_ASSERT((symbol != nullptr) && symbol->GetSKind() == kStFunc, "member function symbol not exist");
+        MIRFunction *func = symbol->GetValue().mirFunc;
+        DEBUG_ASSERT(func, "member function not exist");
+        DBGDie *fdie = GetOrCreateFuncDeclDie(func);
+        die->AddSubVec(fdie);
+    }
+
+    PopParentDie();
+
+    // member function definitions; these dies are global
+    for (auto fp : structtype->GetMethods()) {
+        MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(fp.first.Idx());
+        DEBUG_ASSERT(symbol && symbol->GetSKind() == kStFunc, "member function symbol not exist");
+        MIRFunction *func = symbol->GetValue().mirFunc;
+        if (!func->GetBody()) {
+            continue;
+        }
+        DEBUG_ASSERT(func, "member function not exist");
+        DBGDie *fdie = GetOrCreateFuncDefDie(func, 0);
+        compUnit->AddSubVec(fdie);
+    }
+
+    return die;
+}
+
+DBGDie *DebugInfo::CreateClassTypeDie(GStrIdx strIdx, const MIRClassType *classtype)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_class_type);
+
+    PushParentDie(die);
+
+    // parent
+    uint32 ptid = classtype->GetParentTyIdx().GetIdx();
+    if (ptid) {
+        MIRType *parenttype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classtype->GetParentTyIdx());
+        DBGDie *parentdie = GetOrCreateTypeDie(parenttype);
+        if (parentdie) {
+            parentdie = module->GetMemPool()->New<DBGDie>(module, DW_TAG_inheritance);
+            parentdie->AddAttr(DW_AT_name, DW_FORM_strp, parenttype->GetNameStrIdx().GetIdx());
+            parentdie->AddAttr(DW_AT_type, DW_FORM_ref4, ptid);
+
+            // set to DW_ACCESS_public for now
+            parentdie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public);
+            die->AddSubVec(parentdie);
+        }
+    }
+
+    PopParentDie();
+
+    // update common fields
+    tyIdxDieIdMap[classtype->GetTypeIndex().GetIdx()] = die->GetId();
+    DBGDie *die1 = CreateStructTypeDie(strIdx, classtype, true);
+    DEBUG_ASSERT(die == die1, "ClassTypeDie update wrong die");
+
+    return die1;
+}
+
+DBGDie *DebugInfo::CreateInterfaceTypeDie(GStrIdx strIdx, const MIRInterfaceType *interfacetype)
+{
+    DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_interface_type);
+
+    PushParentDie(die);
+
+    // parents
+    for (auto it : interfacetype->GetParentsTyIdx()) {
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it);
+        DBGDie *parentdie = GetOrCreateTypeDie(type);
+        if (parentdie) {
+            continue;
+        }
+        parentdie = module->GetMemPool()->New<DBGDie>(module, DW_TAG_inheritance);
+        parentdie->AddAttr(DW_AT_name, DW_FORM_strp, type->GetNameStrIdx().GetIdx());
+        parentdie->AddAttr(DW_AT_type, DW_FORM_ref4, it.GetIdx());
+        parentdie->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal);
+
+        // set to DW_ACCESS_public for now
+        parentdie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public);
+        die->AddSubVec(parentdie);
+    }
+
+    PopParentDie();
+
+    // update common fields
+    tyIdxDieIdMap[interfacetype->GetTypeIndex().GetIdx()] = die->GetId();
+    DBGDie *die1 = CreateStructTypeDie(strIdx, interfacetype, true);
+    DEBUG_ASSERT(die == die1, "InterfaceTypeDie update wrong die");
+
+    return die1;
+}
+
+uint32 DebugInfo::GetAbbrevId(DBGAbbrevEntryVec *vec, DBGAbbrevEntry *entry)
+{
+    for (auto it : vec->GetEntryvec()) {
+        if (it->Equalto(entry)) {
+            return it->GetAbbrevId();
+        }
+    }
+    return 0;
+}
+
+void DebugInfo::BuildAbbrev()
+{
+    uint32 abbrevid = 1;
+    for (uint32 i = 1; i < maxId; i++) {
+        DBGDie *die = idDieMap[i];
+        DBGAbbrevEntry *entry = module->GetMemPool()->New<DBGAbbrevEntry>(module, die);
+
+        if (!tagAbbrevMap[die->GetTag()]) {
+            tagAbbrevMap[die->GetTag()] = module->GetMemPool()->New<DBGAbbrevEntryVec>(module, die->GetTag());
+        }
+
+        uint32 id = GetAbbrevId(tagAbbrevMap[die->GetTag()], entry);
+        if (id) {
+            // using existing abbrev id
+            die->SetAbbrevId(id);
+        } else {
+            // add entry to vector
+            entry->SetAbbrevId(abbrevid++);
+            tagAbbrevMap[die->GetTag()]->GetEntryvec().push_back(entry);
+            abbrevVec.push_back(entry);
+            // update abbrevid in die
+            die->SetAbbrevId(entry->GetAbbrevId());
+        }
+    }
+    for (uint32 i = 1; i < maxId; i++) {
+        DBGDie *die = idDieMap[i];
+        if (die->GetAbbrevId() == 0) {
+            LogInfo::MapleLogger() << "0 abbrevId i = " << i << " die->id = " << die->GetId() << std::endl;
+        }
+    }
+}
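BuildAbbrev assigns one abbreviation per distinct (tag, attribute/form list, has-children) signature, so thousands of structurally identical DIEs can share a single .debug_abbrev entry. The deduplication reduced to its essence, with stand-in types:

```cpp
// Sketch: abbreviation deduplication keyed on the DIE "shape".
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using Shape = std::pair<uint32_t, std::vector<uint32_t>>;  // (tag, at/form pairs)

std::map<Shape, uint32_t> abbrevIds;
uint32_t nextAbbrevId = 1;

uint32_t GetOrAssignAbbrevId(const Shape &shape)
{
    auto it = abbrevIds.find(shape);
    if (it != abbrevIds.end()) {
        return it->second;  // structurally identical DIE: reuse the entry
    }
    return abbrevIds[shape] = nextAbbrevId++;
}
```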
+
+void DebugInfo::BuildDieTree()
+{
+    for (auto it : idDieMap) {
+        if (!it.first) {
+            continue;
+        }
+        DBGDie *die = it.second;
+        uint32 size = die->GetSubDieVecSize();
+        die->SetWithChildren(size > 0);
+        if (size) {
+            die->SetFirstChild(die->GetSubDieVecAt(0));
+            for (uint32 i = 0; i < size - 1; i++) {
+                DBGDie *it0 = die->GetSubDieVecAt(i);
+                DBGDie *it1 = die->GetSubDieVecAt(i + 1);
+                if (it0->GetSubDieVecSize()) {
+                    it0->SetSibling(it1);
+                    (void)it0->AddAttr(DW_AT_sibling, DW_FORM_ref4, it1->GetId());
+                }
+            }
+        }
+    }
+}
+
+void DebugInfo::FillTypeAttrWithDieId()
+{
+    for (auto it : idDieMap) {
+        DBGDie *die = it.second;
+        for (auto at : die->GetAttrVec()) {
+            if (at->GetDwAt() == DW_AT_type) {
+                uint32 tid = at->GetId();
+                MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(tid));
+                if (type) {
+                    uint32 dieid = tyIdxDieIdMap[tid];
+                    if (dieid) {
+                        at->SetId(dieid);
+                    } else {
+                        LogInfo::MapleLogger() << "dieid not found, typeKind = " << type->GetKind()
+                                               << " primType = " << type->GetPrimType()
+                                               << " nameStrIdx = " << type->GetNameStrIdx().GetIdx() << std::endl;
+                    }
+                } else {
+                    LogInfo::MapleLogger() << "type not found, tid = " << tid << std::endl;
+                }
+                break;
+            }
+        }
+    }
+}
+
+DBGDie *DebugInfo::GetDie(const MIRFunction *func)
+{
+    uint32 id = stridxDieIdMap[func->GetNameStrIdx().GetIdx()];
+    if (id) {
+        return idDieMap[id];
+    }
+    return nullptr;
+}
+
+// Methods for calculating Offset and Size of DW_AT_xxx
+size_t DBGDieAttr::SizeOf(DBGDieAttr *attr)
+{
+    DwForm form = attr->dwForm;
+    switch (form) {
+        // case DW_FORM_implicitconst:
+        case DW_FORM_flag_present:
+            return 0;  // Not handled yet.
+        case DW_FORM_flag:
+        case DW_FORM_ref1:
+        case DW_FORM_data1:
+            return sizeof(int8);
+        case DW_FORM_ref2:
+        case DW_FORM_data2:
+            return sizeof(int16);
+        case DW_FORM_ref4:
+        case DW_FORM_data4:
+            return sizeof(int32);
+        case DW_FORM_ref8:
+        case DW_FORM_ref_sig8:
+        case DW_FORM_data8:
+            return sizeof(int64);
+        case DW_FORM_addr:
+            return sizeof(int64);
+        case DW_FORM_sec_offset:
+        case DW_FORM_ref_addr:
+        case DW_FORM_strp:
+        case DW_FORM_GNU_ref_alt:
+            // case DW_FORM_codeLinestrp:
+            // case DW_FORM_strp_sup:
+            // case DW_FORM_ref_sup:
+            return k4BitSize;  // DWARF32, 8 if DWARF64
+
+        case DW_FORM_string: {
+            GStrIdx stridx(attr->value.id);
+            const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(stridx);
+            return str.length() + 1 /* terminal null byte */;
+        }
+        case DW_FORM_exprloc: {
+            DBGExprLoc *ptr = attr->value.ptr;
+            CHECK_FATAL(ptr != (DBGExprLoc *)(0xdeadbeef), "wrong ptr");
+            switch (ptr->GetOp()) {
+                case DW_OP_call_frame_cfa:
+                    return k2BitSize;  // size 1 byte + DW_OP_call_frame_cfa 1 byte
+                case DW_OP_fbreg: {
+                    // DW_OP_fbreg 1 byte
+                    size_t size = 1 + namemangler::GetSleb128Size(ptr->GetFboffset());
+                    return size + namemangler::GetUleb128Size(size);
+                }
+                case DW_OP_addr: {
+                    return namemangler::GetUleb128Size(k9BitSize) + k9BitSize;
+                }
+                default:
+                    return k4BitSize;
+            }
+        }
+        default:
+            CHECK_FATAL(maple::GetDwFormName(form) != nullptr,
+                        "GetDwFormName return null in DebugInfo::FillTypeAttrWithDieId");
+            LogInfo::MapleLogger() << "unhandled SizeOf: " << maple::GetDwFormName(form) << std::endl;
+            return 0;
+    }
+}
+
+void DebugInfo::ComputeSizeAndOffsets()
+{
+    // CU-relative offset is reset to 0 here.
+    uint32 cuOffset = sizeof(int32_t)  // Length of Unit Info
+                      + sizeof(int16)  // DWARF version number : 0x0004
+                      + sizeof(int32)  // Offset into Abbrev. Section : 0x0000
+                      + sizeof(int8);  // Pointer Size (in bytes) : 0x08
+
+    // After returning from this function, the length value is the size
+    // of the .debug_info section
+    ComputeSizeAndOffset(compUnit, cuOffset);
+    debugInfoLength = cuOffset - sizeof(int32_t);
+}
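ComputeSizeAndOffsets seeds the running offset with the DWARF32 compile-unit header: a 4-byte length, a 2-byte version, a 4-byte abbrev-section offset and a 1-byte address size, 11 bytes in total, and the stored unit length excludes its own 4 bytes. For illustration:

```cpp
// Sketch: the DWARF32 CU header layout that seeds cuOffset (11 bytes),
// and why debugInfoLength subtracts sizeof(int32_t) again at the end.
#include <cassert>
#include <cstdint>

int main()
{
    uint32_t cuOffset = sizeof(int32_t)   // unit_length         : 4 bytes
                        + sizeof(int16_t) // version             : 2 bytes
                        + sizeof(int32_t) // debug_abbrev_offset : 4 bytes
                        + sizeof(int8_t); // address_size        : 1 byte
    assert(cuOffset == 11);

    uint32_t dieBytes = 100;  // hypothetical: all DIEs laid out
    cuOffset += dieBytes;

    // unit_length counts everything after the length field itself.
    uint32_t debugInfoLength = cuOffset - sizeof(int32_t);
    assert(debugInfoLength == 107);
    return 0;
}
```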
+
+// Compute the size and offset of a DIE. The Offset is relative to start of the CU.
+// It returns the offset after laying out the DIE.
+void DebugInfo::ComputeSizeAndOffset(DBGDie *die, uint32 &cuOffset)
+{
+    uint32 cuOffsetOrg = cuOffset;
+    die->SetOffset(cuOffset);
+
+    // Add the byte size of the abbreviation code
+    cuOffset += static_cast<uint32>(namemangler::GetUleb128Size(uint64_t(die->GetAbbrevId())));
+
+    // Add the byte size of all the DIE attributes.
+    for (const auto &attr : die->GetAttrVec()) {
+        cuOffset += static_cast<uint32>(attr->SizeOf(attr));
+    }
+
+    die->SetSize(cuOffset - cuOffsetOrg);
+
+    // Let the children compute their offsets.
+    if (die->GetWithChildren()) {
+        uint32 size = die->GetSubDieVecSize();
+
+        for (uint32 i = 0; i < size; i++) {
+            DBGDie *childDie = die->GetSubDieVecAt(i);
+            ComputeSizeAndOffset(childDie, cuOffset);
+        }
+
+        // Each child chain is terminated with a zero byte, adjust the offset.
+        cuOffset += sizeof(int8);
+    }
+}
+
+/* ///////////////
+ * Dumps
+ * ///////////////
+ */
+void DebugInfo::Dump(int indent)
+{
+    LogInfo::MapleLogger() << "\n" << std::endl;
+    LogInfo::MapleLogger() << "maple_debug_information {"
+                           << " Length: " << HEX(debugInfoLength) << std::endl;
+    compUnit->Dump(indent + 1);
+    LogInfo::MapleLogger() << "}\n" << std::endl;
+    LogInfo::MapleLogger() << "maple_debug_abbrev {" << std::endl;
+    for (uint32 i = 1; i < abbrevVec.size(); i++) {
+        abbrevVec[i]->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << "}" << std::endl;
+    return;
+}
+
+void DBGExprLoc::Dump()
+{
+    LogInfo::MapleLogger() << " " << HEX(GetOp());
+    for (auto it : simpLoc->GetOpnd()) {
+        LogInfo::MapleLogger() << " " << HEX(it);
+    }
+}
+
+void DBGDieAttr::Dump(int indent)
+{
+    PrintIndentation(indent);
+    CHECK_FATAL(GetDwFormName(dwForm) && GetDwAtName(dwAttr), "null ptr check");
+    LogInfo::MapleLogger() << GetDwAtName(dwAttr) << " " << GetDwFormName(dwForm);
+    if (dwForm == DW_FORM_string || dwForm == DW_FORM_strp) {
+        GStrIdx idx(value.id);
+        LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec;
+        LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(idx).c_str() << "\"";
+    } else if (dwForm == DW_FORM_ref4) {
+        LogInfo::MapleLogger() << " <" << HEX(value.id) << ">";
+    } else if (dwAttr == DW_AT_encoding) {
+        CHECK_FATAL(GetDwAteName(static_cast<unsigned>(value.u)), "null ptr check");
+        LogInfo::MapleLogger() << " " << GetDwAteName(static_cast<unsigned>(value.u));
+    } else if (dwAttr == DW_AT_location) {
+        value.ptr->Dump();
+    } else {
+        LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec;
+    }
+    LogInfo::MapleLogger() << std::endl;
+}
LogInfo::MapleLogger() << " {" << std::endl; + for (auto it : subDieVec) { + it->Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}"; + } + LogInfo::MapleLogger() << std::endl; + return; +} + +void DBGAbbrevEntry::Dump(int indent) +{ + PrintIndentation(indent); + CHECK_FATAL(GetDwTagName(tag), "null ptr check "); + LogInfo::MapleLogger() << "<" << HEX(abbrevId) << "> " << GetDwTagName(tag); + if (GetWithChildren()) { + LogInfo::MapleLogger() << " [with children] {" << std::endl; + } else { + LogInfo::MapleLogger() << " [no children] {" << std::endl; + } + for (uint32 i = 0; i < attrPairs.size(); i += k2BitSize) { + PrintIndentation(indent + 1); + CHECK_FATAL(GetDwAtName(attrPairs[i]) && GetDwFormName(attrPairs[i + 1]), "NULLPTR CHECK"); + + LogInfo::MapleLogger() << " " << GetDwAtName(attrPairs[i]) << " " << GetDwFormName(attrPairs[i + 1]) << " " + << std::endl; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}" << std::endl; + return; +} + +void DBGAbbrevEntryVec::Dump(int indent) +{ + for (auto it : entryVec) { + PrintIndentation(indent); + it->Dump(indent); + } + return; +} + +// DBGCompileMsgInfo methods +void DBGCompileMsgInfo::ClearLine(uint32 n) +{ + errno_t eNum = memset_s(codeLine[n], MAXLINELEN, 0, MAXLINELEN); + if (eNum) { + FATAL(kLncFatal, "memset_s failed"); + } +} + +DBGCompileMsgInfo::DBGCompileMsgInfo() : startLine(0), errPos(0) +{ + lineNum[0] = 0; + lineNum[1] = 0; + lineNum[kIndx2] = 0; + ClearLine(0); + ClearLine(1); + ClearLine(kIndx2); + errLNum = 0; + errCNum = 0; +} + +void DBGCompileMsgInfo::SetErrPos(uint32 lnum, uint32 cnum) +{ + errLNum = lnum; + errCNum = cnum; +} + +void DBGCompileMsgInfo::UpdateMsg(uint32 lnum, const char *line) +{ + size_t size = strlen(line); + if (size > MAXLINELEN - 1) { + size = MAXLINELEN - 1; + } + startLine = (startLine + k2BitSize) % k3BitSize; + ClearLine(startLine); + errno_t eNum = memcpy_s(codeLine[startLine], MAXLINELEN, line, size); + if (eNum) { + FATAL(kLncFatal, "memcpy_s failed"); + } + codeLine[startLine][size] = '\0'; + lineNum[startLine] = lnum; +} + +void DBGCompileMsgInfo::EmitMsg() +{ + char str[MAXLINELEN + 1]; + + errPos = errCNum; + errPos = (errPos < k2BitSize) ? k2BitSize : errPos; + errPos = (errPos > MAXLINELEN) ? 
+
+void DBGCompileMsgInfo::EmitMsg()
+{
+    char str[MAXLINELEN + 1];
+
+    errPos = errCNum;
+    errPos = (errPos < k2BitSize) ? k2BitSize : errPos;
+    errPos = (errPos > MAXLINELEN) ? MAXLINELEN : errPos;
+    for (uint32 i = 0; i < errPos - 1; i++) {
+        str[i] = ' ';
+    }
+    str[errPos - 1] = '^';
+    str[errPos] = '\0';
+
+    fprintf(stderr, "\n===================================================================\n");
+    fprintf(stderr, "==================");
+    fprintf(stderr, BOLD YEL " Compilation Error Diagnosis " RESET);
+    fprintf(stderr, "==================\n");
+    fprintf(stderr, "===================================================================\n");
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine + k2BitSize) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine + k2BitSize) % k3BitSize]));
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine + 1) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine + 1) % k3BitSize]));
+    fprintf(stderr, "line %4u %s\n", lineNum[(startLine) % k3BitSize],
+            reinterpret_cast<char *>(codeLine[(startLine) % k3BitSize]));
+    fprintf(stderr, BOLD RED " %s\n" RESET, str);
+    fprintf(stderr, "===================================================================\n");
+}
+}  // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info_util.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info_util.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b25d61097f72511f23517861156b916606927093
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/debug_info_util.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_builder.h"
+#include "debug_info.h"
+#include "global_tables.h"
+#include "mir_type.h"
+
+namespace maple {
+#define TOSTR(s) #s
+// utility functions to get the string from tag value etc.
+// GetDwTagName(unsigned n)
+const char *GetDwTagName(unsigned n)
+{
+    switch (n) {
+#define DW_TAG(ID, NAME) \
+    case DW_TAG_##NAME:  \
+        return TOSTR(DW_TAG_##NAME);
+#include "dwarf.def"
+        case DW_TAG_lo_user:
+            return "DW_TAG_lo_user";
+        case DW_TAG_hi_user:
+            return "DW_TAG_hi_user";
+        case DW_TAG_user_base:
+            return "DW_TAG_user_base";
+        default:
+            return nullptr;
+    }
+}
+
+// GetDwFormName(unsigned n)
+const char *GetDwFormName(unsigned n)
+{
+    switch (n) {
+#define DW_FORM(ID, NAME) \
+    case DW_FORM_##NAME:  \
+        return TOSTR(DW_FORM_##NAME);
+#include "dwarf.def"
+        case DW_FORM_lo_user:
+            return "DW_FORM_lo_user";
+        default:
+            return nullptr;
+    }
+}
+
+// GetDwAtName(unsigned n)
+const char *GetDwAtName(unsigned n)
+{
+    switch (n) {
+#define DW_AT(ID, NAME) \
+    case DW_AT_##NAME:  \
+        return TOSTR(DW_AT_##NAME);
+#include "dwarf.def"
+        case DW_AT_lo_user:
+            return "DW_AT_lo_user";
+        default:
+            return nullptr;
+    }
+}
+
+// GetDwOpName(unsigned n)
+const char *GetDwOpName(unsigned n)
+{
+    switch (n) {
+#define DW_OP(ID, NAME) \
+    case DW_OP_##NAME:  \
+        return TOSTR(DW_OP_##NAME);
+#include "dwarf.def"
+        case DW_OP_hi_user:
+            return "DW_OP_hi_user";
+        default:
+            return nullptr;
+    }
+}
+
+const unsigned kDwAteVoid = 0x20;
+// GetDwAteName(unsigned n)
+const char *GetDwAteName(unsigned n)
+{
+    switch (n) {
+#define DW_ATE(ID, NAME) \
+    case DW_ATE_##NAME:  \
+        return TOSTR(DW_ATE_##NAME);
+#include "dwarf.def"
+        case DW_ATE_lo_user:
+            return "DW_ATE_lo_user";
+        case DW_ATE_hi_user:
+            return "DW_ATE_hi_user";
+        case kDwAteVoid:
+            return "kDwAteVoid";
+        default:
+            return nullptr;
+    }
+}
+
+DwAte GetAteFromPTY(PrimType pty)
+{
+    switch (pty) {
+        case PTY_u1:
+            return DW_ATE_boolean;
+        case PTY_u8:
+            return DW_ATE_unsigned_char;
+        case PTY_u16:
+        case PTY_u32:
+        case PTY_u64:
+            return DW_ATE_unsigned;
+        case PTY_i8:
+            return DW_ATE_signed_char;
+        case PTY_i16:
+        case PTY_i32:
+        case PTY_i64:
+            return DW_ATE_signed;
+        case PTY_f32:
+        case PTY_f64:
+        case PTY_f128:
+            return DW_ATE_float;
+        case PTY_agg:
+        case PTY_ref:
+        case PTY_ptr:
+        case PTY_a32:
+        case PTY_a64:
+            return DW_ATE_address;
+        case PTY_c64:
+        case PTY_c128:
+            return DW_ATE_complex_float;
+        case PTY_void:
+            return kDwAteVoid;
+        default:
+            return kDwAteVoid;
+    }
+}
+}  // namespace maple
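These lookup functions are generated with the classic X-macro idiom: dwarf.def expands DW_TAG(ID, NAME) and friends into case labels, so the enum values and their printable names can never drift apart. A self-contained miniature of the same idiom:

```cpp
// Sketch: the X-macro idiom used with dwarf.def, in miniature.
// The list is defined once; each expansion site redefines the macro.
#include <cstdio>

#define COLOR_LIST(X) \
    X(1, red)         \
    X(2, green)       \
    X(3, blue)

enum Color {
#define DEF_ENUM(ID, NAME) kColor_##NAME = (ID),
    COLOR_LIST(DEF_ENUM)
#undef DEF_ENUM
};

const char *GetColorName(unsigned n)
{
    switch (n) {
#define DEF_CASE(ID, NAME) \
    case kColor_##NAME:    \
        return "kColor_" #NAME;
        COLOR_LIST(DEF_CASE)
#undef DEF_CASE
        default:
            return nullptr;
    }
}

int main()
{
    std::printf("%s\n", GetColorName(2));  // prints kColor_green
    return 0;
}
```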
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/driver.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/driver.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..53466a92e502b9852fb9f3934a07959c766d1ccc
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/driver.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdlib>
+#include <cstring>
+#include <unordered_set>
+#include "mir_parser.h"
+#include "bin_mplt.h"
+#include "opcode_info.h"
+#include "mir_function.h"
+#include "constantfold.h"
+#include "mir_type.h"
+
+using namespace maple;
+
+std::unordered_set<std::string> dumpFuncSet = {};
+
+#if MIR_FEATURE_FULL
+
+int main(int argc, char **argv)
+{
+    constexpr int judgeNumber = 2;
+    constexpr uint32 k2Argv = 2;
+    constexpr uint32 k10Argv = 10;
+    constexpr uint32 kNlSize = 5;
+    if (argc < judgeNumber) {
+        (void)MIR_PRINTF(
+            "usage: ./irbuild [-b] [-dumpfunc=<string>] [-srclang=<lang>] <input files>\n"
+            "       By default, the files are converted to corresponding ascii format.\n"
+            "       If -b is specified, output is binary format instead.\n"
+            "       If -dumpfunc=<string> is specified, only functions whose names contain the string are output.\n"
+            "       -dumpfunc= can be specified multiple times to give multiple strings.\n"
+            "       -srclang specifies the source language that produced the mpl file.\n"
+            "       Each output file has .irb added after its file stem.\n");
+        exit(1);
+    }
+
+    std::vector<maple::MIRModule *> themodule(argc, nullptr);
+    bool useBinary = false;
+    bool doConstantFold = false;
+    MIRSrcLang srcLang = kSrcLangUnknown;
+    // process the options which must come first
+    maple::uint32 i = 1;
+    while (i < static_cast<uint32>(argc) && argv[i][0] == '-') {
+        if (argv[i][1] == 'b' && argv[i][k2Argv] == '\0') {
+            useBinary = true;
+        } else if (strcmp(argv[i], "-fold") == 0) {
+            doConstantFold = true;
+        } else if (strncmp(argv[i], "-dumpfunc=", k10Argv) == 0 && strlen(argv[i]) > k10Argv) {
+            std::string funcName(&argv[i][k10Argv]);
+            dumpFuncSet.insert(funcName);
+        } else if (strcmp(argv[i], "-srclang=java") == 0) {
+            srcLang = kSrcLangJava;
+        } else if (strcmp(argv[i], "-srclang=c") == 0) {
+            srcLang = kSrcLangC;
+        } else if (strcmp(argv[i], "-srclang=c++") == 0) {
+            srcLang = kSrcLangCPlusPlus;
+        } else {
+            ERR(kLncErr, "irbuild: unrecognized command line option");
+            return 1;
+        }
+        ++i;
+    }
+    // process the input files
+    while (i < static_cast<uint32>(argc)) {
+        themodule[i] = new maple::MIRModule(argv[i]);
+        themodule[i]->SetSrcLang(srcLang);
+        std::string::size_type lastdot = themodule[i]->GetFileName().find_last_of(".");
+        bool ismplt = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mplt") == 0;
+        bool istmpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".tmpl") == 0;
+        bool ismpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mpl\0") == 0;
+        bool isbpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".bpl\0") == 0;
+        bool ismbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mbc\0") == 0;
+        bool islmbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".lmbc\0") == 0;
+        if (!ismplt && !istmpl && !ismpl && !isbpl && !ismbc && !islmbc) {
+            ERR(kLncErr, "irbuild: input must be .mplt or .mpl or .bpl or .mbc or .lmbc or .tmpl file");
+            return 1;
+        }
+        // input the file
+        if (ismpl || istmpl) {
+            maple::MIRParser theparser(*themodule[i]);
+            if (!theparser.ParseMIR()) {
+                theparser.EmitError(themodule[i]->GetFileName().c_str());
+                return 1;
+            }
+        } else {
+            BinaryMplImport binMplt(*themodule[i]);
+            binMplt.SetImported(false);
+            std::string modid = themodule[i]->GetFileName();
+            if (!binMplt.Import(modid, true)) {
+                ERR(kLncErr, "irbuild: cannot open .mplt or .bpl or .mbc or .lmbc file: %s", modid.c_str());
+                return 1;
+            }
+        }
+
+        // output the file
+        if (!useBinary) {
+            themodule[i]->OutputAsciiMpl(".irb", (ismpl || isbpl || ismbc || islmbc) ? ".mpl" : ".tmpl", &dumpFuncSet,
+                                         true, false);
+        } else {
+            BinaryMplt binMplt(*themodule[i]);
+            std::string modid = themodule[i]->GetFileName();
+            binMplt.GetBinExport().not2mplt = ismpl || isbpl || ismbc || islmbc;
+            std::string filestem = modid.substr(0, lastdot);
+            binMplt.Export(filestem + ((ismpl || isbpl || ismbc || islmbc) ? ".irb.bpl" : ".irb.mplt"), &dumpFuncSet);
+        }
+        ++i;
+    }
+    return 0;
+}
+#else
+#warning "this module is compiled without MIR_FEATURE_FULL=1 defined"
+#endif  // MIR_FEATURE_FULL
".mpl" : ".tmpl", &dumpFuncSet, + true, false); + } else { + BinaryMplt binMplt(*themodule[i]); + std::string modid = themodule[i]->GetFileName(); + binMplt.GetBinExport().not2mplt = ismpl || isbpl || ismbc || islmbc; + std::string filestem = modid.substr(0, lastdot); + binMplt.Export(filestem + ((ismpl || isbpl || ismbc || islmbc) ? ".irb.bpl" : ".irb.mplt"), &dumpFuncSet); + } + ++i; + } + return 0; +} +#else +#warning "this module is compiled without MIR_FEATURE_FULL=1 defined" +#endif // MIR_FEATURE_FULL diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/global_tables.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/global_tables.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b61bb4471a45c6fcda71163c3c72a05b329b14c3 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/global_tables.cpp @@ -0,0 +1,573 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "global_tables.h" +#include "mir_type.h" +#include "mir_symbol.h" + +#if MIR_FEATURE_FULL +namespace maple { +MIRType *TypeTable::CreateMirType(uint32 primTypeIdx) const +{ + MIRTypeKind defaultKind = (primTypeIdx == PTY_constStr ? kTypeConstString : kTypeScalar); + auto primType = static_cast(primTypeIdx); + auto *mirType = new MIRType(defaultKind, primType); + return mirType; +} + +TypeTable::TypeTable() +{ + Init(); +} + +void TypeTable::Init() +{ + // enter the primitve types in type_table_ + typeTable.push_back(static_cast(nullptr)); + DEBUG_ASSERT(typeTable.size() == static_cast(PTY_void), "use PTY_void as the first index to type table"); + uint32 primTypeIdx; + for (primTypeIdx = static_cast(PTY_begin) + 1; primTypeIdx <= static_cast(PTY_end); ++primTypeIdx) { + MIRType *type = CreateMirType(primTypeIdx); + type->SetTypeIndex(TyIdx {primTypeIdx}); + typeTable.push_back(type); + PutToHashTable(type); + } + if (voidPtrType == nullptr) { + voidPtrType = GetOrCreatePointerType(*GetVoid(), PTY_ptr); + } + lastDefaultTyIdx.SetIdx(primTypeIdx); +} + +void TypeTable::Reset() { + ReleaseTypes(); + typeHashTable.clear(); + ptrTypeMap.clear(); + refTypeMap.clear(); + typeTable.clear(); + Init(); +} + +void TypeTable::ReleaseTypes() +{ + for (auto index = static_cast(PTY_void); index < typeTable.size(); ++index) { + delete typeTable[index]; + typeTable[index] = nullptr; + } +} + +void TypeTable::SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type) +{ + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + MIRType *oldType = typeTable.at(tyIdx); + typeTable.at(tyIdx) = &type; + if (oldType != nullptr && oldType != &type) { + (void)typeHashTable.erase(oldType); + (void)typeHashTable.insert(&type); + delete oldType; + } +} + +TypeTable::~TypeTable() +{ + ReleaseTypes(); +} + +void TypeTable::PutToHashTable(MIRType *mirType) +{ + (void)typeHashTable.insert(mirType); +} + +void TypeTable::UpdateMIRType(const MIRType &pType, const TyIdx tyIdx) +{ + MIRType *nType = pType.CopyMIRTypeNode(); + 
+    nType->SetTypeIndex(tyIdx);
+    SetTypeWithTyIdx(tyIdx, *nType);
+}
+
+// used only by bin_mpl_import
+void TypeTable::CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, bool isObject,
+                                    bool isIncomplete)
+{
+    MIRType *nType = pType.CopyMIRTypeNode();
+    nType->SetTypeIndex(tyIdxUsed);
+    typeTable[tyIdxUsed] = nType;
+
+    if (pType.IsMIRPtrType()) {
+        auto &pty = static_cast<MIRPtrType &>(pType);
+        if (pty.GetTypeAttrs() == TypeAttrs()) {
+            if (pty.GetPrimType() != PTY_ref) {
+                ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex();
+            } else {
+                refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex();
+            }
+        } else {
+            (void)typeHashTable.insert(nType);
+        }
+    } else {
+        (void)typeHashTable.insert(nType);
+    }
+
+    GStrIdx stridx = pType.GetNameStrIdx();
+    if (stridx != 0) {
+        module->GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdxUsed);
+        module->PushbackTypeDefOrder(stridx);
+        if (isObject) {
+            module->AddClass(tyIdxUsed);
+            if (!isIncomplete) {
+                GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdxUsed);
+            }
+        }
+    }
+}
+
+MIRType *TypeTable::CreateAndUpdateMirTypeNode(MIRType &pType)
+{
+    MIRType *nType = pType.CopyMIRTypeNode();
+    nType->SetTypeIndex(TyIdx(typeTable.size()));
+    typeTable.push_back(nType);
+
+    if (pType.IsMIRPtrType()) {
+        auto &pty = static_cast<MIRPtrType &>(pType);
+        if (pty.GetTypeAttrs() == TypeAttrs()) {
+            if (pty.GetPrimType() != PTY_ref) {
+                ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex();
+            } else {
+                refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex();
+            }
+        } else {
+            (void)typeHashTable.insert(nType);
+        }
+    } else {
+        (void)typeHashTable.insert(nType);
+    }
+    return nType;
+}
+
+MIRType *TypeTable::GetOrCreateMIRTypeNode(MIRType &pType)
+{
+    if (pType.IsMIRPtrType()) {
+        auto &type = static_cast<MIRPtrType &>(pType);
+        if (type.GetTypeAttrs() == TypeAttrs()) {
+            auto *pMap = (type.GetPrimType() != PTY_ref ? &ptrTypeMap : &refTypeMap);
+            auto *otherPMap = (type.GetPrimType() == PTY_ref ? &ptrTypeMap : &refTypeMap);
+            {
+                std::shared_lock lock(mtx);
+                const auto it = pMap->find(type.GetPointedTyIdx());
+                if (it != pMap->end()) {
+                    return GetTypeFromTyIdx(it->second);
+                }
+            }
+            std::unique_lock lock(mtx);
+            CHECK_FATAL(!(type.GetPointedTyIdx().GetIdx() >= kPtyDerived && type.GetPrimType() == PTY_ref &&
+                          otherPMap->find(type.GetPointedTyIdx()) != otherPMap->end()),
+                        "GetOrCreateMIRType: ref pointed-to type %d has previous ptr occurrence",
+                        type.GetPointedTyIdx().GetIdx());
+            return CreateAndUpdateMirTypeNode(pType);
+        }
+    }
+    {
+        std::shared_lock lock(mtx);
+        const auto it = typeHashTable.find(&pType);
+        if (it != typeHashTable.end()) {
+            return *it;
+        }
+    }
+    std::unique_lock lock(mtx);
+    return CreateAndUpdateMirTypeNode(pType);
+}
+
+MIRType *TypeTable::voidPtrType = nullptr;
+// get or create a type that points to pointedTyIdx
+MIRType *TypeTable::GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType, const TypeAttrs &attrs)
+{
+    MIRPtrType type(pointedTyIdx, primType);
+    type.SetTypeAttrs(attrs);
+    TyIdx tyIdx = GetOrCreateMIRType(&type);
+    DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreatePointerType");
+    return typeTable.at(tyIdx);
+}
+
+MIRType *TypeTable::GetOrCreatePointerType(const MIRType &pointTo, PrimType primType, const TypeAttrs &attrs)
+{
+    if (pointTo.GetPrimType() == PTY_constStr) {
+        primType = PTY_ptr;
+    }
+    return GetOrCreatePointerType(pointTo.GetTypeIndex(), primType, attrs);
+}
+
+const MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type) const
+{
+    if (type.GetKind() != kTypePointer) {
+        return &type;
+    }
+    auto &ptrType = static_cast<MIRPtrType &>(type);
+    return GetTypeFromTyIdx(ptrType.GetPointedTyIdx());
+}
+MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type)
+{
+    return const_cast<MIRType *>(const_cast<const TypeTable *>(this)->GetPointedTypeIfApplicable(type));
+}
+
+MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint8 dim, const uint32 *sizeArray,
+                                              const TypeAttrs &attrs)
+{
+    std::vector<uint32> sizeVector;
+    for (size_t i = 0; i < dim; ++i) {
+        sizeVector.push_back(sizeArray != nullptr ? sizeArray[i] : 0);
+    }
+    MIRArrayType arrayType(elem.GetTypeIndex(), sizeVector);
+    arrayType.SetTypeAttrs(attrs);
+    TyIdx tyIdx = GetOrCreateMIRType(&arrayType);
+    return static_cast<MIRArrayType *>(typeTable[tyIdx]);
+}
+
+// For one dimension array
+MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs)
+{
+    return GetOrCreateArrayType(elem, 1, &size, attrs);
+}
+
+MIRType *TypeTable::GetOrCreateFarrayType(const MIRType &elem)
+{
+    MIRFarrayType type;
+    type.SetElemtTyIdx(elem.GetTypeIndex());
+    TyIdx tyIdx = GetOrCreateMIRType(&type);
+    DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFarrayType");
+    return typeTable.at(tyIdx);
+}
+
+MIRType *TypeTable::GetOrCreateJarrayType(const MIRType &elem)
+{
+    MIRJarrayType type;
+    type.SetElemtTyIdx(elem.GetTypeIndex());
+    TyIdx tyIdx = GetOrCreateMIRType(&type);
+    DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateJarrayType");
+    return typeTable.at(tyIdx);
+}
+
+MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector<TyIdx> &vecType,
+                                            const std::vector<TypeAttrs> &vecAttrs, bool isVarg,
+                                            const TypeAttrs &retAttrs)
+{
+    MIRFuncType funcType(retTyIdx, vecType, vecAttrs, retAttrs);
+    if (isVarg) {
+        funcType.SetVarArgs();
+    }
+    TyIdx tyIdx = GetOrCreateMIRType(&funcType);
+    DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType");
+    return typeTable.at(tyIdx);
+}
+
+MIRType *TypeTable::GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields,
+                                             const FieldVector &parentFields, MIRModule &module, bool forStruct,
+                                             const TypeAttrs &attrs)
+{
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name);
+    MIRStructType type(forStruct ? kTypeStruct : kTypeUnion, strIdx);
+    type.SetFields(fields);
+    type.SetParentFields(parentFields);
+    type.SetTypeAttrs(attrs);
+
+    TyIdx tyIdx = GetOrCreateMIRType(&type);
+    // Global?
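+    // record the name-to-type binding in the module so the definition order is preserved on output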
+ module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + module.PushbackTypeDefOrder(strIdx); + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateStructOrUnion"); + return typeTable.at(tyIdx); +} + +void TypeTable::PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + fields.push_back(FieldPair(strIdx, TyIdxFieldAttrPair(type.GetTypeIndex(), FieldAttrs()))); +} + +MIRType *TypeTable::GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + TyIdx tyIdx = module.GetTypeNameTab()->GetTyIdxFromGStrIdx(strIdx); + if (!tyIdx) { + if (forClass) { + MIRClassType type(kTypeClassIncomplete, strIdx); // for class type + tyIdx = GetOrCreateMIRType(&type); + } else { + MIRInterfaceType type(kTypeInterfaceIncomplete, strIdx); // for interface type + tyIdx = GetOrCreateMIRType(&type); + } + module.PushbackTypeDefOrder(strIdx); + module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + if (typeTable[tyIdx]->GetNameStrIdx() == 0u) { + typeTable[tyIdx]->SetNameStrIdx(strIdx); + } + } + DEBUG_ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateClassOrInterface"); + return typeTable.at(tyIdx); +} + +void TypeTable::AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, const MIRType &fieldType) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fieldName); + FieldAttrs fieldAttrs; + fieldAttrs.SetAttr(FLDATTR_final); // Mark compiler-generated struct fields as final to improve AliasAnalysis + structType.GetFields().push_back(FieldPair(strIdx, TyIdxFieldAttrPair(fieldType.GetTypeIndex(), fieldAttrs))); +} + +void FPConstTable::PostInit() +{ + MIRType &typeFloat = *GlobalTables::GetTypeTable().GetPrimType(PTY_f32); + nanFloatConst = new MIRFloatConst(NAN, typeFloat); + infFloatConst = new MIRFloatConst(INFINITY, typeFloat); + minusInfFloatConst = new MIRFloatConst(-INFINITY, typeFloat); + minusZeroFloatConst = new MIRFloatConst(-0.0, typeFloat); + MIRType &typeDouble = *GlobalTables::GetTypeTable().GetPrimType(PTY_f64); + nanDoubleConst = new MIRDoubleConst(NAN, typeDouble); + infDoubleConst = new MIRDoubleConst(INFINITY, typeDouble); + minusInfDoubleConst = new MIRDoubleConst(-INFINITY, typeDouble); + minusZeroDoubleConst = new MIRDoubleConst(-0.0, typeDouble); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(const IntVal &val, MIRType &type) +{ + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(val.GetExtValue(), type); + } + return DoGetOrCreateIntConst(val.GetExtValue(), type); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(uint64 val, MIRType &type) +{ + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(val, type); + } + return DoGetOrCreateIntConst(val, type); +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConst(uint64 val, MIRType &type) +{ + IntConstKey key(val, type.GetTypeIndex()); + if (intConstTable.find(key) != intConstTable.end()) { + return intConstTable[key]; + } + intConstTable[key] = new MIRIntConst(val, type); + return intConstTable[key]; +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type) +{ + IntConstKey key(val, type.GetTypeIndex()); + { + std::shared_lock lock(mtx); + if (intConstTable.find(key) != intConstTable.end()) { + return 
               intConstTable[key];
+        }
+    }
+    std::unique_lock lock(mtx);
+    intConstTable[key] = new MIRIntConst(val, type);
+    return intConstTable[key];
+}
+
+IntConstTable::~IntConstTable()
+{
+    for (auto pair : intConstTable) {
+        delete pair.second;
+    }
+}
+
+MIRFloatConst *FPConstTable::GetOrCreateFloatConst(float floatVal)
+{
+    if (std::isnan(floatVal)) {
+        return nanFloatConst;
+    }
+    if (std::isinf(floatVal)) {
+        return (floatVal < 0) ? minusInfFloatConst : infFloatConst;
+    }
+    if (floatVal == 0.0 && std::signbit(floatVal)) {
+        return minusZeroFloatConst;
+    }
+    if (ThreadEnv::IsMeParallel()) {
+        return DoGetOrCreateFloatConstThreadSafe(floatVal);
+    }
+    return DoGetOrCreateFloatConst(floatVal);
+}
+
+MIRFloatConst *FPConstTable::DoGetOrCreateFloatConst(float floatVal)
+{
+    const auto it = floatConstTable.find(floatVal);
+    if (it != floatConstTable.cend()) {
+        return it->second;
+    }
+    // create a new one
+    auto *floatConst = new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx {PTY_f32}));
+    floatConstTable[floatVal] = floatConst;
+    return floatConst;
+}
+
+MIRFloatConst *FPConstTable::DoGetOrCreateFloatConstThreadSafe(float floatVal)
+{
+    {
+        std::shared_lock lock(floatMtx);
+        const auto it = floatConstTable.find(floatVal);
+        if (it != floatConstTable.cend()) {
+            return it->second;
+        }
+    }
+    // create a new one
+    std::unique_lock lock(floatMtx);
+    auto *floatConst = new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx {PTY_f32}));
+    floatConstTable[floatVal] = floatConst;
+    return floatConst;
+}
+
+MIRDoubleConst *FPConstTable::GetOrCreateDoubleConst(double doubleVal)
+{
+    if (std::isnan(doubleVal)) {
+        return nanDoubleConst;
+    }
+    if (std::isinf(doubleVal)) {
+        return (doubleVal < 0) ? minusInfDoubleConst : infDoubleConst;
+    }
+    if (doubleVal == 0.0 && std::signbit(doubleVal)) {
+        return minusZeroDoubleConst;
+    }
+    if (ThreadEnv::IsMeParallel()) {
+        return DoGetOrCreateDoubleConstThreadSafe(doubleVal);
+    }
+    return DoGetOrCreateDoubleConst(doubleVal);
+}
+
+MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConst(double doubleVal)
+{
+    const auto it = doubleConstTable.find(doubleVal);
+    if (it != doubleConstTable.cend()) {
+        return it->second;
+    }
+    // create a new one
+    auto *doubleConst =
+        new MIRDoubleConst(doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f64)));
+    doubleConstTable[doubleVal] = doubleConst;
+    return doubleConst;
+}
+
+MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConstThreadSafe(double doubleVal)
+{
+    {
+        std::shared_lock lock(doubleMtx);
+        const auto it = doubleConstTable.find(doubleVal);
+        if (it != doubleConstTable.cend()) {
+            return it->second;
+        }
+    }
+    // create a new one
+    std::unique_lock lock(doubleMtx);
+    auto *doubleConst =
+        new MIRDoubleConst(doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f64)));
+    doubleConstTable[doubleVal] = doubleConst;
+    return doubleConst;
+}
+
+FPConstTable::~FPConstTable()
+{
+    delete nanFloatConst;
+    delete infFloatConst;
+    delete minusInfFloatConst;
+    delete minusZeroFloatConst;
+    delete nanDoubleConst;
+    delete infDoubleConst;
+    delete minusInfDoubleConst;
+    delete minusZeroDoubleConst;
+    for (const auto &floatConst : floatConstTable) {
+        delete floatConst.second;
+    }
+    for (const auto &doubleConst : doubleConstTable) {
+        delete doubleConst.second;
+    }
+}
+
+GSymbolTable::GSymbolTable()
+{
+    Init();
+}
+
+void GSymbolTable::Init()
+{
+    symbolTable.push_back(static_cast<MIRSymbol *>(nullptr));
+}
+
+void GSymbolTable::Reset()
+{
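+    // free all symbols, drop the name map, and re-create the index-0 sentinel via Init()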
ReleaseSymbols(); + symbolTable.clear(); + strIdxToStIdxMap.clear(); + Init(); +} + +void GSymbolTable::ReleaseSymbols() +{ + for (MIRSymbol *symbol : symbolTable) { + delete symbol; + } +} + +GSymbolTable::~GSymbolTable() +{ + ReleaseSymbols(); +} + +MIRSymbol *GSymbolTable::CreateSymbol(uint8 scopeID) +{ + auto *st = new MIRSymbol(symbolTable.size(), scopeID); + CHECK_FATAL(st != nullptr, "CreateSymbol failure"); + symbolTable.push_back(st); + module->AddSymbol(st); + return st; +} + +bool GSymbolTable::AddToStringSymbolMap(const MIRSymbol &st) +{ + GStrIdx strIdx = st.GetNameStrIdx(); + if (strIdxToStIdxMap[strIdx].FullIdx() != 0) { + return false; + } + strIdxToStIdxMap[strIdx] = st.GetStIdx(); + return true; +} + +bool GSymbolTable::RemoveFromStringSymbolMap(const MIRSymbol &st) +{ + const auto it = strIdxToStIdxMap.find(st.GetNameStrIdx()); + if (it != strIdxToStIdxMap.cend()) { + strIdxToStIdxMap.erase(it); + return true; + } + return false; +} + +void GSymbolTable::Dump(bool isLocal, int32 indent) const +{ + for (size_t i = 1; i < symbolTable.size(); ++i) { + const MIRSymbol *symbol = symbolTable[i]; + if (symbol != nullptr) { + symbol->Dump(isLocal, indent); + } + } +} + +thread_local GlobalTables GlobalTables::globalTables; +GlobalTables &GlobalTables::GetGlobalTables() +{ + return globalTables; +} +} // namespace maple +#endif // MIR_FEATURE_FULL diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/intrinsics.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/intrinsics.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3c3070a1e5be4e0df7ea111f2bc7a0d8176c2f2 --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/intrinsics.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "intrinsics.h" +#include "mir_module.h" +#include "mir_type.h" +#include "mir_builder.h" + +namespace maple { +MIRType *IntrinDesc::jsValueType = nullptr; +MIRModule *IntrinDesc::mirModule = nullptr; +IntrinDesc IntrinDesc::intrinTable[INTRN_LAST + 1] = { +#define DEF_MIR_INTRINSIC(X, NAME, INTRN_CLASS, RETURN_TYPE, ...) 
\
+    {(NAME), (INTRN_CLASS), {(RETURN_TYPE), ##__VA_ARGS__}},
+#include "intrinsics.def"
+#undef DEF_MIR_INTRINSIC
+};
+MIRType *IntrinDesc::GetOrCreateJSValueType()
+{
+    if (jsValueType != nullptr) {
+        return jsValueType;
+    }
+    MIRBuilder *jsBuilder = mirModule->GetMIRBuilder();
+    FieldVector payloadFields;
+    GStrIdx i32 = jsBuilder->GetOrCreateStringIndex("i32");
+    GStrIdx u32 = jsBuilder->GetOrCreateStringIndex("u32");
+    GStrIdx boo = jsBuilder->GetOrCreateStringIndex("boo");
+    GStrIdx ptr = jsBuilder->GetOrCreateStringIndex("ptr");
+    payloadFields.push_back(
+        FieldPair(i32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetInt32()->GetTypeIndex(), FieldAttrs())));
+    payloadFields.push_back(
+        FieldPair(u32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs())));
+    payloadFields.push_back(
+        FieldPair(boo, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs())));
+    payloadFields.push_back(
+        FieldPair(ptr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs())));
+    FieldVector parentFields;
+    MIRType *payloadType =
+        GlobalTables::GetTypeTable().GetOrCreateUnionType("payload_type", payloadFields, parentFields, *mirModule);
+    FieldVector sFields;
+    GStrIdx payload = jsBuilder->GetOrCreateStringIndex("payload");
+    GStrIdx tag = jsBuilder->GetOrCreateStringIndex("tag");
+    sFields.push_back(FieldPair(payload, TyIdxFieldAttrPair(payloadType->GetTypeIndex(), FieldAttrs())));
+    sFields.push_back(
+        FieldPair(tag, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs())));
+    MIRType *sType = GlobalTables::GetTypeTable().GetOrCreateStructType("s_type", sFields, parentFields, *mirModule);
+    CHECK_FATAL(sType != nullptr, "can't get struct type, check it!");
+    FieldVector jsValLayoutFields;
+    GStrIdx asBits = jsBuilder->GetOrCreateStringIndex("asBits");
+    GStrIdx s = jsBuilder->GetOrCreateStringIndex("s");
+    GStrIdx asDouble = jsBuilder->GetOrCreateStringIndex("asDouble");
+    GStrIdx asPtr = jsBuilder->GetOrCreateStringIndex("asPtr");
+    jsValLayoutFields.push_back(
+        FieldPair(asBits, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt64()->GetTypeIndex(), FieldAttrs())));
+    jsValLayoutFields.push_back(FieldPair(s, TyIdxFieldAttrPair(sType->GetTypeIndex(), FieldAttrs())));
+    jsValLayoutFields.push_back(FieldPair(
+        asDouble, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetDouble()->GetTypeIndex(), FieldAttrs())));
+    jsValLayoutFields.push_back(
+        FieldPair(asPtr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs())));
+    MIRType *jsValLayoutType = GlobalTables::GetTypeTable().GetOrCreateUnionType("jsval_layout_type", jsValLayoutFields,
+                                                                                 parentFields, *mirModule);
+    return jsValLayoutType;
+}
+
+void IntrinDesc::InitMIRModule(MIRModule *mod)
+{
+    mirModule = mod;
+}
+
+MIRType *IntrinDesc::GetTypeFromArgTy(IntrinArgType argType) const
+{
+    switch (argType) {
+        case kArgTyVoid:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_void));
+        case kArgTyI8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i8));
+        case kArgTyI16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i16));
+        case kArgTyI32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32));
+        case kArgTyI64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i64));
+        case kArgTyU8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u8));
+        case kArgTyU16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u16));
+        case kArgTyU32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u32));
+        case kArgTyU64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u64));
+        case kArgTyU1:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u1));
+        case kArgTyPtr:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_ptr));
+        case kArgTyRef:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_ref));
+        case kArgTyA32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_a32));
+        case kArgTyA64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_a64));
+        case kArgTyF32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f32));
+        case kArgTyF64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f64));
+        case kArgTyF128:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f128));
+        case kArgTyC64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_c64));
+        case kArgTyC128:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_c128));
+        case kArgTyAgg:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_agg));
+        case kArgTyV2I64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2i64));
+        case kArgTyV4I32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v4i32));
+        case kArgTyV8I16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v8i16));
+        case kArgTyV16I8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v16i8));
+        case kArgTyV2U64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2u64));
+        case kArgTyV4U32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v4u32));
+        case kArgTyV8U16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v8u16));
+        case kArgTyV16U8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v16u8));
+        case kArgTyV2F64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2f64));
+        case kArgTyV4F32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v4f32));
+        case kArgTyV1I64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i64));
+        case kArgTyV2I32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2i32));
+        case kArgTyV4I16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v4i16));
+        case kArgTyV8I8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v8i8));
+        case kArgTyV1U64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_u64));
+        case kArgTyV2U32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2u32));
+        case kArgTyV4U16:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v4u16));
+        case kArgTyV8U8:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v8u8));
+        case kArgTyV1F64:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_f64));
+        case kArgTyV2F32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_v2f32));
+#ifdef DYNAMICLANG
+        case kArgTySimplestr:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_simplestr));
+        case kArgTySimpleobj:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_simpleobj));
+        case kArgTyDynany:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_dynany));
+        case kArgTyDyni32:
+            return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_dyni32));
+#endif
+        default:
+            return nullptr;
+    }
+}
+
+MIRType *IntrinDesc::GetArgType(uint32 index) const
+{
+    // argTypes[0] holds the return type, so argument 'index' lives at index + 1
+    CHECK_FATAL(index < kMaxArgsNum, "index out of range");
+    return GetTypeFromArgTy(argTypes[index + 1]);
+}
+
+MIRType *IntrinDesc::GetReturnType() const
+{
+    return GetTypeFromArgTy(argTypes[0]);
+}
+} // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/lexer.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/lexer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..68336fbbe9eadb00502e4a77c7cabfcac8fe38b0
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/lexer.cpp
@@ -0,0 +1,781 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "lexer.h"
+#include <cmath>
+#include <cstdlib>
+#include <cctype>
+#include "mpl_logging.h"
+#include "debug_info.h"
+#include "mir_module.h"
+#include "securec.h"
+#include "utils.h"
+
+namespace maple {
+int32 HexCharToDigit(char c)
+{
+    int32 ret = utils::ToDigit<16, int32>(c);
+    return (ret != INT32_MAX ? ret : 0);
+}
+
+static uint8 Char2num(char c)
+{
+    uint8 ret = utils::ToDigit<16>(c);
+    DEBUG_ASSERT(ret != UINT8_MAX, "not a hex value");
+    return ret;
+}
+
+// Read (next) line from the MIR (text) file, and return the read
+// number of chars.
+// if the line is empty (nothing but a newline), returns 0.
+// if EOF, return -1.
+// The trailing new-line character has been removed.
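+// Usage sketch (hypothetical driver code): after PrepareForFile("foo.mpl"), callers loop on
+// NextToken() until TK_eof; a negative return from ReadALine() is what LexToken() turns into TK_eof.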
+int MIRLexer::ReadALine() +{ + if (airFile == nullptr) { + line = ""; + return -1; + } + + curIdx = 0; + if (!std::getline(*airFile, line)) { // EOF + line = ""; + airFile = nullptr; + currentLineSize = 0; + return -1; + } + + RemoveReturnInline(line); + currentLineSize = line.length(); + return currentLineSize; +} + +int MIRLexer::ReadALineByMirQueue() +{ + if (mirQueue.empty()) { + line = ""; + return -1; + } + curIdx = 0; + line = mirQueue.front(); + RemoveReturnInline(line); + currentLineSize = line.length(); + mirQueue.pop(); + return currentLineSize; +} + +MIRLexer::MIRLexer(MIRModule &mod) + : module(mod), seenComments(mod.GetMPAllocator().Adapter()), keywordMap(mod.GetMPAllocator().Adapter()) +{ + // initialize keywordMap + keywordMap.clear(); +#define KEYWORD(STR) \ + { \ + std::string str; \ + str = #STR; \ + keywordMap[str] = TK_##STR; \ + } +#include "keywords.def" +#undef KEYWORD +} + +void MIRLexer::PrepareForFile(const std::string &filename) +{ + // open MIR file + airFileInternal.open(filename); + CHECK_FATAL(airFileInternal.is_open(), "cannot open MIR file %s\n", &filename); + + airFile = &airFileInternal; + // try to read the first line + if (ReadALine() < 0) { + lineNum = 0; + } else { + lineNum = 1; + } + module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + kind = TK_invalid; +} + +void MIRLexer::PrepareForString(const std::string &src) +{ + SetMirQueue(src); + if (ReadALineByMirQueue() < 0) { + lineNum = 0; + } else { + lineNum = 1; + } + module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + kind = TK_invalid; +} + +void MIRLexer::GenName() +{ + uint32 startIdx = curIdx; + char c = GetNextCurrentCharWithUpperCheck(); + char cp = GetCharAt(curIdx - 1); + if (c == '@' && (cp == 'h' || cp == 'f')) { + // special pattern for exception handling labels: catch or finally + c = GetNextCurrentCharWithUpperCheck(); + } + while (utils::IsAlnum(c) || c < 0 || c == '_' || c == '$' || c == ';' || c == '/' || c == '|' || c == '.' || + c == '?' || c == '@') { + c = GetNextCurrentCharWithUpperCheck(); + } + name = line.substr(startIdx, curIdx - startIdx); +} + +// get the constant value +TokenKind MIRLexer::GetConstVal() +{ + bool negative = false; + int valStart = curIdx; + char c = GetCharAtWithUpperCheck(curIdx); + if (c == '-') { + c = GetNextCurrentCharWithUpperCheck(); + TokenKind tk = GetSpecialFloatConst(); + if (tk != TK_invalid) { + return tk; + } + negative = true; + } + const uint32 lenHexPrefix = 2; + if (line.compare(curIdx, lenHexPrefix, "0x") == 0) { + curIdx += lenHexPrefix; + return GetHexConst(valStart, negative); + } + uint32 startIdx = curIdx; + while (isdigit(c)) { + c = GetNextCurrentCharWithUpperCheck(); + } + char cs = GetCharAtWithUpperCheck(startIdx); + if (!isdigit(cs) && c != '.') { + return TK_invalid; + } + if (c != '.' 
        && c != 'f' && c != 'F' && c != 'e' && c != 'E') {
+        curIdx = startIdx;
+        return GetIntConst(valStart, negative);
+    }
+    return GetFloatConst(valStart, startIdx, negative);
+}
+
+TokenKind MIRLexer::GetSpecialFloatConst()
+{
+    constexpr uint32 lenSpecFloat = 4;
+    constexpr uint32 lenSpecDouble = 3;
+    if (line.compare(curIdx, lenSpecFloat, "inff") == 0 &&
+        !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) {
+        curIdx += lenSpecFloat;
+        theFloatVal = -INFINITY;
+        return TK_floatconst;
+    }
+    if (line.compare(curIdx, lenSpecDouble, "inf") == 0 &&
+        !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) {
+        curIdx += lenSpecDouble;
+        theDoubleVal = -INFINITY;
+        return TK_doubleconst;
+    }
+    if (line.compare(curIdx, lenSpecFloat, "nanf") == 0 &&
+        !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) {
+        curIdx += lenSpecFloat;
+        theFloatVal = -NAN;
+        return TK_floatconst;
+    }
+    if (line.compare(curIdx, lenSpecDouble, "nan") == 0 &&
+        !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) {
+        curIdx += lenSpecDouble;
+        theDoubleVal = -NAN;
+        return TK_doubleconst;
+    }
+    return TK_invalid;
+}
+
+TokenKind MIRLexer::GetHexConst(uint32 valStart, bool negative)
+{
+    char c = GetCharAtWithUpperCheck(curIdx);
+    if (!isxdigit(c)) {
+        name = line.substr(valStart, curIdx - valStart);
+        return TK_invalid;
+    }
+    uint64 tmp = static_cast<uint64>(HexCharToDigit(c));
+    c = GetNextCurrentCharWithUpperCheck();
+    while (isxdigit(c)) {
+        tmp = (tmp << 4) + static_cast<uint64>(HexCharToDigit(c));
+        c = GetNextCurrentCharWithUpperCheck();
+    }
+    theIntVal = static_cast<uint64>(static_cast<int64>(tmp));
+    if (negative) {
+        theIntVal = -theIntVal;
+    }
+    theFloatVal = static_cast<float>(theIntVal);
+    theDoubleVal = static_cast<double>(theIntVal);
+    if (negative && theIntVal == 0) {
+        theFloatVal = -theFloatVal;
+        theDoubleVal = -theDoubleVal;
+    }
+    name = line.substr(valStart, curIdx - valStart);
+    return TK_intconst;
+}
+
+TokenKind MIRLexer::GetIntConst(uint32 valStart, bool negative)
+{
+    auto negOrSelf = [negative](uint64 val) { return negative ? ~val + 1 : val; };
+
+    theIntVal = HexCharToDigit(GetCharAtWithUpperCheck(curIdx));
+
+    uint64 radix = theIntVal == 0 ? 8 : 10;
+
+    char c = GetNextCurrentCharWithUpperCheck();
+
+    for (theIntVal = negOrSelf(theIntVal); isdigit(c); c = GetNextCurrentCharWithUpperCheck()) {
+        theIntVal = (theIntVal * radix) + negOrSelf(HexCharToDigit(c));
+    }
+
+    if (c == 'u' || c == 'U') {  // skip 'u' or 'U'
+        c = GetNextCurrentCharWithUpperCheck();
+
+        if (c == 'l' || c == 'L') {
+            c = GetNextCurrentCharWithUpperCheck();
+        }
+    }
+
+    if (c == 'l' || c == 'L') {
+        c = GetNextCurrentCharWithUpperCheck();
+
+        if (c == 'l' || c == 'L' || c == 'u' || c == 'U') {
+            ++curIdx;
+        }
+    }
+
+    name = line.substr(valStart, curIdx - valStart);
+
+    if (negative) {
+        theFloatVal = static_cast<float>(static_cast<int64>(theIntVal));
+        theDoubleVal = static_cast<double>(static_cast<int64>(theIntVal));
+
+        if (theIntVal == 0) {
+            theFloatVal = -theFloatVal;
+            theDoubleVal = -theDoubleVal;
+        }
+    } else {
+        theFloatVal = static_cast<float>(theIntVal);
+        theDoubleVal = static_cast<double>(theIntVal);
+    }
+
+    return TK_intconst;
+}
+
+TokenKind MIRLexer::GetFloatConst(uint32 valStart, uint32 startIdx, bool negative)
+{
+    char c = GetCharAtWithUpperCheck(curIdx);
+    if (c == '.') {
+        c = GetNextCurrentCharWithUpperCheck();
+    }
+    while (isdigit(c)) {
+        c = GetNextCurrentCharWithUpperCheck();
+    }
+    bool doublePrec = true;
+    if (c == 'e' || c == 'E') {
+        c = GetNextCurrentCharWithUpperCheck();
+        if (!isdigit(c) && c != '-' && c != '+') {
+            name = line.substr(valStart, curIdx - valStart);
+            return TK_invalid;
+        }
+        if (c == '-' || c == '+') {
+            c = GetNextCurrentCharWithUpperCheck();
+        }
+        while (isdigit(c)) {
+            c = GetNextCurrentCharWithUpperCheck();
+        }
+    }
+    if (c == 'f' || c == 'F') {
+        doublePrec = false;
+        c = GetNextCurrentCharWithUpperCheck();
+    }
+    if (c == 'l' || c == 'L') {
+        MIR_ERROR("warning: not yet support long double\n");
+        ++curIdx;
+    }
+
+    std::string floatStr = line.substr(startIdx, curIdx - startIdx);
+    // get the float constant value
+    if (!doublePrec) {
+        int eNum = sscanf_s(floatStr.c_str(), "%e", &theFloatVal);
+        CHECK_FATAL(eNum == 1, "sscanf_s failed");
+
+        if (negative) {
+            theFloatVal = -theFloatVal;
+        }
+        theIntVal = static_cast<int64>(theFloatVal);
+        theDoubleVal = static_cast<double>(theFloatVal);
+        if (negative && fabs(theFloatVal) <= 1e-6) {
+            theDoubleVal = -theDoubleVal;
+        }
+        name = line.substr(valStart, curIdx - valStart);
+        return TK_floatconst;
+    } else {
+        int eNum = sscanf_s(floatStr.c_str(), "%le", &theDoubleVal);
+        CHECK_FATAL(eNum == 1, "sscanf_s failed");
+
+        if (negative) {
+            theDoubleVal = -theDoubleVal;
+        }
+        theIntVal = static_cast<int64>(theDoubleVal);
+        theFloatVal = static_cast<float>(theDoubleVal);
+        if (negative && fabs(theDoubleVal) <= 1e-15) {
+            theFloatVal = -theFloatVal;
+        }
+        name = line.substr(valStart, curIdx - valStart);
+        return TK_doubleconst;
+    }
+}
+
+TokenKind MIRLexer::GetTokenWithPrefixDollar()
+{
+    // token with prefix '$'
+    char c = GetCharAtWithUpperCheck(curIdx);
+    if (utils::IsAlpha(c) || c == '_' || c == '$') {
+        GenName();
+        return TK_gname;
+    } else {
+        // for error reporting.
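+        // keep the offending '$' plus the following char in 'name' for the message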
+ const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; + } +} + +TokenKind MIRLexer::GetTokenWithPrefixPercent() +{ + // token with prefix '%' + char c = GetCharAtWithUpperCheck(curIdx); + if (isdigit(c)) { + int valStart = curIdx - 1; + theIntVal = HexCharToDigit(c); + c = GetNextCurrentCharWithUpperCheck(); + while (isdigit(c)) { + theIntVal = (theIntVal * 10) + HexCharToDigit(c); + DEBUG_ASSERT(theIntVal >= 0, "int value overflow"); + c = GetNextCurrentCharWithUpperCheck(); + } + name = line.substr(valStart, curIdx - valStart); + return TK_preg; + } + if (utils::IsAlpha(c) || c == '_' || c == '$') { + GenName(); + return TK_lname; + } + if (c == '%' && utils::IsAlpha(GetCharAtWithUpperCheck(curIdx + 1))) { + ++curIdx; + GenName(); + return TK_specialreg; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAmpersand() +{ + // token with prefix '&' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c) || c == '_') { + GenName(); + return TK_fname; + } + // for error reporting. + constexpr uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAtOrCircumflex(char prefix) +{ + // token with prefix '@' or `^` + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlnum(c) || c < 0 || c == '_' || c == '@' || c == '$' || c == '|') { + GenName(); + if (prefix == '@') { + return TK_label; + } + return TK_prntfield; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixExclamation() +{ + // token with prefix '!' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c)) { + GenName(); + return TK_typeparam; + } + // for error reporting. + const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixQuotation() +{ + if (GetCharAtWithUpperCheck(curIdx + 1) == '\'') { + theIntVal = GetCharAtWithUpperCheck(curIdx); + curIdx += 2; + return TK_intconst; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixDoubleQuotation() +{ + uint32 startIdx = curIdx; + uint32 shift = 0; + // for \", skip the \ to leave " only internally + // and also for the pair of chars \ and n become '\n' etc. 
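+    // the unescaping happens in place: 'shift' counts how far the compacted text has moved left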
+ char c = GetCurrentCharWithUpperCheck(); + while ((c != 0) && (c != '\"' || GetCharAtWithLowerCheck(curIdx - 1) == '\\')) { + if (GetCharAtWithLowerCheck(curIdx - 1) == '\\') { + shift++; + switch (c) { + case '"': + line[curIdx - shift] = c; + break; + case '\\': + line[curIdx - shift] = c; + // avoid 3rd \ in \\\ being treated as an escaped one + line[curIdx] = 0; + break; + case 'a': + line[curIdx - shift] = '\a'; + break; + case 'b': + line[curIdx - shift] = '\b'; + break; + case 't': + line[curIdx - shift] = '\t'; + break; + case 'n': + line[curIdx - shift] = '\n'; + break; + case 'v': + line[curIdx - shift] = '\v'; + break; + case 'f': + line[curIdx - shift] = '\f'; + break; + case 'r': + line[curIdx - shift] = '\r'; + break; + // support hex value \xNN + case 'x': { + const uint32 hexShift = 4; + const uint32 hexLength = 2; + uint8 c1 = Char2num(GetCharAtWithLowerCheck(curIdx + 1)); + uint8 c2 = Char2num(GetCharAtWithLowerCheck(curIdx + 2)); + uint32 cNew = (c1 << hexShift) + c2; + line[curIdx - shift] = cNew; + curIdx += hexLength; + shift += hexLength; + break; + } + // support oct value \NNN + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { + const uint32 octShift1 = 3; + const uint32 octShift2 = 6; + const uint32 octLength = 3; + DEBUG_ASSERT(curIdx + octLength < line.size(), "index out of range"); + uint32 cNew = (static_cast(GetCharAtWithLowerCheck(curIdx + 1) - '0') << octShift2) + + (static_cast(GetCharAtWithLowerCheck(curIdx + 2) - '0') << octShift1) + + static_cast(GetCharAtWithLowerCheck(curIdx + 3) - '0'); + line[curIdx - shift] = cNew; + curIdx += octLength; + shift += octLength; + break; + } + default: + line[curIdx - shift] = '\\'; + --shift; + line[curIdx - shift] = c; + break; + } + } else if (shift) { + line[curIdx - shift] = c; + } + c = GetNextCurrentCharWithUpperCheck(); + } + if (c != '\"') { + return TK_invalid; + } + // for empty string + if (startIdx == curIdx) { + name = ""; + } else { + name = line.substr(startIdx, curIdx - startIdx - shift); + } + ++curIdx; + return TK_string; +} + +TokenKind MIRLexer::GetTokenSpecial() +{ + --curIdx; + char c = GetCharAtWithLowerCheck(curIdx); + if (utils::IsAlpha(c) || c < 0 || c == '_') { + GenName(); + TokenKind tk = keywordMap[name]; + switch (tk) { + case TK_nanf: + theFloatVal = NAN; + return TK_floatconst; + case TK_nan: + theDoubleVal = NAN; + return TK_doubleconst; + case TK_inff: + theFloatVal = INFINITY; + return TK_floatconst; + case TK_inf: + theDoubleVal = INFINITY; + return TK_doubleconst; + default: + return tk; + } + } + MIR_ERROR("error in input file\n"); + return TK_eof; +} + +TokenKind MIRLexer::LexToken() +{ + // skip spaces + char c = GetCurrentCharWithUpperCheck(); + while (c == ' ' || c == '\t') { + c = GetNextCurrentCharWithUpperCheck(); + } + // check end of line + while (c == 0 || c == '#') { + if (c == '#') { // process comment contents + seenComments.push_back(line.substr(curIdx + 1, currentLineSize - curIdx - 1)); + } + if (needFile) { + if (ReadALine() < 0) { + return TK_eof; + } + } else { + if (ReadALineByMirQueue() < 0) { + return TK_eof; + } + } + ++lineNum; // a new line read. 
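+        // keep the debug info in sync so diagnostics point at the fresh line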
+ module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + // skip spaces + c = GetCurrentCharWithUpperCheck(); + while (c == ' ' || c == '\t') { + c = GetNextCurrentCharWithUpperCheck(); + } + } + char curChar = c; + ++curIdx; + switch (curChar) { + case '\n': + return TK_newline; + case '(': + return TK_lparen; + case ')': + return TK_rparen; + case '{': + return TK_lbrace; + case '}': + return TK_rbrace; + case '[': + return TK_lbrack; + case ']': + return TK_rbrack; + case '<': + return TK_langle; + case '>': + return TK_rangle; + case '=': + return TK_eqsign; + case ',': + return TK_coma; + case ':': + return TK_colon; + case '*': + return TK_asterisk; + case '.': + if (GetCharAtWithUpperCheck(curIdx) == '.') { + const uint32 lenDotdot = 2; + curIdx += lenDotdot; + return TK_dotdotdot; + } + // fall thru for .9100 == 0.9100 + [[clang::fallthrough]]; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + --curIdx; + return GetConstVal(); + case '$': + return GetTokenWithPrefixDollar(); + case '%': + return GetTokenWithPrefixPercent(); + case '&': + return GetTokenWithPrefixAmpersand(); + case '@': + case '^': + return GetTokenWithPrefixAtOrCircumflex(curChar); + case '!': + return GetTokenWithPrefixExclamation(); + case '\'': + return GetTokenWithPrefixQuotation(); + case '\"': + return GetTokenWithPrefixDoubleQuotation(); + default: + return GetTokenSpecial(); + } +} + +TokenKind MIRLexer::NextToken() +{ + kind = LexToken(); + return kind; +} + +std::string MIRLexer::GetTokenString() const +{ + std::string temp; + switch (kind) { + case TK_gname: { + temp = "$"; + temp.append(name); + return temp; + } + case TK_lname: + case TK_preg: { + temp = "%"; + temp.append(name); + return temp; + } + case TK_specialreg: { + temp = "%%"; + temp.append(name); + return temp; + } + case TK_label: { + temp = "@"; + temp.append(name); + return temp; + } + case TK_prntfield: { + temp = "^"; + temp.append(name); + return temp; + } + case TK_intconst: { + temp = std::to_string(theIntVal); + return temp; + } + case TK_floatconst: { + temp = std::to_string(theFloatVal); + return temp; + } + case TK_doubleconst: { + temp = std::to_string(theDoubleVal); + return temp; + } + // misc. 
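+        // the punctuation tokens below simply map back to their literal spelling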
+ case TK_newline: { + temp = "\\n"; + return temp; + } + case TK_lparen: { + temp = "("; + return temp; + } + case TK_rparen: { + temp = ")"; + return temp; + } + case TK_lbrace: { + temp = "{"; + return temp; + } + case TK_rbrace: { + temp = "}"; + return temp; + } + case TK_lbrack: { + temp = "["; + return temp; + } + case TK_rbrack: { + temp = "]"; + return temp; + } + case TK_langle: { + temp = "<"; + return temp; + } + case TK_rangle: { + temp = ">"; + return temp; + } + case TK_eqsign: { + temp = "="; + return temp; + } + case TK_coma: { + temp = ","; + return temp; + } + case TK_dotdotdot: { + temp = "..."; + return temp; + } + case TK_colon: { + temp = ":"; + return temp; + } + case TK_asterisk: { + temp = "*"; + return temp; + } + case TK_string: { + temp = "\""; + temp.append(name); + temp.append("\""); + return temp; + } + default: + temp = "invalid token"; + return temp; + } +} +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_builder.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_builder.cpp new file mode 100755 index 0000000000000000000000000000000000000000..9b333325e874e7731e9b1e71c5d7eff88e7e0c6b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_builder.cpp @@ -0,0 +1,1342 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mir_builder.h" +#include "mir_symbol_builder.h" + +namespace maple { +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue) +{ + auto *fieldConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(constValue, *sType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddrofFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &fieldSymbol) +{ + AddrofNode *fieldExpr = CreateExprAddrof(0, fieldSymbol, mirModule->GetMemPool()); + auto *fieldConst = mirModule->GetMemPool()->New(fieldExpr->GetStIdx(), fieldExpr->GetFieldID(), + *structType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddroffuncFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &funcSymbol) +{ + MIRConst *fieldConst = nullptr; + MIRFunction *vMethod = funcSymbol.GetFunction(); + if (vMethod->IsAbstract()) { + fieldConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *structType.GetElemType(fieldID - 1)); + } else { + AddroffuncNode *addrofFuncExpr = + CreateExprAddroffunc(funcSymbol.GetFunction()->GetPuidx(), mirModule->GetMemPool()); + fieldConst = mirModule->GetMemPool()->New(addrofFuncExpr->GetPUIdx(), + *structType.GetElemType(fieldID - 1)); + } + newConst.AddItem(fieldConst, fieldID); +} + +// fieldID is continuously being updated during traversal; +// when the field is found, its field id is returned via fieldID +bool MIRBuilder::TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID) +{ + TyIdx tid(0); + return TraverseToNamedFieldWithTypeAndMatchStyle(structType, nameIdx, tid, fieldID, kMatchAnyField); +} + +// traverse parent first but match self first. 
+void MIRBuilder::TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx,
+                                              uint32 &fieldID, uint32 &idx)
+{
+    if (structType.IsIncomplete()) {
+        (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex());
+    }
+    // process parent
+    if (structType.GetKind() == kTypeClass || structType.GetKind() == kTypeClassIncomplete) {
+        auto &classType = static_cast<MIRClassType &>(structType);
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx());
+        auto *parentType = static_cast<MIRStructType *>(type);
+        if (parentType != nullptr) {
+            ++fieldID;
+            TraverseToNamedFieldWithType(*parentType, nameIdx, typeIdx, fieldID, idx);
+        }
+    }
+    for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) {
+        ++fieldID;
+        TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first;
+        MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx);
+        if (structType.GetFieldsElemt(fieldIdx).first == nameIdx) {
+            if (typeIdx == 0u || fieldTyIdx == typeIdx) {
+                idx = fieldID;
+                continue;
+            }
+            // for pointer type, check their pointed type
+            MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx);
+            if (type->IsOfSameType(*fieldType)) {
+                idx = fieldID;
+            }
+        }
+
+        if (fieldType->IsStructType()) {
+            auto *subStructType = static_cast<MIRStructType *>(fieldType);
+            TraverseToNamedFieldWithType(*subStructType, nameIdx, typeIdx, fieldID, idx);
+        }
+    }
+}
+
+// fieldID is continuously being updated during traversal;
+// when the field is found, its field id is returned via fieldID
+//
+// typeidx: TyIdx(0) means do not check types.
+// matchstyle: 0: do not match but traverse to update fieldID
+//             1: match top level field only
+//             2: match any field
+//             4: traverse parent first
+//             0xc: do not match but traverse to update fieldID, traverse parent first, found in child
+bool MIRBuilder::TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx,
+                                                           uint32 &fieldID, unsigned int matchStyle)
+{
+    if (structType.IsIncomplete()) {
+        (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex());
+    }
+    if (matchStyle & kParentFirst) {
+        // process parent
+        if ((structType.GetKind() != kTypeClass) && (structType.GetKind() != kTypeClassIncomplete)) {
+            return false;
+        }
+
+        auto &classType = static_cast<MIRClassType &>(structType);
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx());
+        auto *parentType = static_cast<MIRStructType *>(type);
+        if (parentType != nullptr) {
+            ++fieldID;
+            if (matchStyle == (kFoundInChild | kParentFirst | kUpdateFieldID)) {
+                matchStyle = kParentFirst;
+                uint32 idxBackup = nameIdx;
+                nameIdx.reset();
+                // do not match but traverse to update fieldID, traverse parent first
+                TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle);
+                nameIdx.reset(idxBackup);
+            } else if (TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle)) {
+                return true;
+            }
+        }
+    }
+    for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) {
+        ++fieldID;
+        TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first;
+        MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx);
+        DEBUG_ASSERT(fieldType != nullptr, "fieldType is null");
+        if (matchStyle && structType.GetFieldsElemt(fieldIdx).first == nameIdx) {
+            if (typeIdx == 0u || fieldTyIdx == typeIdx ||
+                fieldType->IsOfSameType(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx))) {
+                return true;
+            }
+        }
+        unsigned int style = matchStyle & kMatchAnyField;
+        if (fieldType->IsStructType()) {
+            auto *subStructType = static_cast<MIRStructType *>(fieldType);
+            if (TraverseToNamedFieldWithTypeAndMatchStyle(*subStructType, nameIdx, typeIdx, fieldID, style)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx,
+                                                    unsigned int matchStyle)
+{
+    auto &structType = static_cast<MIRStructType &>(type);
+    uint32 fieldID = 0;
+    GStrIdx strIdx = GetStringIndex(name);
+    if (TraverseToNamedFieldWithTypeAndMatchStyle(structType, strIdx, idx, fieldID, matchStyle)) {
+        return fieldID;
+    }
+    return 0;
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx)
+{
+    return GetStructFieldIDFromNameAndType(type, name, idx, kMatchAnyField);
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirst(MIRType &type, const std::string &name, TyIdx idx)
+{
+    return GetStructFieldIDFromNameAndType(type, name, idx, kParentFirst);
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name,
+                                                                           TyIdx idx)
+{
+    // do not match but traverse to update fieldID, traverse parent first, found in child
+    return GetStructFieldIDFromNameAndType(type, name, idx, kFoundInChild | kParentFirst | kUpdateFieldID);
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromFieldName(MIRType &type, const std::string &name)
+{
+    return GetStructFieldIDFromNameAndType(type, name, TyIdx(0), kMatchAnyField);
+}
+
+FieldID MIRBuilder::GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name)
+{
+    if (type == nullptr) {
+        return 0;
+    }
+    return GetStructFieldIDFromNameAndType(*type, name, TyIdx(0), kParentFirst);
+}
+
+void MIRBuilder::SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx,
+                                               const MIRType &newFieldType)
+{
+    uint32 fieldID = 0;
+    GStrIdx strIdx = GetStringIndex(name);
+    while (true) {
+        if (structType.GetElemStrIdx(fieldID) == strIdx) {
+            if (newStrIdx != 0u) {
+                structType.SetElemStrIdx(fieldID, newStrIdx);
+            }
+            structType.SetElemtTyIdx(fieldID, newFieldType.GetTypeIndex());
+            return;
+        }
+        ++fieldID;
+    }
+}
+
+// create a function named str
+MIRFunction *MIRBuilder::GetOrCreateFunction(const std::string &str, TyIdx retTyIdx)
+{
+    GStrIdx strIdx = GetStringIndex(str);
+    MIRSymbol *funcSt = nullptr;
+    if (strIdx != 0u) {
+        funcSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx);
+        if (funcSt == nullptr) {
+            funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal);
+        } else {
+            DEBUG_ASSERT(funcSt->GetSKind() == kStFunc, "runtime check error");
+            return funcSt->GetFunction();
+        }
+    } else {
+        strIdx = GetOrCreateStringIndex(str);
+        funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal);
+    }
+    auto *fn = mirModule->GetMemPool()->New<MIRFunction>(mirModule, funcSt->GetStIdx());
+    fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size());
+    MIRFuncType funcType;
+    funcType.SetRetTyIdx(retTyIdx);
+    auto funcTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&funcType);
+    auto *funcTypeInTypeTable = static_cast<MIRFuncType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx));
+    fn->SetMIRFuncType(funcTypeInTypeTable);
+    fn->SetReturnTyIdx(retTyIdx);
+    GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn);
+    funcSt->SetFunction(fn);
+    funcSt->SetTyIdx(funcTyIdx);
+    return fn;
+}
+
+MIRFunction *MIRBuilder::GetFunctionFromSymbol(const MIRSymbol &funcSymbol)
+{
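+    // only kStFunc symbols carry a MIRFunction payload; anything else is a caller error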
+    DEBUG_ASSERT(funcSymbol.GetSKind() == kStFunc, "Symbol %s is not a function symbol", funcSymbol.GetName().c_str());
+    return funcSymbol.GetFunction();
+}
+
+MIRFunction *MIRBuilder::GetFunctionFromName(const std::string &str)
+{
+    auto *funcSymbol =
+        GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(str));
+    return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr;
+}
+
+MIRFunction *MIRBuilder::GetFunctionFromStidx(StIdx stIdx)
+{
+    auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+    return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr;
+}
+
+MIRFunction *MIRBuilder::CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments,
+                                        bool isVarg, bool createBody) const
+{
+    MIRSymbol *funcSymbol = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+    GStrIdx strIdx = GetOrCreateStringIndex(name);
+    funcSymbol->SetNameStrIdx(strIdx);
+    if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSymbol)) {
+        return nullptr;
+    }
+    funcSymbol->SetStorageClass(kScText);
+    funcSymbol->SetSKind(kStFunc);
+    auto *fn = mirModule->GetMemPool()->New<MIRFunction>(mirModule, funcSymbol->GetStIdx());
+    fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size());
+    GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn);
+    std::vector<TyIdx> funcVecType;
+    std::vector<TypeAttrs> funcVecAttrs;
+    for (size_t i = 0; i < arguments.size(); ++i) {
+        MIRType *ty = arguments[i].second;
+        FormalDef formalDef(GetOrCreateStringIndex(arguments[i].first.c_str()), nullptr, ty->GetTypeIndex(),
+                            TypeAttrs());
+        fn->GetFormalDefVec().push_back(formalDef);
+        funcVecType.push_back(ty->GetTypeIndex());
+        funcVecAttrs.push_back(TypeAttrs());
+        if (fn->GetSymTab() != nullptr && formalDef.formalSym != nullptr) {
+            (void)fn->GetSymTab()->AddToStringSymbolMap(*formalDef.formalSym);
+        }
+    }
+    funcSymbol->SetTyIdx(GlobalTables::GetTypeTable()
+                             .GetOrCreateFunctionType(returnType.GetTypeIndex(), funcVecType, funcVecAttrs, isVarg)
+                             ->GetTypeIndex());
+    auto *funcType = static_cast<MIRFuncType *>(funcSymbol->GetType());
+    fn->SetMIRFuncType(funcType);
+    funcSymbol->SetFunction(fn);
+    if (createBody) {
+        fn->NewBody();
+    }
+    return fn;
+}
+
+MIRFunction *MIRBuilder::CreateFunction(StIdx stIdx, bool addToTable) const
+{
+    auto *fn = mirModule->GetMemPool()->New<MIRFunction>(mirModule, stIdx);
+    fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size());
+    if (addToTable) {
+        GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn);
+    }
+
+    auto *funcType = mirModule->GetMemPool()->New<MIRFuncType>();
+    fn->SetMIRFuncType(funcType);
+    return fn;
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const
+{
+    GStrIdx strIdx = GetStringIndex(str);
+    if (strIdx != 0u) {
+        StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx);
+        if (stIdx.Idx() != 0) {
+            created = false;
+            return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+        }
+    }
+    created = true;
+    strIdx = GetOrCreateStringIndex(str);
+    MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal);
+    st->SetNameStrIdx(strIdx);
+    st->SetTyIdx(tyIdx);
+    (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st);
+    return st;
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable,
+                                            bool &created) const
+{
+    GStrIdx strIdx = GetStringIndex(str);
+    if (strIdx != 0u) {
+        StIdx stIdx = symbolTable.GetStIdxFromStrIdx(strIdx);
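+        // a non-zero index means the local already exists; reuse it rather than create a duplicate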
+        if (stIdx.Idx() != 0) {
+            created = false;
+            return symbolTable.GetSymbolFromStIdx(stIdx.Idx());
+        }
+    }
+    created = true;
+    strIdx = GetOrCreateStringIndex(str);
+    MIRSymbol *st = symbolTable.CreateSymbol(kScopeLocal);
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    st->SetNameStrIdx(strIdx);
+    st->SetTyIdx(tyIdx);
+    (void)symbolTable.AddToStringSymbolMap(*st);
+    return st;
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func)
+{
+    MIRSymbolTable *symbolTable = func.GetSymTab();
+    DEBUG_ASSERT(symbolTable != nullptr, "symbol_table is null");
+    bool isCreated = false;
+    MIRSymbol *st = GetOrCreateLocalDecl(str, type.GetTypeIndex(), *symbolTable, isCreated);
+    if (isCreated) {
+        st->SetStorageClass(kScAuto);
+        st->SetSKind(kStVar);
+    }
+    return st;
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, const MIRType &type)
+{
+    MIRFunction *currentFunc = GetCurrentFunction();
+    CHECK_FATAL(currentFunc != nullptr, "null ptr check");
+    return GetOrCreateDeclInFunc(str, type, *currentFunc);
+}
+
+MIRSymbol *MIRBuilder::CreateLocalDecl(const std::string &str, const MIRType &type)
+{
+    MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull();
+    return MIRSymbolBuilder::Instance().CreateLocalDecl(*currentFunctionInner->GetSymTab(), GetOrCreateStringIndex(str),
+                                                        type);
+}
+
+MIRSymbol *MIRBuilder::GetGlobalDecl(const std::string &str)
+{
+    return MIRSymbolBuilder::Instance().GetGlobalDecl(GetStringIndex(str));
+}
+
+MIRSymbol *MIRBuilder::GetLocalDecl(const std::string &str)
+{
+    MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull();
+    return MIRSymbolBuilder::Instance().GetLocalDecl(*currentFunctionInner->GetSymTab(), GetStringIndex(str));
+}
+
+// search the scope hierarchy
+MIRSymbol *MIRBuilder::GetDecl(const std::string &str)
+{
+    GStrIdx strIdx = GetStringIndex(str);
+    MIRSymbol *sym = nullptr;
+    if (strIdx != 0u) {
+        // try to find the decl in local scope first
+        MIRFunction *currentFunctionInner = GetCurrentFunction();
+        if (currentFunctionInner != nullptr) {
+            sym = currentFunctionInner->GetSymTab()->GetSymbolFromStrIdx(strIdx);
+        }
+        if (sym == nullptr) {
+            sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx);
+        }
+    }
+    return sym;
+}
+
+MIRSymbol *MIRBuilder::CreateGlobalDecl(const std::string &str, const MIRType &type, MIRStorageClass sc)
+{
+    return MIRSymbolBuilder::Instance().CreateGlobalDecl(GetOrCreateStringIndex(str), type, sc);
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, const MIRType &type)
+{
+    bool isCreated = false;
+    MIRSymbol *st = GetOrCreateGlobalDecl(str, type.GetTypeIndex(), isCreated);
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    if (isCreated) {
+        st->SetStorageClass(kScGlobal);
+        st->SetSKind(kStVar);
+    } else {
+        // The existing symbol may come from another module. We need to register it
+        // in the current module so that the per-module mpl file is self-sustained.
+        mirModule->AddSymbol(st);
+    }
+    MIRConst *cst = GlobalTables::GetConstPool().GetConstFromPool(st->GetNameStrIdx());
+    if (cst != nullptr) {
+        st->SetKonst(cst);
+    }
+    return st;
+}
+
+MIRSymbol *MIRBuilder::GetSymbolFromEnclosingScope(StIdx stIdx) const
+{
+    if (stIdx.FullIdx() == 0) {
+        return nullptr;
+    }
+    if (stIdx.Islocal()) {
+        MIRFunction *fun = GetCurrentFunctionNotNull();
+        MIRSymbol *st = fun->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx());
+        if (st != nullptr) {
+            return st;
+        }
+    }
+    return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+}
+
+MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass,
+                                 uint8 scpID, bool sameType = false) const
+{
+    return GetSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, scpID, sameType);
+}
+
+// when sameType is true, it means to match everything of the symbol
+MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, uint8 scpID,
+                                 bool sameType = false) const
+{
+    if (scpID != kScopeGlobal) {
+        ERR(kLncErr, "not yet implemented");
+        return nullptr;
+    }
+    return MIRSymbolBuilder::Instance().GetSymbol(tyIdx, strIdx, mClass, sClass, sameType);
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass,
+                                         MIRStorageClass sClass, MIRFunction *func, uint8 scpID,
+                                         bool sametype = false) const
+{
+    return GetOrCreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID, sametype);
+}
+
+MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass,
+                                         MIRFunction *func, uint8 scpID, bool sameType = false) const
+{
+    if (MIRSymbol *st = GetSymbol(tyIdx, strIdx, mClass, sClass, scpID, sameType)) {
+        return st;
+    }
+    return CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID);
+}
+
+// when func is null, create global symbol, otherwise create local symbol
+MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass,
+                                    MIRFunction *func, uint8 scpID) const
+{
+    return CreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID);
+}
+
+// when func is null, create global symbol, otherwise create local symbol
+MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass,
+                                    MIRFunction *func, uint8 scpID) const
+{
+    return MIRSymbolBuilder::Instance().CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID);
+}
+
+MIRSymbol *MIRBuilder::CreateConstStringSymbol(const std::string &symbolName, const std::string &content)
+{
+    auto elemPrimType = PTY_u8;
+    MIRType *type = GlobalTables::GetTypeTable().GetPrimType(elemPrimType);
+    uint32 sizeIn = static_cast<uint32>(content.length());
+    MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType(
+        *GlobalTables::GetTypeTable().GetPrimType(elemPrimType), 1, &sizeIn);
+
+    if (GetLocalDecl(symbolName)) {
+        return GetLocalDecl(symbolName);
+    }
+    MIRSymbol *arrayVar = GetOrCreateGlobalDecl(symbolName, *arrayTypeWithSize);
+    arrayVar->SetAttr(ATTR_readonly);
+    arrayVar->SetStorageClass(kScFstatic);
+    MIRAggConst *val = mirModule->GetMemPool()->New<MIRAggConst>(*mirModule, *arrayTypeWithSize);
+    for (uint32 i = 0; i < sizeIn; ++i) {
+        MIRConst *cst = mirModule->GetMemPool()->New<MIRIntConst>(content[i], *type);
+        val->PushBack(cst);
+    }
+    // This interface is only for string literals; a 0 is appended to the end of the string.
+    MIRConst *cst0 = mirModule->GetMemPool()->New<MIRIntConst>(0, *type);
+    val->PushBack(cst0);
+    arrayVar->SetKonst(val);
+    return arrayVar;
+}
+
+MIRSymbol *MIRBuilder::CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const
+{
+    return MIRSymbolBuilder::Instance().CreatePregFormalSymbol(tyIdx, pRegIdx, func);
+}
+
+ConstvalNode *MIRBuilder::CreateConstval(MIRConst *mirConst)
+{
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(mirConst->GetType().GetPrimType(), mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateIntConst(uint64 val, PrimType pty)
+{
+    auto *mirConst =
+        GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetPrimType(pty));
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(pty, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateFloatConst(float val)
+{
+    auto *mirConst =
+        GetCurrentFuncDataMp()->New<MIRFloatConst>(val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f32));
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_f32, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateDoubleConst(double val)
+{
+    auto *mirConst =
+        GetCurrentFuncDataMp()->New<MIRDoubleConst>(val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64));
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_f64, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateFloat128Const(const uint64 *val)
+{
+    auto *mirConst =
+        GetCurrentFuncDataMp()->New<MIRFloat128Const>(*val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f128));
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_f128, mirConst);
+}
+
+ConstvalNode *MIRBuilder::GetConstInt(MemPool &memPool, int val)
+{
+    auto *mirConst =
+        GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetInt64());
+    return memPool.New<ConstvalNode>(PTY_i32, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateAddrofConst(BaseNode &node)
+{
+    DEBUG_ASSERT(node.GetOpCode() == OP_addrof, "illegal op for addrof const");
+    MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull();
+
+    // determine the type of 'node' and create a pointer type, accordingly
+    auto &aNode = static_cast<AddrofNode &>(node);
+    const MIRSymbol *var = currentFunctionInner->GetLocalOrGlobalSymbol(aNode.GetStIdx());
+    TyIdx ptyIdx = var->GetTyIdx();
+    MIRPtrType ptrType(ptyIdx);
+    ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType);
+    MIRType &exprType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx);
+    auto *temp = mirModule->GetMemPool()->New<MIRAddrofConst>(aNode.GetStIdx(), aNode.GetFieldID(), exprType);
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_ptr, temp);
+}
+
+ConstvalNode *MIRBuilder::CreateAddroffuncConst(const BaseNode &node)
+{
+    DEBUG_ASSERT(node.GetOpCode() == OP_addroffunc, "illegal op for addroffunc const");
+
+    const auto &aNode = static_cast<const AddroffuncNode &>(node);
+    MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(aNode.GetPUIdx());
+    TyIdx ptyIdx = f->GetFuncSymbol()->GetTyIdx();
+    MIRPtrType ptrType(ptyIdx);
+    ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType);
+    MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx);
+    auto *mirConst = mirModule->GetMemPool()->New<MIRAddroffuncConst>(aNode.GetPUIdx(), *exprType);
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_ptr, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateStrConst(const BaseNode &node)
+{
+    DEBUG_ASSERT(node.GetOpCode() == OP_conststr, "illegal op for conststr const");
+    UStrIdx strIdx = static_cast<const ConststrNode &>(node).GetStrIdx();
+    CHECK_FATAL(PTY_u8 < GlobalTables::GetTypeTable().GetTypeTable().size(),
+                "index is out of range in MIRBuilder::CreateStrConst");
+    TyIdx tyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8))->GetTypeIndex();
+    MIRPtrType ptrType(tyIdx);
+    tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType);
+    MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    auto *mirConst = mirModule->GetMemPool()->New<MIRStrConst>(strIdx, *exprType);
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_ptr, mirConst);
+}
+
+ConstvalNode *MIRBuilder::CreateStr16Const(const BaseNode &node)
+{
+    DEBUG_ASSERT(node.GetOpCode() == OP_conststr16, "illegal op for conststr16 const");
+    U16StrIdx strIdx = static_cast<const Conststr16Node &>(node).GetStrIdx();
+    CHECK_FATAL(PTY_u16 < GlobalTables::GetTypeTable().GetTypeTable().size(),
+                "index out of range in MIRBuilder::CreateStr16Const");
+    TyIdx ptyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u16))->GetTypeIndex();
+    MIRPtrType ptrType(ptyIdx);
+    ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType);
+    MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx);
+    auto *mirConst = mirModule->GetMemPool()->New<MIRStr16Const>(strIdx, *exprType);
+    return GetCurrentFuncCodeMp()->New<ConstvalNode>(PTY_ptr, mirConst);
+}
+
+SizeoftypeNode *MIRBuilder::CreateExprSizeoftype(const MIRType &type)
+{
+    return GetCurrentFuncCodeMp()->New<SizeoftypeNode>(PTY_u32, type.GetTypeIndex());
+}
+
+FieldsDistNode *MIRBuilder::CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2)
+{
+    return GetCurrentFuncCodeMp()->New<FieldsDistNode>(PTY_i32, type.GetTypeIndex(), field1, field2);
+}
+
+AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool)
+{
+    return CreateExprAddrof(fieldID, symbol.GetStIdx(), memPool);
+}
+
+AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool)
+{
+    if (memPool == nullptr) {
+        memPool = GetCurrentFuncCodeMp();
+    }
+    return memPool->New<AddrofNode>(OP_addrof, PTY_ptr, symbolStIdx, fieldID);
+}
+
+AddroffuncNode *MIRBuilder::CreateExprAddroffunc(PUIdx puIdx, MemPool *memPool)
+{
+    if (memPool == nullptr) {
+        memPool = GetCurrentFuncCodeMp();
+    }
+    return memPool->New<AddroffuncNode>(PTY_ptr, puIdx);
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol)
+{
+    return CreateExprDread(type.GetPrimType(), fieldID, symbol);
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol)
+{
+    auto *node = GetCurrentFuncCodeMp()->New<AddrofNode>(OP_dread, kPtyInvalid, symbol.GetStIdx(), fieldID);
+    node->SetPrimType(GetRegPrimType(ptyp));
+    return node;
+}
+
+RegreadNode *MIRBuilder::CreateExprRegread(PrimType pty, PregIdx regIdx)
+{
+    return GetCurrentFuncCodeMp()->New<RegreadNode>(pty, regIdx);
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(MIRType &type, MIRSymbol &symbol)
+{
+    return CreateExprDread(type, 0, symbol);
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol, uint16 fieldID)
+{
+    if (fieldID == 0) {
+        return CreateExprDread(symbol);
+    }
+    DEBUG_ASSERT(false, "NYI");
+    return nullptr;
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol)
+{
+    return CreateExprDread(*symbol.GetType(), 0, symbol);
+}
+
+AddrofNode *MIRBuilder::CreateExprDread(PregIdx pregID, PrimType pty)
+{
+    auto *dread = GetCurrentFuncCodeMp()->New<AddrofNode>(OP_dread, pty);
+    dread->SetStFullIdx(pregID);
+    return dread;
+}
+
+DreadoffNode *MIRBuilder::CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset)
+{
+    DreadoffNode *node = GetCurrentFuncCodeMp()->New<DreadoffNode>(op, pty);
+    node->stIdx = symbol.GetStIdx();
+    node->offset = offset;
+    return node;
+}
+
+IreadNode *MIRBuilder::CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID,
+                                       BaseNode *addr)
+{
+    TyIdx returnTypeIdx = returnType.GetTypeIndex();
+    CHECK(returnTypeIdx < GlobalTables::GetTypeTable().GetTypeTable().size(),
+          "index out of range in MIRBuilder::CreateExprIread");
+    DEBUG_ASSERT(fieldID != 0 || ptrType.GetPrimType() != PTY_agg,
+                 "Error: Fieldid should not be 0 when trying to iread a field from type ");
+    PrimType type = GetRegPrimType(returnType.GetPrimType());
+    return GetCurrentFuncCodeMp()->New<IreadNode>(OP_iread, type, ptrType.GetTypeIndex(), fieldID, addr);
+}
+
+IreadoffNode *MIRBuilder::CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0)
+{
+    return GetCurrentFuncCodeMp()->New<IreadoffNode>(pty, opnd0, offset);
+}
+
+IreadFPoffNode *MIRBuilder::CreateExprIreadFPoff(PrimType pty, int32 offset)
+{
+    return GetCurrentFuncCodeMp()->New<IreadFPoffNode>(pty, offset);
+}
+
+IaddrofNode *MIRBuilder::CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID,
+                                           BaseNode *addr)
+{
+    IaddrofNode *iAddrOfNode = CreateExprIread(returnType, ptrType, fieldID, addr);
+    iAddrOfNode->SetOpCode(OP_iaddrof);
+    return iAddrOfNode;
+}
+
+IaddrofNode *MIRBuilder::CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr)
+{
+    return GetCurrentFuncCodeMp()->New<IaddrofNode>(OP_iaddrof, returnTypePty, ptrTypeIdx, fieldID, addr);
+}
+
+UnaryNode *MIRBuilder::CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd)
+{
+    return GetCurrentFuncCodeMp()->New<UnaryNode>(opcode, type.GetPrimType(), opnd);
+}
+
+GCMallocNode *MIRBuilder::CreateExprGCMalloc(Opcode opcode, const MIRType &pType, const MIRType &type)
+{
+    return GetCurrentFuncCodeMp()->New<GCMallocNode>(opcode, pType.GetPrimType(), type.GetTypeIndex());
+}
+
+JarrayMallocNode *MIRBuilder::CreateExprJarrayMalloc(Opcode opcode, const MIRType &pType, const MIRType &type,
+                                                     BaseNode *opnd)
+{
+    return GetCurrentFuncCodeMp()->New<JarrayMallocNode>(opcode, pType.GetPrimType(), type.GetTypeIndex(), opnd);
+}
+
+TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd)
+{
+    return GetCurrentFuncCodeMp()->New<TypeCvtNode>(o, toPrimType, fromPrimType, &opnd);
+}
+
+TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromType, BaseNode *opnd)
+{
+    return CreateExprTypeCvt(o, type.GetPrimType(), fromType.GetPrimType(), *opnd);
+}
+
+ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize,
+                                                   BaseNode *opnd)
+{
+    return CreateExprExtractbits(o, type.GetPrimType(), bOffset, bSize, opnd);
+}
+
+ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize,
+                                                   BaseNode *opnd)
+{
+    return GetCurrentFuncCodeMp()->New<ExtractbitsNode>(o, type, bOffset, bSize, opnd);
+}
+
+DepositbitsNode *MIRBuilder::CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize,
+                                                   BaseNode *leftOpnd, BaseNode *rightOpnd)
+{
+    return GetCurrentFuncCodeMp()->New<DepositbitsNode>(o, type, bOffset, bSize, leftOpnd, rightOpnd);
+}
+
+RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd)
+{
+    return CreateExprRetype(type, fromType.GetPrimType(), opnd);
+}
+
+RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd)
+{
+    return GetCurrentFuncCodeMp()->New<RetypeNode>(type.GetPrimType(), fromType, type.GetTypeIndex(), opnd);
+}
+
+BinaryNode *MIRBuilder::CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1)
+{
+    return GetCurrentFuncCodeMp()->New<BinaryNode>(opcode, type.GetPrimType(), opnd0, opnd1);
+}
+
+TernaryNode *MIRBuilder::CreateExprTernary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1,
+                                           BaseNode *opnd2)
+{
+    return GetCurrentFuncCodeMp()->New<TernaryNode>(opcode, type.GetPrimType(), opnd0, opnd1, opnd2);
+}
+
+CompareNode *MIRBuilder::CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0,
+                                           BaseNode *opnd1)
+{
+    return GetCurrentFuncCodeMp()->New<CompareNode>(opcode, type.GetPrimType(), opndType.GetPrimType(), opnd0, opnd1);
+}
+
+ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType)
+{
+    MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType);
+    DEBUG_ASSERT(addrType != nullptr, "addrType is null");
+    auto *arrayNode = GetCurrentFuncCodeMp()->New<ArrayNode>(*GetCurrentFuncCodeMpAllocator(), addrType->GetPrimType(),
+                                                             addrType->GetTypeIndex());
+    arrayNode->SetNumOpnds(0);
+    return arrayNode;
+}
+
+ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op)
+{
+    ArrayNode *arrayNode = CreateExprArray(arrayType);
+    arrayNode->GetNopnd().push_back(op);
+    arrayNode->SetNumOpnds(1);
+    return arrayNode;
+}
+
+ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2)
+{
+    ArrayNode *arrayNode = CreateExprArray(arrayType, op1);
+    arrayNode->GetNopnd().push_back(op2);
+    arrayNode->SetNumOpnds(2);
+    return arrayNode;
+}
+
+ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, std::vector<BaseNode *> ops)
+{
+    MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType);
+    DEBUG_ASSERT(addrType != nullptr, "addrType is null");
+    auto *arrayNode = GetCurrentFuncCodeMp()->New<ArrayNode>(*GetCurrentFuncCodeMpAllocator(), addrType->GetPrimType(),
+                                                             addrType->GetTypeIndex());
+    arrayNode->GetNopnd().insert(arrayNode->GetNopnd().begin(), ops.begin(), ops.end());
+    arrayNode->SetNumOpnds(static_cast<uint8>(ops.size()));
+    return arrayNode;
+}
+
+IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, TyIdx tyIdx,
+                                                   const MapleVector<BaseNode *> &ops)
+{
+    auto *expr = GetCurrentFuncCodeMp()->New<IntrinsicopNode>(*GetCurrentFuncCodeMpAllocator(), op, primType);
+    expr->SetIntrinsic(id);
+    expr->SetNOpnd(ops);
+    expr->SetNumOpnds(ops.size());
+    if (op == OP_intrinsicopwithtype) {
+        expr->SetTyIdx(tyIdx);
+    }
+    return expr;
+}
+
+IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opCode, const MIRType &type,
+                                                   const MapleVector<BaseNode *> &ops)
+{
+    return CreateExprIntrinsicop(idx, opCode, type.GetPrimType(), type.GetTypeIndex(), ops);
+}
+
+DassignNode *MIRBuilder::CreateStmtDassign(const MIRSymbol &symbol, FieldID fieldID, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<DassignNode>(src, symbol.GetStIdx(), fieldID);
+}
+
+RegassignNode *MIRBuilder::CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<RegassignNode>(pty, regIdx, src);
+}
+
+DassignNode *MIRBuilder::CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<DassignNode>(src, sIdx, fieldID);
+}
+
+IassignNode *MIRBuilder::CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<IassignNode>(type.GetTypeIndex(), fieldID, addr, src);
+}
+
+IassignoffNode *MIRBuilder::CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *addr, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<IassignoffNode>(pty, offset, addr, src);
+}
+
+IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src)
+{
+    return GetCurrentFuncCodeMp()->New<IassignFPoffNode>(op, pty, offset, src);
+}
+
+CallNode *MIRBuilder::CreateStmtCall(PUIdx puIdx, const MapleVector<BaseNode *> &args, Opcode opCode)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallNode>(*GetCurrentFuncCodeMpAllocator(), opCode, puIdx, TyIdx());
+    stmt->SetNOpnd(args);
+    stmt->SetNumOpnds(args.size());
+    return stmt;
+}
+
+CallNode *MIRBuilder::CreateStmtCall(const std::string &callee, const MapleVector<BaseNode *> &args)
+{
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(callee);
+    StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx);
+    MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+    DEBUG_ASSERT(st != nullptr, "MIRSymbol st is null");
+    MIRFunction *func = st->GetFunction();
+    return CreateStmtCall(func->GetPuidx(), args, OP_call);
+}
+
+IcallNode *MIRBuilder::CreateStmtIcall(const MapleVector<BaseNode *> &args)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icall);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    return stmt;
+}
+
+IcallNode *MIRBuilder::CreateStmtIcallproto(const MapleVector<BaseNode *> &args)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icallproto);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    return stmt;
+}
+
+IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector<BaseNode *> &args, const MIRSymbol &ret)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned);
+    CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+    CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal ||
+                 ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal),
+                "unknown classtype! check it!");
+    nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0)));
+    stmt->SetNumOpnds(args.size());
+    stmt->GetNopnd().resize(stmt->GetNumOpnds());
+    stmt->SetReturnVec(nrets);
+    for (size_t i = 0; i < stmt->GetNopndSize(); ++i) {
+        stmt->SetNOpndAt(i, args.at(i));
+    }
+    stmt->SetRetTyIdx(ret.GetTyIdx());
+    return stmt;
+}
+
+IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector<BaseNode *> &args, PregIdx pregIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned);
+    CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+    nrets.emplace_back(StIdx(), RegFieldPair(0, pregIdx));
+    stmt->SetNumOpnds(args.size());
+    stmt->GetNopnd().resize(stmt->GetNumOpnds());
+    stmt->SetReturnVec(nrets);
+    for (size_t i = 0; i < stmt->GetNopndSize(); ++i) {
+        stmt->SetNOpndAt(i, args.at(i));
+    }
+    auto *preg = GetCurrentFunction()->GetPregTab()->PregFromPregIdx(pregIdx);
+    DEBUG_ASSERT(preg, "preg should be created before used");
+    if (preg->GetMIRType() == nullptr) {
+        stmt->SetRetTyIdx(TyIdx(preg->GetPrimType()));
+    } else {
+        stmt->SetRetTyIdx(preg->GetMIRType()->GetTypeIndex());
+    }
+    return stmt;
+}
+
+IcallNode *MIRBuilder::CreateStmtIcallprotoAssigned(const MapleVector<BaseNode *> &args, const MIRSymbol &ret)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IcallNode>(*GetCurrentFuncCodeMpAllocator(), OP_icallprotoassigned);
+    CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+    CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal ||
+                 ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal),
+                "unknown classtype! check it!");
+    nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0)));
+    stmt->SetNumOpnds(args.size());
+    stmt->GetNopnd().resize(stmt->GetNumOpnds());
+    stmt->SetReturnVec(nrets);
+    for (size_t i = 0; i < stmt->GetNopndSize(); ++i) {
+        stmt->SetNOpndAt(i, args.at(i));
+    }
+    stmt->SetRetTyIdx(ret.GetTyIdx());
+    return stmt;
+}
+
+IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments,
+                                                       TyIdx tyIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(
+        *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? OP_intrinsiccall : OP_intrinsiccallwithtype, idx);
+    stmt->SetTyIdx(tyIdx);
+    stmt->SetOpnds(arguments);
+    return stmt;
+}
+
+IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector<BaseNode *> &arguments)
+{
+    auto *stmt =
+        GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(*GetCurrentFuncCodeMpAllocator(), OP_xintrinsiccall, idx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(arguments);
+    return stmt;
+}
+
+CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MIRSymbol *ret, Opcode op)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallNode>(*GetCurrentFuncCodeMpAllocator(), op, puIdx);
+    if (ret) {
+        DEBUG_ASSERT(ret->IsLocal(), "Not expected ret");
+        stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0)));
+    }
+    return stmt;
+}
+
+CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MapleVector<BaseNode *> &args, const MIRSymbol *ret,
+                                             Opcode opcode, TyIdx tyIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallNode>(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx, tyIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    if (ret != nullptr) {
+        DEBUG_ASSERT(ret->IsLocal(), "Not expected ret");
+        stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0)));
+    }
+    return stmt;
+}
+
+CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, PregIdx pRegIdx, Opcode opcode, BaseNode *arg)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallNode>(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx);
+    stmt->GetNopnd().push_back(arg);
+    stmt->SetNumOpnds(stmt->GetNopndSize());
+    if (pRegIdx > 0) {
+        stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx)));
+    }
+    return stmt;
+}
+
+CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, const MapleVector<BaseNode *> &args, PregIdx pRegIdx,
+                                                Opcode opcode)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallNode>(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    if (pRegIdx > 0) {
+        stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx)));
+    }
+    return stmt;
+}
+
+IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &args,
+                                                               PregIdx retPregIdx)
+{
+    auto *stmt =
+        GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(*GetCurrentFuncCodeMpAllocator(), OP_intrinsiccallassigned, idx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    if (retPregIdx > 0) {
+        stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, retPregIdx)));
+    }
+    return stmt;
+}
+
+IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &args,
+                                                               const MIRSymbol *ret, TyIdx tyIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(
+        *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? OP_intrinsiccallassigned : OP_intrinsiccallwithtypeassigned,
+        idx);
+    stmt->SetTyIdx(tyIdx);
+    stmt->SetOpnds(args);
+    CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+    if (ret != nullptr) {
+        DEBUG_ASSERT(ret->IsLocal(), "Not expected ret");
+        nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0)));
+    }
+    stmt->SetReturnVec(nrets);
+    return stmt;
+}
+
+IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector<BaseNode *> &args,
+                                                                const MIRSymbol *ret)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<IntrinsiccallNode>(*GetCurrentFuncCodeMpAllocator(),
+                                                                OP_xintrinsiccallassigned, idx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(args);
+    CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter());
+    if (ret != nullptr) {
+        DEBUG_ASSERT(ret->IsLocal(), "Not expected ret");
+        nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0)));
+    }
+    stmt->SetReturnVec(nrets);
+    return stmt;
+}
+
+NaryStmtNode *MIRBuilder::CreateStmtReturn(BaseNode *rVal)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<NaryStmtNode>(*GetCurrentFuncCodeMpAllocator(), OP_return);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->PushOpnd(rVal);
+    return stmt;
+}
+
+NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, const MapleVector<BaseNode *> &rVals)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<NaryStmtNode>(*GetCurrentFuncCodeMpAllocator(), op);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(rVals);
+    return stmt;
+}
+
+CallAssertBoundaryStmtNode *MIRBuilder::CreateStmtCallAssertBoundary(Opcode op, const MapleVector<BaseNode *> &rVals,
+                                                                     GStrIdx funcNameIdx, size_t paramIndex,
+                                                                     GStrIdx stmtFuncNameIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<CallAssertBoundaryStmtNode>(*GetCurrentFuncCodeMpAllocator(), op,
+                                                                         funcNameIdx, paramIndex, stmtFuncNameIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(rVals);
+    return stmt;
+}
+
+NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, BaseNode *rVal)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<NaryStmtNode>(*GetCurrentFuncCodeMpAllocator(), op);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->PushOpnd(rVal);
+    return stmt;
+}
+
+AssertNonnullStmtNode *MIRBuilder::CreateStmtAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx funcNameIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<AssertNonnullStmtNode>(op, funcNameIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetRHS(rVal);
+    return stmt;
+}
+
+AssertBoundaryStmtNode *MIRBuilder::CreateStmtAssertBoundary(Opcode op, const MapleVector<BaseNode *> &rVals,
+                                                             GStrIdx funcNameIdx)
+{
+    auto *stmt = GetCurrentFuncCodeMp()->New<AssertBoundaryStmtNode>(*GetCurrentFuncCodeMpAllocator(), op, funcNameIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetOpnds(rVals);
+    return stmt;
+}
+
+CallAssertNonnullStmtNode *MIRBuilder::CreateStmtCallAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx callFuncNameIdx,
+                                                                   size_t paramIndex, GStrIdx stmtFuncNameIdx)
+{
+    auto *stmt =
+        GetCurrentFuncCodeMp()->New<CallAssertNonnullStmtNode>(op, callFuncNameIdx, paramIndex, stmtFuncNameIdx);
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmt->SetRHS(rVal);
+    return stmt;
+}
+
+UnaryStmtNode *MIRBuilder::CreateStmtUnary(Opcode op, BaseNode *rVal)
+{
+    return GetCurrentFuncCodeMp()->New<UnaryStmtNode>(op, kPtyInvalid, rVal);
+}
+
+UnaryStmtNode *MIRBuilder::CreateStmtThrow(BaseNode *rVal)
+{
+    return CreateStmtUnary(OP_throw, rVal);
+}
+
+IfStmtNode *MIRBuilder::CreateStmtIf(BaseNode *cond)
+{
+    auto *ifStmt = GetCurrentFuncCodeMp()->New<IfStmtNode>();
+    ifStmt->SetOpnd(cond, 0);
+    BlockNode *thenBlock = GetCurrentFuncCodeMp()->New<BlockNode>();
+    ifStmt->SetThenPart(thenBlock);
+    return ifStmt;
+}
+
+IfStmtNode *MIRBuilder::CreateStmtIfThenElse(BaseNode *cond)
+{
+    auto *ifStmt = GetCurrentFuncCodeMp()->New<IfStmtNode>();
+    ifStmt->SetOpnd(cond, 0);
+    auto *thenBlock = GetCurrentFuncCodeMp()->New<BlockNode>();
+    ifStmt->SetThenPart(thenBlock);
+    auto *elseBlock = GetCurrentFuncCodeMp()->New<BlockNode>();
+    ifStmt->SetElsePart(elseBlock);
+    ifStmt->SetNumOpnds(3);
+    return ifStmt;
+}
+
+DoloopNode *MIRBuilder::CreateStmtDoloop(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp,
+                                         BaseNode *incrExp)
+{
+    return GetCurrentFuncCodeMp()->New<DoloopNode>(doVarStIdx, isPReg, startExp, contExp, incrExp,
+                                                   GetCurrentFuncCodeMp()->New<BlockNode>());
+}
+
+SwitchNode *MIRBuilder::CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable)
+{
+    auto *switchNode = GetCurrentFuncCodeMp()->New<SwitchNode>(*GetCurrentFuncCodeMpAllocator(), defaultLabel, opnd);
+    switchNode->SetSwitchTable(switchTable);
+    return switchNode;
+}
+
+GotoNode *MIRBuilder::CreateStmtGoto(Opcode o, LabelIdx labIdx)
+{
+    return GetCurrentFuncCodeMp()->New<GotoNode>(o, labIdx);
+}
+
+JsTryNode *MIRBuilder::CreateStmtJsTry(Opcode, LabelIdx cLabIdx, LabelIdx fLabIdx)
+{
+    return GetCurrentFuncCodeMp()->New<JsTryNode>(static_cast<uint16>(cLabIdx), static_cast<uint16>(fLabIdx));
+}
+
+TryNode *MIRBuilder::CreateStmtTry(const MapleVector<LabelIdx> &cLabIdxs)
+{
+    return GetCurrentFuncCodeMp()->New<TryNode>(cLabIdxs);
+}
+
+CatchNode *MIRBuilder::CreateStmtCatch(const MapleVector<TyIdx> &tyIdxVec)
+{
+    return GetCurrentFuncCodeMp()->New<CatchNode>(tyIdxVec);
+}
+
+LabelNode *MIRBuilder::CreateStmtLabel(LabelIdx labIdx)
+{
+    return GetCurrentFuncCodeMp()->New<LabelNode>(labIdx);
+}
+
+StmtNode *MIRBuilder::CreateStmtComment(const std::string &cmnt)
+{
+    return GetCurrentFuncCodeMp()->New<CommentNode>(*GetCurrentFuncCodeMpAllocator(), cmnt);
+}
+
+AddrofNode *MIRBuilder::CreateAddrof(const MIRSymbol &st, PrimType pty)
+{
+    return GetCurrentFuncCodeMp()->New<AddrofNode>(OP_addrof, pty, st.GetStIdx(), 0);
+}
+
+AddrofNode *MIRBuilder::CreateDread(const MIRSymbol &st, PrimType pty)
+{
+    return GetCurrentFuncCodeMp()->New<AddrofNode>(OP_dread, pty, st.GetStIdx(), 0);
+}
+
+CondGotoNode *MIRBuilder::CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx)
+{
+    return GetCurrentFuncCodeMp()->New<CondGotoNode>(op, labIdx, cond);
+}
+
+LabelIdx MIRBuilder::GetOrCreateMIRLabel(const std::string &name)
+{
+    GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name);
+    MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull();
+    LabelIdx lableIdx = currentFunctionInner->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx);
+    if (lableIdx == 0) {
+        lableIdx = currentFunctionInner->GetLabelTab()->CreateLabel();
+        currentFunctionInner->GetLabelTab()->SetSymbolFromStIdx(lableIdx, strIdx);
+        currentFunctionInner->GetLabelTab()->AddToStringLabelMap(lableIdx);
+    }
+    return lableIdx;
+}
+
+LabelIdx MIRBuilder::CreateLabIdx(MIRFunction &mirFunc)
+{
+    LabelIdx lableIdx = mirFunc.GetLabelTab()->CreateLabel();
+    mirFunc.GetLabelTab()->AddToStringLabelMap(lableIdx);
+    return lableIdx;
+}
+
+void MIRBuilder::AddStmtInCurrentFunctionBody(StmtNode &stmt)
+{
+    MIRFunction *fun = GetCurrentFunctionNotNull();
+    stmt.GetSrcPos().CondSetLineNum(lineNum);
+    fun->GetBody()->AddStatement(&stmt);
+}
+
+MemPool *MIRBuilder::GetCurrentFuncCodeMp()
+{
+    if (MIRFunction *curFunction = GetCurrentFunction()) {
+        return curFunction->GetCodeMemPool();
+    }
+    return mirModule->GetMemPool();
+}
+
+MapleAllocator *MIRBuilder::GetCurrentFuncCodeMpAllocator()
+{
+    if (MIRFunction *curFunction = GetCurrentFunction()) {
+        return &curFunction->GetCodeMPAllocator();
+    }
+    return &mirModule->GetMPAllocator();
+}
+
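+// Illustrative usage sketch (added for documentation; not upstream code): a minimal
+// driver for the builder API defined above. `builder` (a MIRBuilder *) and `mod`
+// (a MIRModule *) are hypothetical names, and GetInt32() is assumed to return the
+// cached i32 primitive type from the global type table.
+//
+//     ArgVector noArgs(mod->GetMPAllocator().Adapter());
+//     MIRFunction *fn = builder->CreateFunction("answer", *GlobalTables::GetTypeTable().GetInt32(),
+//                                               noArgs, false, true);
+//     mod->SetCurFunction(fn);
+//     builder->AddStmtInCurrentFunctionBody(
+//         *builder->CreateStmtReturn(builder->CreateIntConst(42, PTY_i32)));
+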
+MemPool *MIRBuilder::GetCurrentFuncDataMp()
+{
+    if (MIRFunction *curFunction = GetCurrentFunction()) {
+        return curFunction->GetDataMemPool();
+    }
+    return mirModule->GetMemPool();
+}
+
+MIRBuilderExt::MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex) : MIRBuilder(module), mutex(mutex) {}
+
+MemPool *MIRBuilderExt::GetCurrentFuncCodeMp()
+{
+    DEBUG_ASSERT(curFunction, "curFunction is null");
+    return curFunction->GetCodeMemPool();
+}
+
+MapleAllocator *MIRBuilderExt::GetCurrentFuncCodeMpAllocator()
+{
+    DEBUG_ASSERT(curFunction, "curFunction is null");
+    return &curFunction->GetCodeMemPoolAllocator();
+}
+
+void MIRBuilderExt::GlobalLock()
+{
+    if (mutex) {
+        // Take the lock outside the assert so it is not compiled away in release builds.
+        int err = pthread_mutex_lock(mutex);
+        DEBUG_ASSERT(err == 0, "lock failed");
+        (void)err;
+    }
+}
+
+void MIRBuilderExt::GlobalUnlock()
+{
+    if (mutex) {
+        int err = pthread_mutex_unlock(mutex);
+        DEBUG_ASSERT(err == 0, "unlock failed");
+        (void)err;
+    }
+}
+} // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_const.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_const.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..587fc91a2c372892a8eb1f295cc637d3e26c61f9
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_const.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_const.h"
+#include "mir_function.h"
+#include "global_tables.h"
+#include "printing.h"
+#if MIR_FEATURE_FULL
+
+namespace maple {
+void MIRIntConst::Dump(const MIRSymbolTable *) const
+{
+    LogInfo::MapleLogger() << value;
+}
+
+bool MIRIntConst::operator==(const MIRConst &rhs) const
+{
+    if (&rhs == this) {
+        return true;
+    }
+    if (GetKind() != rhs.GetKind()) {
+        return false;
+    }
+    const auto &intConst = static_cast<const MIRIntConst &>(rhs);
+    return ((&intConst.GetType() == &GetType()) && (intConst.value == value));
+}
+
+uint8 MIRIntConst::GetActualBitWidth() const
+{
+    if (value == 0) {
+        return 1;
+    }
+
+    int64 val = GetExtValue();
+    uint64 tmp = val < 0 ? -(val + 1) : val;
+
+    uint8 width = 0;
+    while (tmp != 0) {
+        ++width;
+        tmp = tmp >> 1u;
+    }
+
+    return width;
+}
+
+void MIRAddrofConst::Dump(const MIRSymbolTable *localSymTab) const
+{
+    LogInfo::MapleLogger() << "addrof " << GetPrimTypeName(PTY_ptr);
+    const MIRSymbol *sym = stIdx.IsGlobal() ? GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx())
+                                            : localSymTab->GetSymbolFromStIdx(stIdx.Idx());
+    DEBUG_ASSERT(stIdx.IsGlobal() || sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic,
+                 "MIRAddrofConst can only point to a global symbol");
+    LogInfo::MapleLogger() << (stIdx.IsGlobal() ? " $" : " %") << sym->GetName();
" $" : " %") << sym->GetName(); + if (fldID > 0) { + LogInfo::MapleLogger() << " " << fldID; + } + if (offset != 0) { + LogInfo::MapleLogger() << " (" << offset << ")"; + } +} + +bool MIRAddrofConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsA = static_cast(rhs); + if (&GetType() != &rhs.GetType()) { + return false; + } + return (stIdx == rhsA.stIdx) && (fldID == rhsA.fldID); +} + +void MIRAddroffuncConst::Dump(const MIRSymbolTable *) const +{ + LogInfo::MapleLogger() << "addroffunc " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" + << GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx())->GetName(); +} + +bool MIRAddroffuncConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsAf = static_cast(rhs); + return (&GetType() == &rhs.GetType()) && (puIdx == rhsAf.puIdx); +} + +void MIRLblConst::Dump(const MIRSymbolTable *) const +{ + LogInfo::MapleLogger() << "addroflabel " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " @" << func->GetLabelName(value); +} + +bool MIRLblConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &lblConst = static_cast(rhs); + return (lblConst.value == value); +} + +bool MIRFloatConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.floatValue)) { + return std::isnan(value.floatValue); + } + if (std::isnan(value.floatValue)) { + return std::isnan(floatConst.value.floatValue); + } + if (floatConst.value.floatValue == 0.0 && value.floatValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and FLT_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRDoubleConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.dValue)) { + return std::isnan(value.dValue); + } + if (std::isnan(value.dValue)) { + return std::isnan(floatConst.value.dValue); + } + if (floatConst.value.dValue == 0.0 && value.dValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and DBL_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRFloat128Const::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if ((value[0] == floatConst.value[0]) && (value[1] == floatConst.value[1])) { + return true; + } + return false; +} + +bool MIRAggConst::operator==(const MIRConst &rhs) const +{ + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &aggregateConst = static_cast(rhs); + if (aggregateConst.constVec.size() != 
+        return false;
+    }
+    for (size_t i = 0; i < constVec.size(); ++i) {
+        if (!(*aggregateConst.constVec[i] == *constVec[i])) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void MIRFloatConst::Dump(const MIRSymbolTable *) const
+{
+    LogInfo::MapleLogger() << std::setprecision(std::numeric_limits<float>::max_digits10) << value.floatValue << "f";
+}
+
+void MIRDoubleConst::Dump(const MIRSymbolTable *) const
+{
+    LogInfo::MapleLogger() << std::setprecision(std::numeric_limits<double>::max_digits10) << value.dValue;
+}
+
+void MIRFloat128Const::Dump(const MIRSymbolTable *) const
+{
+    constexpr int fieldWidth = 16;
+    std::ios::fmtflags f(LogInfo::MapleLogger().flags());
+    LogInfo::MapleLogger().setf(std::ios::uppercase);
+    LogInfo::MapleLogger() << "0xL" << std::hex << std::setfill('0') << std::setw(fieldWidth) << value[0]
+                           << std::setfill('0') << std::setw(fieldWidth) << value[1];
+    LogInfo::MapleLogger().flags(f);
+}
+
+void MIRAggConst::Dump(const MIRSymbolTable *localSymTab) const
+{
+    LogInfo::MapleLogger() << "[";
+    size_t size = constVec.size();
+    for (size_t i = 0; i < size; ++i) {
+        if (fieldIdVec[i] != 0) {
+            LogInfo::MapleLogger() << fieldIdVec[i] << "= ";
+        }
+        constVec[i]->Dump(localSymTab);
+        if (i != size - 1) {
+            LogInfo::MapleLogger() << ", ";
+        }
+    }
+    LogInfo::MapleLogger() << "]";
+}
+
+MIRStrConst::MIRStrConst(const std::string &str, MIRType &type)
+    : MIRConst(type, kConstStrConst), value(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str))
+{
+}
+
+void MIRStrConst::Dump(const MIRSymbolTable *) const
+{
+    LogInfo::MapleLogger() << "conststr " << GetPrimTypeName(GetType().GetPrimType());
+    const std::string &dumpStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(value);
+    PrintString(dumpStr);
+}
+
+bool MIRStrConst::operator==(const MIRConst &rhs) const
+{
+    if (&rhs == this) {
+        return true;
+    }
+    if (GetKind() != rhs.GetKind()) {
+        return false;
+    }
+    const auto &rhsCs = static_cast<const MIRStrConst &>(rhs);
+    return (&rhs.GetType() == &GetType()) && (value == rhsCs.value);
+}
+
+MIRStr16Const::MIRStr16Const(const std::u16string &str, MIRType &type)
+    : MIRConst(type, kConstStr16Const), value(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str))
+{
+}
+
+void MIRStr16Const::Dump(const MIRSymbolTable *) const
+{
+    LogInfo::MapleLogger() << "conststr16 " << GetPrimTypeName(GetType().GetPrimType());
+    std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(value);
+    // UTF-16 strings are dumped as UTF-8 strings in mpl to keep the printable chars in ASCII form
+    std::string str;
+    (void)namemangler::UTF16ToUTF8(str, str16);
+    PrintString(str);
+}
+
+bool MIRStr16Const::operator==(const MIRConst &rhs) const
+{
+    if (&rhs == this) {
+        return true;
+    }
+    if (GetKind() != rhs.GetKind()) {
+        return false;
+    }
+    const auto &rhsCs = static_cast<const MIRStr16Const &>(rhs);
+    return (&GetType() == &rhs.GetType()) && (value == rhsCs.value);
+}
+
+bool IsDivSafe(const MIRIntConst &dividend, const MIRIntConst &divisor, PrimType pType)
+{
+    if (IsUnsignedInteger(pType)) {
+        return divisor.GetValue() != 0;
+    }
+
+    return divisor.GetValue() != 0 && (!dividend.GetValue().IsMinValue() || !divisor.GetValue().AreAllBitsOne());
+}
+
+} // namespace maple
+#endif // MIR_FEATURE_FULL
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_function.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_function.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4e95c7562b76df6390d8f3a8707913573a62a7c4
--- /dev/null
+++ 
b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_function.cpp @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mir_function.h" +#include +#include +#include "mir_nodes.h" +#include "printing.h" +#include "string_utils.h" + +namespace { +using namespace maple; +enum FuncProp : uint32_t { + kFuncPropHasCall = 1U, // the function has call + kFuncPropRetStruct = 1U << 1, // the function returns struct + kFuncPropUserFunc = 1U << 2, // the function is a user func + kFuncPropInfoPrinted = 1U << 3, // to avoid printing frameSize/moduleid/funcSize info more + // than once per function since they + // can only be printed at the beginning of a block + kFuncPropNeverReturn = 1U << 4, // the function when called never returns + kFuncPropHasSetjmp = 1U << 5, // the function contains call to setjmp + kFuncPropHasAsm = 1U << 6, // the function has use of inline asm + kFuncPropStructReturnedInRegs = 1U << 7, // the function returns struct in registers +}; +} // namespace + +namespace maple { +const MIRSymbol *MIRFunction::GetFuncSymbol() const +{ + return GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); +} +MIRSymbol *MIRFunction::GetFuncSymbol() +{ + const MIRFunction *mirFunc = const_cast(this); + DEBUG_ASSERT(mirFunc != nullptr, "null ptr check"); + return const_cast(mirFunc->GetFuncSymbol()); +} + +const std::string &MIRFunction::GetName() const +{ + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetName(); +} + +GStrIdx MIRFunction::GetNameStrIdx() const +{ + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetNameStrIdx(); +} + +const std::string &MIRFunction::GetBaseClassName() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseClassStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncName() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncNameWithType() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncWithTypeStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncSig() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncSigStrIdx); +} + +const std::string &MIRFunction::GetSignature() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(signatureStrIdx); +} + +const MIRType *MIRFunction::GetReturnType() const +{ + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} +MIRType *MIRFunction::GetReturnType() +{ + return const_cast(const_cast(this)->GetReturnType()); +} +const MIRType *MIRFunction::GetClassType() const +{ + return 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(classTyIdx); +} +const MIRType *MIRFunction::GetNthParamType(size_t i) const +{ + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + DEBUG_ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetParamTypeList()[i]); +} +MIRType *MIRFunction::GetNthParamType(size_t i) +{ + return const_cast(const_cast(this)->GetNthParamType(i)); +} + +// reconstruct formals, and return a new MIRFuncType +MIRFuncType *MIRFunction::ReconstructFormals(const std::vector &symbols, bool clearOldArgs) +{ + auto *newFuncType = static_cast(funcType->CopyMIRTypeNode()); + if (clearOldArgs) { + formalDefVec.clear(); + newFuncType->GetParamTypeList().clear(); + newFuncType->GetParamAttrsList().clear(); + } + for (auto *symbol : symbols) { + FormalDef formalDef(symbol->GetNameStrIdx(), symbol, symbol->GetTyIdx(), symbol->GetAttrs()); + formalDefVec.push_back(formalDef); + newFuncType->GetParamTypeList().push_back(symbol->GetTyIdx()); + newFuncType->GetParamAttrsList().push_back(symbol->GetAttrs()); + } + return newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs) +{ + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs) +{ + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + newFuncType->SetRetTyIdx(retTyIdx); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +LabelIdx MIRFunction::GetOrCreateLableIdxFromName(const std::string &name) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + LabelIdx labelIdx = GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labelIdx == 0) { + labelIdx = GetLabelTab()->CreateLabel(); + GetLabelTab()->SetSymbolFromStIdx(labelIdx, strIdx); + GetLabelTab()->AddToStringLabelMap(labelIdx); + } + return labelIdx; +} + +bool MIRFunction::HasCall() const +{ + return flag & kFuncPropHasCall; +} +void MIRFunction::SetHasCall() +{ + flag |= kFuncPropHasCall; +} + +bool MIRFunction::IsReturnStruct() const +{ + return flag & kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct() +{ + flag |= kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct(const MIRType &retType) +{ + if (retType.IsStructType()) { + flag |= kFuncPropRetStruct; + } +} +void MIRFunction::SetReturnStruct(const MIRType *retType) +{ + switch (retType->GetKind()) { + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: + flag |= kFuncPropRetStruct; + break; + default:; + } +} + +bool MIRFunction::IsUserFunc() const +{ + return flag & kFuncPropUserFunc; +} +void MIRFunction::SetUserFunc() +{ + flag |= kFuncPropUserFunc; +} + +bool MIRFunction::IsInfoPrinted() const +{ + return flag & kFuncPropInfoPrinted; +} +void MIRFunction::SetInfoPrinted() +{ + flag |= kFuncPropInfoPrinted; +} +void MIRFunction::ResetInfoPrinted() +{ + flag &= ~kFuncPropInfoPrinted; +} + +void 
MIRFunction::SetNoReturn() +{ + flag |= kFuncPropNeverReturn; +} +bool MIRFunction::NeverReturns() const +{ + return flag & kFuncPropNeverReturn; +} + +void MIRFunction::SetHasSetjmp() +{ + flag |= kFuncPropHasSetjmp; +} + +bool MIRFunction::HasSetjmp() const +{ + return ((flag & kFuncPropHasSetjmp) != kTypeflagZero); +} + +void MIRFunction::SetHasAsm() +{ + flag |= kFuncPropHasAsm; +} + +bool MIRFunction::HasAsm() const +{ + return ((flag & kFuncPropHasAsm) != kTypeflagZero); +} + +void MIRFunction::SetStructReturnedInRegs() +{ + flag |= kFuncPropStructReturnedInRegs; +} + +bool MIRFunction::StructReturnedInRegs() const +{ + return ((flag & kFuncPropStructReturnedInRegs) != kTypeflagZero); +} + +void MIRFunction::SetAttrsFromSe(uint8 specialEffect) +{ + // NoPrivateDefEffect + if ((specialEffect & kDefEffect) == kDefEffect) { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + // NoPrivateUseEffect + if ((specialEffect & kUseEffect) == kUseEffect) { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + // IpaSeen + if ((specialEffect & kIpaSeen) == kIpaSeen) { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + // Pure + if ((specialEffect & kPureFunc) == kPureFunc) { + funcAttrs.SetAttr(FUNCATTR_pure); + } + // NoDefArgEffect + if ((specialEffect & kNoDefArgEffect) == kNoDefArgEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + // NoDefEffect + if ((specialEffect & kNoDefEffect) == kNoDefEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + // NoRetNewlyAllocObj + if ((specialEffect & kNoRetNewlyAllocObj) == kNoRetNewlyAllocObj) { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + // NoThrowException + if ((specialEffect & kNoThrowException) == kNoThrowException) { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } +} + +void FuncAttrs::DumpAttributes() const +{ +// parse no content of attr +#define STRING(s) #s +#define FUNC_ATTR +#define NOCONTENT_ATTR +#define ATTR(AT) \ + if (GetAttr(FUNCATTR_##AT)) { \ + LogInfo::MapleLogger() << " " << STRING(AT); \ + } +#include "all_attributes.def" +#undef ATTR +#undef NOCONTENT_ATTR +#undef FUNC_ATTR + // parse content of attr + if (GetAttr(FUNCATTR_alias) && !GetAliasFuncName().empty()) { + LogInfo::MapleLogger() << " alias ( \"" << GetAliasFuncName() << "\" )"; + } + if (GetAttr(FUNCATTR_constructor_priority) && GetConstructorPriority() != -1) { + LogInfo::MapleLogger() << " constructor_priority ( " << GetConstructorPriority() << " )"; + } + if (GetAttr(FUNCATTR_destructor_priority) && GetDestructorPriority() != -1) { + LogInfo::MapleLogger() << " destructor_priority ( " << GetDestructorPriority() << " )"; + } + if (GetAttr(FUNCATTR_frame_pointer) && !framePointer.empty()) { + LogInfo::MapleLogger() << " frame-pointer ( " << framePointer << " )"; + } + if (GetAttr(FUNCATTR_frame_reserved_slots) && GetFrameResverdSlot() != 0) { + LogInfo::MapleLogger() << " frame-reserved-slots ( " << GetFrameResverdSlot() << " )"; + } +} + +void MIRFunction::DumpFlavorLoweredThanMmpl() const +{ + LogInfo::MapleLogger() << " ("; + + // Dump arguments + bool hasPrintedFormal = false; + for (uint32 i = 0; i < formalDefVec.size(); i++) { + MIRSymbol *symbol = formalDefVec[i].formalSym; + if (symbol == nullptr && + (formalDefVec[i].formalStrIdx.GetIdx() == 0 || + GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx).empty())) { + break; + } + hasPrintedFormal = true; + if (symbol == nullptr) { + LogInfo::MapleLogger() << "var %" + << GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx) + << " "; + } else { + if 
(symbol->GetSKind() != kStPreg) { + LogInfo::MapleLogger() << "var %" << symbol->GetName() << " "; + } else { + LogInfo::MapleLogger() << "reg %" << symbol->GetPreg()->GetPregNo() << " "; + } + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + constexpr uint8 indent = 2; + ty->Dump(indent); + if (symbol != nullptr) { + symbol->GetAttrs().DumpAttributes(); + } else { + formalDefVec[i].formalAttrs.DumpAttributes(); + } + if (i != (formalDefVec.size() - 1)) { + LogInfo::MapleLogger() << ", "; + } + } + if (IsVarargs()) { + if (!hasPrintedFormal) { + LogInfo::MapleLogger() << "..."; + } else { + LogInfo::MapleLogger() << ", ..."; + } + } + + LogInfo::MapleLogger() << ") "; + GetReturnType()->Dump(1); +} + +void MIRFunction::Dump(bool withoutBody) +{ + // skip the functions that are added during process methods in + // class and interface decls. these has nothing in formals + // they do have paramtypelist_. this can not skip ones without args + // but for them at least the func decls are valid + if ((module->IsJavaModule() && GetParamSize() != formalDefVec.size()) || GetAttr(FUNCATTR_optimized)) { + return; + } + + // save the module's curFunction and set it to the one currently Dump()ing + MIRFunction *savedFunc = module->CurFunction(); + module->SetCurFunction(this); + + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + DEBUG_ASSERT(symbol != nullptr, "symbol MIRSymbol is null"); + if (!withoutBody) { + symbol->GetSrcPosition().DumpLoc(MIRSymbol::LastPrintedLineNumRef(), MIRSymbol::LastPrintedColumnNumRef()); + } + LogInfo::MapleLogger() << "func " + << "&" << symbol->GetName(); + theMIRModule = module; + funcAttrs.DumpAttributes(); + + if (symbol->GetWeakrefAttr().first) { + LogInfo::MapleLogger() << " weakref"; + if (symbol->GetWeakrefAttr().second != UStrIdx(0)) { + LogInfo::MapleLogger() << " ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetWeakrefAttr().second)); + LogInfo::MapleLogger() << " )"; + } + } + + if (symbol->sectionAttr != UStrIdx(0)) { + LogInfo::MapleLogger() << " section ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->sectionAttr)); + LogInfo::MapleLogger() << " )"; + } + + if (module->GetFlavor() != kMmpl) { + DumpFlavorLoweredThanMmpl(); + } + + // codeMemPool is nullptr, means maple_ir has been released for memory's sake + if (codeMemPool == nullptr) { + LogInfo::MapleLogger() << '\n'; + } else if (GetBody() != nullptr && !withoutBody && symbol->GetStorageClass() != kScExtern) { + ResetInfoPrinted(); // this ensures funcinfo will be printed + GetBody()->Dump(0, module->GetFlavor() == kMmpl ? nullptr : GetSymTab(), + module->GetFlavor() < kMmpl ? 
GetPregTab() : nullptr, false, true, + module->GetFlavor()); // Dump body + } else { + LogInfo::MapleLogger() << '\n'; + } + + // restore the curFunction + module->SetCurFunction(savedFunc); +} + +void MIRFunction::DumpUpFormal(int32 indent) const +{ + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "upformalsize " << GetUpFormalSize() << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsTypeTagged = [ "; + const auto *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < + reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (formalWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(formalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < + reinterpret_cast(formalWordsRefCounted + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpFrame(int32 indent) const +{ + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "framesize " << static_cast(GetFrameSize()) << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsTypeTagged = [ "; + const uint32 *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (localWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(localWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsRefCounted + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpScope() +{ + scope->Dump(0); +} + +void MIRFunction::DumpFuncBody(int32 indent) +{ + LogInfo::MapleLogger() << " funcid " << GetPuidxOrigin() << '\n'; + + if (IsInfoPrinted()) { + return; + } + + SetInfoPrinted(); + + if (GetUpFormalSize() > 0) { + DumpUpFormal(indent); + } + + if (GetFrameSize() > 0) { + DumpFrame(indent); + } + + if (GetOutParmSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "outparmsize " << GetOutParmSize() << '\n'; + } + + if (GetModuleId() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "moduleID " << static_cast(GetModuleId()) << '\n'; + } + + if (GetFuncSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcSize " << GetFuncSize() << '\n'; + } + + if (GetInfoVector().empty()) { + return; + } + + const MIRInfoVector &funcInfo = GetInfoVector(); + const MapleVector &funcInfoIsString = InfoIsString(); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcinfo {\n"; + size_t size = funcInfo.size(); + constexpr int kIndentOffset = 2; + for (size_t i = 0; i < size; ++i) { + PrintIndentation(indent + kIndentOffset); + LogInfo::MapleLogger() << "@" << 
GlobalTables::GetStrTable().GetStringFromStrIdx(funcInfo[i].first) << " "; + if (!funcInfoIsString[i]) { + LogInfo::MapleLogger() << funcInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" + << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(funcInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << '\n'; +} + +bool MIRFunction::IsEmpty() const +{ + return (body == nullptr || body->IsEmpty()); +} + +bool MIRFunction::IsClinit() const +{ + const std::string clinitPostfix = "_7C_3Cclinit_3E_7C_28_29V"; + const std::string &funcName = this->GetName(); + // this does not work for smali files like test/511-clinit-interface/smali/BogusInterface.smali, + // which is decorated without "constructor". + return StringUtils::EndsWith(funcName, clinitPostfix); +} + +uint32 MIRFunction::GetInfo(GStrIdx strIdx) const +{ + for (const auto &item : info) { + if (item.first == strIdx) { + return item.second; + } + } + DEBUG_ASSERT(false, "get info error"); + return 0; +} + +uint32 MIRFunction::GetInfo(const std::string &string) const +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(string); + return GetInfo(strIdx); +} + +void MIRFunction::OverrideBaseClassFuncNames(GStrIdx strIdx) +{ + baseClassStrIdx.reset(); + baseFuncStrIdx.reset(); + SetBaseClassFuncNames(strIdx); +} + +// there are two ways to represent the delimiter: '|' or "_7C" +// where 7C is the ascii value of char '|' in hex +void MIRFunction::SetBaseClassFuncNames(GStrIdx strIdx) +{ + if (baseClassStrIdx != 0u || baseFuncStrIdx != 0u) { + return; + } + const std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + std::string delimiter = "|"; + uint32 width = 1; // delimiter width + size_t pos = name.find(delimiter); + if (pos == std::string::npos) { + delimiter = namemangler::kNameSplitterStr; + width = 3; // delimiter width + pos = name.find(delimiter); + // make sure it is not __7C, but ___7C ok + while (pos != std::string::npos && (name[pos - 1] == '_' && name[pos - 2] != '_')) { + pos = name.find(delimiter, pos + width); + } + } + if (pos != std::string::npos && pos > 0) { + const std::string className = name.substr(0, pos); + baseClassStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className); + std::string funcNameWithType = name.substr(pos + width, name.length() - pos - width); + baseFuncWithTypeStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t index = name.find(namemangler::kRightBracketStr); + if (index != std::string::npos) { + size_t posEnd = index + (std::string(namemangler::kRightBracketStr)).length(); + funcNameWithType = name.substr(pos + width, posEnd - pos - width); + } + baseFuncSigStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t newPos = name.find(delimiter, pos + width); + while (newPos != std::string::npos && (name[newPos - 1] == '_' && name[newPos - 2] != '_')) { + newPos = name.find(delimiter, newPos + width); + } + if (newPos != 0) { + std::string funcName = name.substr(pos + width, newPos - pos - width); + baseFuncStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + std::string signature = name.substr(newPos + width, name.length() - newPos - width); + signatureStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(signature); + } + return; + } + baseFuncStrIdx = strIdx; +} + +const MIRSymbol 
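SetBaseClassFuncNames above splits a mangled `class|funcname|signature` string, where the `|` delimiter may appear encoded as `_7C` (0x7C is the ASCII code of '|'). A rough standalone illustration of the split, skipping the escaped-underscore handling; the mangled name here is invented for the example:

    #include <iostream>
    #include <string>

    int main() {
        // Hypothetical mangled name: class "LFoo_3B", func "bar", signature "_28I_29V".
        const std::string name = "LFoo_3B_7Cbar_7C_28I_29V";
        const std::string delim = "_7C";  // encoded '|'
        size_t p1 = name.find(delim);
        size_t p2 = name.find(delim, p1 + delim.size());
        std::cout << "class: " << name.substr(0, p1) << '\n'
                  << "func:  " << name.substr(p1 + delim.size(), p2 - p1 - delim.size()) << '\n'
                  << "sig:   " << name.substr(p2 + delim.size()) << '\n';
    }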
*MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst) const
+{
+    return idx.Islocal() ? GetSymbolTabItem(idx.Idx(), checkFirst)
+                         : GlobalTables::GetGsymTable().GetSymbolFromStidx(idx.Idx(), checkFirst);
+}
+MIRSymbol *MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst)
+{
+    return const_cast<MIRSymbol *>(const_cast<const MIRFunction *>(this)->GetLocalOrGlobalSymbol(idx, checkFirst));
+}
+
+const MIRType *MIRFunction::GetNodeType(const BaseNode &node) const
+{
+    if (node.GetOpCode() == OP_dread) {
+        const MIRSymbol *sym = GetLocalOrGlobalSymbol(static_cast<const DreadNode &>(node).GetStIdx());
+        return GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx());
+    }
+    if (node.GetOpCode() == OP_regread) {
+        const auto &nodeReg = static_cast<const RegreadNode &>(node);
+        const MIRPreg *pReg = GetPregTab()->PregFromPregIdx(nodeReg.GetRegIdx());
+        if (pReg->GetPrimType() == PTY_ref) {
+            return pReg->GetMIRType();
+        }
+    }
+    return nullptr;
+}
+
+void MIRFunction::EnterFormals()
+{
+    for (auto &formalDef : formalDefVec) {
+        formalDef.formalSym = symTab->CreateSymbol(kScopeLocal);
+        formalDef.formalSym->SetStorageClass(kScFormal);
+        formalDef.formalSym->SetNameStrIdx(formalDef.formalStrIdx);
+        formalDef.formalSym->SetTyIdx(formalDef.formalTyIdx);
+        formalDef.formalSym->SetAttrs(formalDef.formalAttrs);
+        const std::string &formalName = GlobalTables::GetStrTable().GetStringFromStrIdx(formalDef.formalStrIdx);
+        if (!isdigit(formalName.front())) {
+            formalDef.formalSym->SetSKind(kStVar);
+            (void)symTab->AddToStringSymbolMap(*formalDef.formalSym);
+        } else {
+            formalDef.formalSym->SetSKind(kStPreg);
+            uint32 thepregno = static_cast<uint32>(std::stoi(formalName));
+            MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDef.formalTyIdx);
+            PrimType pType = mirType->GetPrimType();
+            // if mirType info is not needed, set mirType to nullptr
+            if (pType != PTY_ref && pType != PTY_ptr) {
+                mirType = nullptr;
+            } else if (pType == PTY_ptr && mirType->IsMIRPtrType()) {
+                MIRType *pointedType = static_cast<MIRPtrType *>(mirType)->GetPointedType();
+                if (pointedType == nullptr || pointedType->GetKind() != kTypeFunction) {
+                    mirType = nullptr;
+                }
+            }
+            PregIdx pregIdx = pregTab->EnterPregNo(thepregno, pType, mirType);
+            MIRPreg *preg = pregTab->PregFromPregIdx(pregIdx);
+            formalDef.formalSym->SetPreg(preg);
+        }
+    }
+}
+
+void MIRFunction::NewBody()
+{
+    codeMemPool = GetCodeMemPool();
+    SetBody(codeMemPool->New<BlockNode>());
+    SetLastPosBody(codeMemPool->New<BlockNode>());
+    // If mir_function has been seen as a declaration, its symtab has to be moved
+    // from module mempool to function mempool.
+ MIRSymbolTable *oldSymTable = GetSymTab(); + MIRPregTable *oldPregTable = GetPregTab(); + MIRTypeNameTable *oldTypeNameTable = typeNameTab; + MIRLabelTable *oldLabelTable = GetLabelTab(); + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + typeNameTab = module->GetMemPool()->New(module->GetMPAllocator()); + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + + if (oldSymTable == nullptr) { + // formals not yet entered into symTab; enter them now + EnterFormals(); + } else { + for (size_t i = 1; i < oldSymTable->GetSymbolTableSize(); ++i) { + (void)GetSymTab()->AddStOutside(oldSymTable->GetSymbolFromStIdx(i)); + } + } + if (oldPregTable != nullptr) { + for (size_t i = 1; i < oldPregTable->Size(); ++i) { + (void)GetPregTab()->AddPreg(*oldPregTable->PregFromPregIdx(static_cast(i))); + } + } + if (oldTypeNameTable != nullptr) { + DEBUG_ASSERT(oldTypeNameTable->Size() == typeNameTab->Size(), + "Does not expect to process typeNameTab in MIRFunction::NewBody"); + } + if (oldLabelTable != nullptr) { + DEBUG_ASSERT(oldLabelTable->Size() == GetLabelTab()->Size(), + "Does not expect to process labelTab in MIRFunction::NewBody"); + } +} + +#ifdef DEBUGME +void MIRFunction::SetUpGDBEnv() +{ + if (codeMemPool != nullptr) { + delete codeMemPool; + } + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "tmp debug"); + codeMemPoolAllocator.SetMemPool(codeMemPool); +} + +void MIRFunction::ResetGDBEnv() +{ + delete codeMemPool; + codeMemPool = nullptr; +} +#endif +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_lower.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f46a1de206ac977f4bc5c0d02d26571182dfcf0b --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_lower.cpp @@ -0,0 +1,1150 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "mir_lower.h"
+#include "constantfold.h"
+#include "ext_constantfold.h"
+#include "me_option.h"
+
+#define DO_LT_0_CHECK 1
+
+namespace maple {
+
+static constexpr uint64 RoundUpConst(uint64 offset, uint32 align)
+{
+    return (-align) & (offset + align - 1);
+}
+
+static inline uint64 RoundUp(uint64 offset, uint32 align)
+{
+    if (align == 0) {
+        return offset;
+    }
+    return RoundUpConst(offset, align);
+}
+
+// Remove intrinsicop __builtin_expect and record likely info to brStmt
+// Target condExpr example:
+//   ne u1 i64 (
+//     intrinsicop i64 C___builtin_expect (
+//       cvt i64 i32 (dread i32 %levVar_9354), cvt i64 i32 (constval i32 0)),
+//     constval i64 0)
+void LowerCondGotoStmtWithBuiltinExpect(CondGotoNode &brStmt)
+{
+    BaseNode *condExpr = brStmt.Opnd(0);
+    // Poke ne for dread shortCircuit
+    // Example:
+    //   dassign %shortCircuit 0 (ne u1 i64 (
+    //     intrinsicop i64 C___builtin_expect (
+    //       cvt i64 i32 (dread i32 %levVar_32349),
+    //       cvt i64 i32 (constval i32 0)),
+    //     constval i64 0))
+    //   dassign %shortCircuit 0 (ne u1 u32 (dread u32 %shortCircuit, constval u1 0))
+    if (condExpr->GetOpCode() == OP_ne && condExpr->Opnd(0)->GetOpCode() == OP_dread &&
+        condExpr->Opnd(1)->GetOpCode() == OP_constval) {
+        auto *constVal = static_cast<ConstvalNode *>(condExpr->Opnd(1))->GetConstVal();
+        if (constVal->GetKind() == kConstInt && static_cast<MIRIntConst *>(constVal)->GetValue() == 0) {
+            condExpr = condExpr->Opnd(0);
+        }
+    }
+    if (condExpr->GetOpCode() == OP_dread) {
+        // Example:
+        //   dassign %shortCircuit 0 (ne u1 i64 (
+        //     intrinsicop i64 C___builtin_expect (
+        //       cvt i64 i32 (dread i32 %levVar_9488),
+        //       cvt i64 i32 (constval i32 1)),
+        //     constval i64 0))
+        //   brfalse @shortCircuit_label_13351 (dread u32 %shortCircuit)
+        StIdx stIdx = static_cast<DreadNode *>(condExpr)->GetStIdx();
+        FieldID fieldId = static_cast<DreadNode *>(condExpr)->GetFieldID();
+        if (fieldId != 0) {
+            return;
+        }
+        if (brStmt.GetPrev() == nullptr || brStmt.GetPrev()->GetOpCode() != OP_dassign) {
+            return;  // prev stmt may be a label, we skip it too
+        }
+        auto *dassign = static_cast<DassignNode *>(brStmt.GetPrev());
+        if (stIdx != dassign->GetStIdx() || dassign->GetFieldID() != 0) {
+            return;
+        }
+        condExpr = dassign->GetRHS();
+    }
+    if (condExpr->GetOpCode() == OP_ne) {
+        // opnd1 must be int const 0
+        BaseNode *opnd1 = condExpr->Opnd(1);
+        if (opnd1->GetOpCode() != OP_constval) {
+            return;
+        }
+        auto *constVal = static_cast<ConstvalNode *>(opnd1)->GetConstVal();
+        if (constVal->GetKind() != kConstInt || static_cast<MIRIntConst *>(constVal)->GetValue() != 0) {
+            return;
+        }
+        // opnd0 must be intrinsicop C___builtin_expect
+        BaseNode *opnd0 = condExpr->Opnd(0);
+        if (opnd0->GetOpCode() != OP_intrinsicop ||
+            static_cast<IntrinsicopNode *>(opnd0)->GetIntrinsic() != INTRN_C___builtin_expect) {
+            return;
+        }
+        // We trust constant fold
+        auto *expectedConstExpr = opnd0->Opnd(1);
+        if (expectedConstExpr->GetOpCode() == OP_cvt) {
+            expectedConstExpr = expectedConstExpr->Opnd(0);
+        }
+        if (expectedConstExpr->GetOpCode() != OP_constval) {
+            return;
+        }
+        auto *expectedConstNode = static_cast<ConstvalNode *>(expectedConstExpr)->GetConstVal();
+        CHECK_FATAL(expectedConstNode->GetKind() == kConstInt, "must be");
+        auto expectedVal = static_cast<MIRIntConst *>(expectedConstNode)->GetValue();
+        if (expectedVal != 0 && expectedVal != 1) {
+            return;
+        }
+        bool likelyTrue = (expectedVal == 1);  // The condition is likely to be true
+        bool likelyBranch = (brStmt.GetOpCode() == OP_brtrue ?
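For context, on GCC/Clang `__builtin_expect(e, c)` evaluates to `e` and merely hints that `e == c` is the common case, which is why the pass can record the probability on the branch and then strip the intrinsic, branching on the raw condition. A small source-level demo of the pattern this lowering consumes:

    #include <cstdio>

    int ParseLevel(int lev) {
        // Hints that the error path is unlikely; the condition's value is unchanged.
        if (__builtin_expect(lev < 0, 0)) {
            return -1;       // unlikely arm (expected value 0)
        }
        return lev * 2;      // likely fall-through
    }

    int main() {
        std::printf("%d\n", ParseLevel(21));  // prints 42
    }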
likelyTrue : !likelyTrue); // High probability jump + if (likelyBranch) { + brStmt.SetBranchProb(kProbLikely); + } else { + brStmt.SetBranchProb(kProbUnlikely); + } + // Remove __builtin_expect + condExpr->SetOpnd(opnd0->Opnd(0), 0); + } +} + +void MIRLower::LowerBuiltinExpect(BlockNode &block) +{ + auto *stmt = block.GetFirst(); + auto *last = block.GetLast(); + while (stmt != last) { + if (stmt->GetOpCode() == OP_brtrue || stmt->GetOpCode() == OP_brfalse) { + LowerCondGotoStmtWithBuiltinExpect(*static_cast(stmt)); + } + stmt = stmt->GetNext(); + } +} + +LabelIdx MIRLower::CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt) +{ + auto *brStmt = mirModule.CurFuncCodeMemPool()->New(op); + brStmt->SetOpnd(ifStmt.Opnd(), 0); + brStmt->SetSrcPos(ifStmt.GetSrcPos()); + LabelIdx lableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lableIdx); + brStmt->SetOffset(lableIdx); + blk.AddStatement(brStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(brStmt->GetStmtID(), ifStmt.GetStmtID()); + } + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + if (thenEmpty) { + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + } else { + blk.AppendStatementsFromBlock(*ifStmt.GetThenPart()); + } + return lableIdx; +} + +void MIRLower::CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt) +{ + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + DEBUG_ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID()); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), freq); + } +} + +void MIRLower::CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt) +{ + LabelIdx labelIdx = CreateCondGotoStmt(OP_brtrue, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + DEBUG_ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID()); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), freq); + } +} + +void MIRLower::CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt) +{ + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + bool fallThroughFromThen = !IfStmtNoFallThrough(ifStmt); + LabelIdx gotoLableIdx = 0; + if (fallThroughFromThen) { + auto *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + gotoLableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(gotoLableIdx); + gotoStmt->SetOffset(gotoLableIdx); + blk.AddStatement(gotoStmt); + // set stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(gotoStmt->GetStmtID(), ifStmt.GetThenPart()->GetStmtID()); + } + } + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + // set stmtfreqs + if 
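CreateBrFalseAndGotoStmt assembles the full two-armed shape: branch around the then-part on a false condition, and hop over the else-part with a goto when the then-part can fall through. A runnable C++ analogue, with invented label names standing in for the generated LabelNodes:

    #include <iostream>

    int main() {
        bool cond = false;
        if (!cond) goto L_else;   // brfalse <cond> @L_else
        std::cout << "then\n";    // <then-part>
        goto L_end;               // emitted only if the then-part falls through
    L_else:
        std::cout << "else\n";    // <else-part>
    L_end:
        return 0;
    }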
(GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetElsePart()->GetStmtID()); + } + if (fallThroughFromThen) { + lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(gotoLableIdx); + blk.AddStatement(lableStmt); + // set endlabel stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } +} + +BlockNode *MIRLower::LowerIfStmt(IfStmtNode &ifStmt, bool recursive) +{ + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + bool elseEmpty = (ifStmt.GetElsePart() == nullptr) || (ifStmt.GetElsePart()->GetFirst() == nullptr); + if (recursive) { + if (!thenEmpty) { + ifStmt.SetThenPart(LowerBlock(*ifStmt.GetThenPart())); + } + if (!elseEmpty) { + ifStmt.SetElsePart(LowerBlock(*ifStmt.GetElsePart())); + } + } + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + if (thenEmpty && elseEmpty) { + // generate EVAL statement + auto *evalStmt = mirModule.CurFuncCodeMemPool()->New(OP_eval); + evalStmt->SetOpnd(ifStmt.Opnd(), 0); + evalStmt->SetSrcPos(ifStmt.GetSrcPos()); + blk->AddStatement(evalStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(evalStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } else if (elseEmpty) { + // brfalse + // + // label + CreateBrFalseStmt(*blk, ifStmt); + } else if (thenEmpty) { + // brtrue + // + // label + CreateBrTrueStmt(*blk, ifStmt); + } else { + // brfalse + // + // goto + // label + // + // label + CreateBrFalseAndGotoStmt(*blk, ifStmt); + } + return blk; +} + +static bool ConsecutiveCaseValsAndSameTarget(const CaseVector *switchTable) +{ + size_t caseNum = switchTable->size(); + int lastVal = static_cast((*switchTable)[0].first); + LabelIdx lblIdx = (*switchTable)[0].second; + for (size_t id = 1; id < caseNum; id++) { + lastVal++; + if (lastVal != (*switchTable)[id].first) { + return false; + } + if (lblIdx != (*switchTable)[id].second) { + return false; + } + } + return true; +} + +// if there is only 1 case branch, replace with conditional branch(es) and +// return the optimized multiple statements; otherwise, return nullptr +BlockNode *MIRLower::LowerSwitchStmt(SwitchNode *switchNode) +{ + CaseVector *switchTable = &switchNode->GetSwitchTable(); + if (switchTable->empty()) { // goto @defaultLabel + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel); + blk->AddStatement(gotoStmt); + return blk; + } + if (!ConsecutiveCaseValsAndSameTarget(switchTable)) { + return nullptr; + } + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx caseGotoLabel = switchTable->front().second; + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + int64 minCaseVal = switchTable->front().first; + int64 maxCaseVal = switchTable->back().first; + BaseNode *switchOpnd = switchNode->Opnd(0); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + ConstvalNode *minCaseNode = builder->CreateIntConst(minCaseVal, switchOpnd->GetPrimType()); + ConstvalNode *maxCaseNode = builder->CreateIntConst(maxCaseVal, switchOpnd->GetPrimType()); + if (minCaseVal == maxCaseVal) { + // brtrue (x == minCaseVal) @case_goto_label + // goto @default_label + CompareNode *eqNode = builder->CreateExprCompare( + OP_eq, *GlobalTables::GetTypeTable().GetInt32(), + 
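When all case values are consecutive and share a single target, LowerSwitchStmt replaces the switch with two range compares and gotos. The same shape in plain C++, for a hypothetical `case 3: case 4: case 5:` sharing one label:

    #include <iostream>

    int main() {
        int x = 4;
        if (x < 3) goto L_default;   // brtrue (x < minCaseVal) @default_label
        if (x > 5) goto L_default;   // brtrue (x > maxCaseVal) @default_label
        std::cout << "case 3..5\n";  // @case_goto_label
        return 0;
    L_default:
        std::cout << "default\n";
        return 0;
    }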
*GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, minCaseNode); + CondGotoNode *condGoto = builder->CreateStmtCondGoto(eqNode, OP_brtrue, caseGotoLabel); + blk->AddStatement(condGoto); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel); + blk->AddStatement(gotoStmt); + } else { + // brtrue (x < minCaseVal) @default_label + // brtrue (x > maxCaseVal) @default_label + // goto @case_goto_label + CompareNode *ltNode = builder->CreateExprCompare( + OP_lt, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, minCaseNode); + CondGotoNode *condGoto = builder->CreateStmtCondGoto(ltNode, OP_brtrue, defaultLabel); + blk->AddStatement(condGoto); + CompareNode *gtNode = builder->CreateExprCompare( + OP_gt, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, maxCaseNode); + condGoto = builder->CreateStmtCondGoto(gtNode, OP_brtrue, defaultLabel); + blk->AddStatement(condGoto); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, caseGotoLabel); + blk->AddStatement(gotoStmt); + } + return blk; +} + +// while +// is lowered to: +// brfalse +// label +// +// brtrue +// label +BlockNode *MIRLower::LowerWhileStmt(WhileStmtNode &whileStmt) +{ + DEBUG_ASSERT(whileStmt.GetBody() != nullptr, "nullptr check"); + whileStmt.SetBody(LowerBlock(*whileStmt.GetBody())); + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New(OP_brfalse); + brFalseStmt->SetOpnd(whileStmt.Opnd(0), 0); + brFalseStmt->SetSrcPos(whileStmt.GetSrcPos()); + LabelIdx lalbeIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lalbeIdx); + brFalseStmt->SetOffset(lalbeIdx); + blk->AddStatement(brFalseStmt); + blk->AppendStatementsFromBlock(*whileStmt.GetBody()); + if (MeOption::optForSize) { + // still keep while-do format to avoid coping too much condition-related stmt + LabelIdx whileLalbeIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(whileLalbeIdx); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(whileLalbeIdx); + blk->InsertBefore(brFalseStmt, lableStmt); + auto *whilegotonode = mirModule.CurFuncCodeMemPool()->New(OP_goto, whileLalbeIdx); + if (GetFuncProfData() && blk->GetLast()) { + GetFuncProfData()->CopyStmtFreq(whilegotonode->GetStmtID(), blk->GetLast()->GetStmtID()); + } + blk->AddStatement(whilegotonode); + } else { + LabelIdx bodyLableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLableIdx); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(bodyLableIdx); + blk->InsertAfter(brFalseStmt, lableStmt); + // update frequency + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), whileStmt.GetStmtID()); + GetFuncProfData()->CopyStmtFreq(brFalseStmt->GetStmtID(), whileStmt.GetStmtID()); + } + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(whileStmt.Opnd(0)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); + brTrueStmt->SetOffset(bodyLableIdx); + if (GetFuncProfData() && blk->GetLast()) { + GetFuncProfData()->CopyStmtFreq(brTrueStmt->GetStmtID(), whileStmt.GetBody()->GetStmtID()); + } + 
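LowerWhileStmt rotates the loop: one brfalse guard up front, then a bottom-tested brtrue on a cloned condition, so the steady state pays a single branch per iteration. A runnable analogue:

    #include <iostream>

    int main() {
        int i = 0;
        if (!(i < 3)) goto L_exit;  // brfalse <cond> @<endlabel>
    L_body:
        std::cout << i << '\n';     // <body>
        ++i;
        if (i < 3) goto L_body;     // brtrue <cloned cond> @<bodylabel>
    L_exit:
        return 0;
    }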
blk->AddStatement(brTrueStmt);
+    }
+    auto *lableStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+    lableStmt->SetLabelIdx(lalbeIdx);
+    blk->AddStatement(lableStmt);
+    if (GetFuncProfData()) {
+        int64_t freq = GetFuncProfData()->GetStmtFreq(whileStmt.GetStmtID()) -
+                       GetFuncProfData()->GetStmtFreq(blk->GetLast()->GetStmtID());
+        DEBUG_ASSERT(freq >= 0, "sanity check");
+        GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), freq);
+    }
+    return blk;
+}
+
+// doloop <do-var> (<start-expr>, <cont-expr>, <incr-amt>) {<body-stmts>}
+// is lowered to:
+//     dassign <do-var> (<start-expr>)
+//     brfalse <cond-expr> <endlabel>
+//   label <bodylabel>
+//     <body-stmts>
+//     dassign <do-var> (<incr-amt>)
+//     brtrue <cond-expr> <bodylabel>
+//   label <endlabel>
+BlockNode *MIRLower::LowerDoloopStmt(DoloopNode &doloop)
+{
+    DEBUG_ASSERT(doloop.GetDoBody() != nullptr, "nullptr check");
+    doloop.SetDoBody(LowerBlock(*doloop.GetDoBody()));
+    int64_t doloopnodeFreq = 0, bodynodeFreq = 0;
+    if (GetFuncProfData()) {
+        doloopnodeFreq = GetFuncProfData()->GetStmtFreq(doloop.GetStmtID());
+        bodynodeFreq = GetFuncProfData()->GetStmtFreq(doloop.GetDoBody()->GetStmtID());
+    }
+    auto *blk = mirModule.CurFuncCodeMemPool()->New<BlockNode>();
+    if (doloop.IsPreg()) {
+        PregIdx regIdx = static_cast<PregIdx>(doloop.GetDoVarStIdx().FullIdx());
+        MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx);
+        PrimType primType = mirPreg->GetPrimType();
+        DEBUG_ASSERT(primType != kPtyInvalid, "runtime check error");
+        auto *startRegassign = mirModule.CurFuncCodeMemPool()->New<RegassignNode>();
+        startRegassign->SetRegIdx(regIdx);
+        startRegassign->SetPrimType(primType);
+        startRegassign->SetOpnd(doloop.GetStartExpr(), 0);
+        startRegassign->SetSrcPos(doloop.GetSrcPos());
+        blk->AddStatement(startRegassign);
+    } else {
+        auto *startDassign = mirModule.CurFuncCodeMemPool()->New<DassignNode>();
+        startDassign->SetStIdx(doloop.GetDoVarStIdx());
+        startDassign->SetRHS(doloop.GetStartExpr());
+        startDassign->SetSrcPos(doloop.GetSrcPos());
+        blk->AddStatement(startDassign);
+    }
+    if (GetFuncProfData()) {
+        GetFuncProfData()->SetStmtFreq(blk->GetLast()->GetStmtID(), doloopnodeFreq - bodynodeFreq);
+    }
+    auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New<CondGotoNode>(OP_brfalse);
+    brFalseStmt->SetOpnd(doloop.GetCondExpr(), 0);
+    LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+    mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx);
+    brFalseStmt->SetOffset(lIdx);
+    blk->AddStatement(brFalseStmt);
+    // update stmtFreq
+    if (GetFuncProfData()) {
+        GetFuncProfData()->SetStmtFreq(brFalseStmt->GetStmtID(), (doloopnodeFreq - bodynodeFreq));
+    }
+    LabelIdx bodyLabelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel();
+    mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLabelIdx);
+    auto *labelStmt = mirModule.CurFuncCodeMemPool()->New<LabelNode>();
+    labelStmt->SetLabelIdx(bodyLabelIdx);
+    blk->AddStatement(labelStmt);
+    // update stmtFreq
+    if (GetFuncProfData()) {
+        GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), bodynodeFreq);
+    }
+    blk->AppendStatementsFromBlock(*doloop.GetDoBody());
+    if (doloop.IsPreg()) {
+        PregIdx regIdx = static_cast<PregIdx>(doloop.GetDoVarStIdx().FullIdx());
+        MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx);
+        PrimType doVarPType = mirPreg->GetPrimType();
+        DEBUG_ASSERT(doVarPType != kPtyInvalid, "runtime check error");
+        auto *readDoVar = mirModule.CurFuncCodeMemPool()->New<RegreadNode>();
+        readDoVar->SetRegIdx(regIdx);
+        readDoVar->SetPrimType(doVarPType);
+        auto *add =
+            mirModule.CurFuncCodeMemPool()->New<BinaryNode>(OP_add, doVarPType, doloop.GetIncrExpr(), readDoVar);
+        auto *endRegassign = mirModule.CurFuncCodeMemPool()->New<RegassignNode>();
+        endRegassign->SetRegIdx(regIdx);
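The doloop shape mirrors the rotated while above, with the induction variable initialized before the guard and incremented just before the bottom test. As runnable C++:

    #include <iostream>

    int main() {
        int iv = 0;                  // dassign <do-var> (<start-expr>)
        if (!(iv < 3)) goto L_end;   // brfalse <cond-expr> @<endlabel>
    L_body:
        std::cout << iv << '\n';     // <body-stmts>
        iv = iv + 1;                 // dassign <do-var> (<incr-amt> + <do-var>)
        if (iv < 3) goto L_body;     // brtrue <cond-expr> @<bodylabel>
    L_end:
        return 0;
    }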
endRegassign->SetPrimType(doVarPType); + endRegassign->SetOpnd(add, 0); + blk->AddStatement(endRegassign); + } else { + const MIRSymbol *doVarSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(doloop.GetDoVarStIdx()); + PrimType doVarPType = doVarSym->GetType()->GetPrimType(); + auto *readDovar = + mirModule.CurFuncCodeMemPool()->New(OP_dread, doVarPType, doloop.GetDoVarStIdx(), 0); + auto *add = + mirModule.CurFuncCodeMemPool()->New(OP_add, doVarPType, readDovar, doloop.GetIncrExpr()); + auto *endDassign = mirModule.CurFuncCodeMemPool()->New(); + endDassign->SetStIdx(doloop.GetDoVarStIdx()); + endDassign->SetRHS(add); + blk->AddStatement(endDassign); + } + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(doloop.GetCondExpr()->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); + brTrueStmt->SetOffset(bodyLabelIdx); + blk->AddStatement(brTrueStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(brTrueStmt->GetStmtID(), bodynodeFreq); + } + labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + blk->AddStatement(labelStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), (doloopnodeFreq - bodynodeFreq)); + } + return blk; +} + +// dowhile +// is lowered to: +// label +// +// brtrue +BlockNode *MIRLower::LowerDowhileStmt(WhileStmtNode &doWhileStmt) +{ + DEBUG_ASSERT(doWhileStmt.GetBody() != nullptr, "nullptr check"); + doWhileStmt.SetBody(LowerBlock(*doWhileStmt.GetBody())); + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx); + auto *labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + blk->AddStatement(labelStmt); + blk->AppendStatementsFromBlock(*doWhileStmt.GetBody()); + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(doWhileStmt.Opnd(0), 0); + brTrueStmt->SetOffset(lIdx); + blk->AddStatement(brTrueStmt); + return blk; +} + +BlockNode *MIRLower::LowerBlock(BlockNode &block) +{ + auto *newBlock = mirModule.CurFuncCodeMemPool()->New(); + BlockNode *tmp = nullptr; + if (block.GetFirst() == nullptr) { + newBlock->SetStmtID(block.GetStmtID()); // keep original block stmtid + return newBlock; + } + StmtNode *nextStmt = block.GetFirst(); + DEBUG_ASSERT(nextStmt != nullptr, "nullptr check"); + do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_if: + tmp = LowerIfStmt(static_cast(*stmt), true); + newBlock->AppendStatementsFromBlock(*tmp); + break; + case OP_switch: + tmp = LowerSwitchStmt(static_cast(stmt)); + if (tmp != nullptr) { + newBlock->AppendStatementsFromBlock(*tmp); + } else { + newBlock->AddStatement(stmt); + } + break; + case OP_while: + newBlock->AppendStatementsFromBlock(*LowerWhileStmt(static_cast(*stmt))); + break; + case OP_dowhile: + newBlock->AppendStatementsFromBlock(*LowerDowhileStmt(static_cast(*stmt))); + break; + case OP_doloop: + newBlock->AppendStatementsFromBlock(*LowerDoloopStmt(static_cast(*stmt))); + break; + case OP_icallassigned: + case OP_icall: { + if (mirModule.IsCModule()) { + // convert to icallproto/icallprotoassigned + IcallNode *ic = static_cast(stmt); + ic->SetOpCode(stmt->GetOpCode() == OP_icall ? 
OP_icallproto : OP_icallprotoassigned); + MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(stmt->Opnd(0)); + CHECK_FATAL(funcType != nullptr, "MIRLower::LowerBlock: cannot find prototype for icall"); + ic->SetRetTyIdx(funcType->GetTypeIndex()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); + if (retType->GetPrimType() == PTY_agg && retType->GetSize() > k16BitSize) { + funcType->funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + } + newBlock->AddStatement(stmt); + break; + } + case OP_block: + tmp = LowerBlock(static_cast(*stmt)); + newBlock->AppendStatementsFromBlock(*tmp); + break; + default: + newBlock->AddStatement(stmt); + break; + } + } while (nextStmt != nullptr); + newBlock->SetStmtID(block.GetStmtID()); // keep original block stmtid + return newBlock; +} + +// for lowering OP_cand and OP_cior embedded in the expression x which belongs +// to curstmt +BaseNode *MIRLower::LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *blk) +{ + if (x->GetOpCode() == OP_cand || x->GetOpCode() == OP_cior) { + MIRBuilder *builder = mirModule.GetMIRBuilder(); + BinaryNode *bnode = static_cast(x); + bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(0), curstmt, blk), 0); + PregIdx pregIdx = mirFunc->GetPregTab()->CreatePreg(x->GetPrimType()); + RegassignNode *regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(0)); + blk->InsertBefore(curstmt, regass); + LabelIdx labIdx = mirFunc->GetLabelTab()->CreateLabel(); + mirFunc->GetLabelTab()->AddToStringLabelMap(labIdx); + BaseNode *cond = builder->CreateExprRegread(x->GetPrimType(), pregIdx); + CondGotoNode *cgoto = + mirFunc->GetCodeMempool()->New(x->GetOpCode() == OP_cior ? OP_brtrue : OP_brfalse); + cgoto->SetOpnd(cond, 0); + cgoto->SetOffset(labIdx); + blk->InsertBefore(curstmt, cgoto); + + bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(1), curstmt, blk), 1); + regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(1)); + blk->InsertBefore(curstmt, regass); + LabelNode *lbl = mirFunc->GetCodeMempool()->New(); + lbl->SetLabelIdx(labIdx); + blk->InsertBefore(curstmt, lbl); + return builder->CreateExprRegread(x->GetPrimType(), pregIdx); + } else { + for (size_t i = 0; i < x->GetNumOpnds(); i++) { + x->SetOpnd(LowerEmbeddedCandCior(x->Opnd(i), curstmt, blk), i); + } + return x; + } +} + +// for lowering all appearances of OP_cand and OP_cior associated with condional +// branches in the block +void MIRLower::LowerCandCior(BlockNode &block) +{ + if (block.GetFirst() == nullptr) { + return; + } + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + if (stmt->IsCondBr() && (stmt->Opnd(0)->GetOpCode() == OP_cand || stmt->Opnd(0)->GetOpCode() == OP_cior)) { + CondGotoNode *condGoto = static_cast(stmt); + BinaryNode *cond = static_cast(condGoto->Opnd(0)); + if ((stmt->GetOpCode() == OP_brfalse && cond->GetOpCode() == OP_cand) || + (stmt->GetOpCode() == OP_brtrue && cond->GetOpCode() == OP_cior)) { + // short-circuit target label is same as original condGoto stmt + condGoto->SetOpnd(cond->GetBOpnd(0), 0); + auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New(Opcode(stmt->GetOpCode())); + newCondGoto->SetOpnd(cond->GetBOpnd(1), 0); + newCondGoto->SetOffset(condGoto->GetOffset()); + block.InsertAfter(condGoto, newCondGoto); + nextStmt = stmt; // so it will be re-processed if another cand/cior + } else { // short-circuit target is next statement + LabelIdx lIdx; + LabelNode *labelStmt = nullptr; + if 
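LowerEmbeddedCandCior materializes the left operand of a cand/cior into a preg, conditionally skips evaluating the right operand, and reads the preg back as the expression's value. The same short-circuit shape in plain C++, with `pregVal` standing in for the preg:

    #include <iostream>

    int main() {
        bool a = true, b = false;
        bool pregVal = a;           // regassign %preg <- opnd(0)
        if (!pregVal) goto L_done;  // brfalse for cand (brtrue for cior)
        pregVal = b;                // regassign %preg <- opnd(1)
    L_done:
        std::cout << (pregVal ? "true" : "false") << '\n';  // regread %preg
        return 0;
    }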
(nextStmt->GetOpCode() == OP_label) { + labelStmt = static_cast(nextStmt); + lIdx = labelStmt->GetLabelIdx(); + } else { + lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx); + labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + block.InsertAfter(condGoto, labelStmt); + } + auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New( + stmt->GetOpCode() == OP_brfalse ? OP_brtrue : OP_brfalse); + newCondGoto->SetOpnd(cond->GetBOpnd(0), 0); + newCondGoto->SetOffset(lIdx); + block.InsertBefore(condGoto, newCondGoto); + condGoto->SetOpnd(cond->GetBOpnd(1), 0); + nextStmt = newCondGoto; // so it will be re-processed if another cand/cior + } + } else { // call LowerEmbeddedCandCior() for all the expression operands + for (size_t i = 0; i < stmt->GetNumOpnds(); i++) { + stmt->SetOpnd(LowerEmbeddedCandCior(stmt->Opnd(i), stmt, &block), i); + } + } + } while (nextStmt != nullptr); +} + +void MIRLower::LowerFunc(MIRFunction &func) +{ + if (GetOptLevel() > 0) { + ExtConstantFold ecf(func.GetModule()); + (void)ecf.ExtSimplify(func.GetBody()); + ; + } + + mirModule.SetCurFunction(&func); + if (IsLowerExpandArray()) { + ExpandArrayMrt(func); + } + BlockNode *origBody = func.GetBody(); + DEBUG_ASSERT(origBody != nullptr, "nullptr check"); + BlockNode *newBody = LowerBlock(*origBody); + DEBUG_ASSERT(newBody != nullptr, "nullptr check"); + LowerBuiltinExpect(*newBody); + if (!InLFO()) { + LowerCandCior(*newBody); + } + func.SetBody(newBody); +} + +BaseNode *MIRLower::LowerFarray(ArrayNode *array) +{ + auto *farrayType = static_cast(array->GetArrayType(GlobalTables::GetTypeTable())); + size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx())->GetSize(); + MIRType &arrayType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType())); + /* how about multi-dimension array? */ + if (array->GetIndex(0)->GetOpCode() == OP_constval) { + const ConstvalNode *constvalNode = static_cast(array->GetIndex(0)); + if (constvalNode->GetConstVal()->GetKind() == kConstInt) { + const MIRIntConst *pIntConst = static_cast(constvalNode->GetConstVal()); + CHECK_FATAL(mirModule.IsJavaModule() || !pIntConst->IsNegative(), "Array index should >= 0."); + int64 eleOffset = pIntConst->GetExtValue() * eSize; + + BaseNode *baseNode = array->GetBase(); + if (eleOffset == 0) { + return baseNode; + } + + MIRIntConst *eleConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(eleOffset, arrayType); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(eleConst); + offsetNode->SetPrimType(array->GetPrimType()); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(offsetNode, 1); + return rAdd; + } + } + + BaseNode *rMul = nullptr; + + BaseNode *baseNode = array->GetBase(); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + auto *newAdd = ConstantFold(mirModule).Fold(rAdd); + rAdd = (newAdd != nullptr ? 
newAdd : rAdd); + return rAdd; +} + +BaseNode *MIRLower::LowerCArray(ArrayNode *array) +{ + MIRType *aType = array->GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeJArray) { + return array; + } + if (aType->GetKind() == kTypeFArray) { + return LowerFarray(array); + } + + MIRArrayType *arrayType = static_cast(aType); + /* There are two cases where dimension > 1. + * 1) arrayType->dim > 1. Process the current arrayType. (nestedArray = false) + * 2) arrayType->dim == 1, but arraytype->eTyIdx is another array. (nestedArray = true) + * Assume at this time 1) and 2) cannot mix. + * Along with the array dimension, there is the array indexing. + * It is allowed to index arrays less than the dimension. + * This is dictated by the number of indexes. + */ + bool nestedArray = false; + uint64 dim = arrayType->GetDim(); + MIRType *innerType = nullptr; + MIRArrayType *innerArrayType = nullptr; + uint64 elemSize = 0; + if (dim == 1) { + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + if (innerType->GetKind() == kTypeArray) { + nestedArray = true; + do { + innerArrayType = static_cast(innerType); + elemSize = RoundUp(innerArrayType->GetElemType()->GetSize(), arrayType->GetElemType()->GetAlign()); + dim++; + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } while (innerType->GetKind() == kTypeArray); + } + } + + size_t numIndex = array->NumOpnds() - 1; + MIRArrayType *curArrayType = arrayType; + BaseNode *resNode = array->GetIndex(0); + if (dim > 1) { + BaseNode *prevNode = nullptr; + for (size_t i = 0; (i < dim) && (i < numIndex); ++i) { + uint32 mpyDim = 1; + if (nestedArray) { + CHECK_FATAL(arrayType->GetSizeArrayItem(0) > 0, "Zero size array dimension"); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curArrayType->GetElemTyIdx()); + curArrayType = static_cast(innerType); + while (innerType->GetKind() == kTypeArray) { + innerArrayType = static_cast(innerType); + mpyDim *= innerArrayType->GetSizeArrayItem(0); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } + } else { + CHECK_FATAL(arrayType->GetSizeArrayItem(static_cast(i)) > 0, "Zero size array dimension"); + for (size_t j = i + 1; j < dim; ++j) { + mpyDim *= arrayType->GetSizeArrayItem(static_cast(j)); + } + } + + BaseNode *index = static_cast(array->GetIndex(i)); + bool isConst = false; + uint64 indexVal = 0; + if (index->op == OP_constval) { + ConstvalNode *constNode = static_cast(index); + indexVal = (static_cast(constNode->GetConstVal()))->GetExtValue(); + isConst = true; + MIRIntConst *newConstNode = mirModule.GetMemPool()->New( + indexVal * mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *newValNode = mirModule.CurFuncCodeMemPool()->New(newConstNode); + newValNode->SetPrimType(array->GetPrimType()); + if (i == 0) { + prevNode = newValNode; + continue; + } else { + resNode = newValNode; + } + } + if (i > 0 && isConst == false) { + resNode = array->GetIndex(i); + } + + BaseNode *mpyNode; + if (isConst) { + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + static_cast(mpyDim) * indexVal, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode = mulSize; + } else if (mpyDim == 1 && prevNode) { + mpyNode = prevNode; + prevNode = resNode; + } else { + mpyNode = 
mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNode->SetPrimType(array->GetPrimType()); + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode->SetOpnd(mulSize, 1); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New( + OP_cvt, array->GetPrimType(), GetRegPrimType(resNode->GetPrimType()), resNode); + } + mpyNode->SetOpnd(resNode, 0); + } + if (i == 0) { + prevNode = mpyNode; + continue; + } + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array->GetPrimType()); + newResNode->SetOpnd(mpyNode, 0); + if (NeedCvtOrRetype(prevNode->GetPrimType(), array->GetPrimType())) { + prevNode = mirModule.CurFuncCodeMemPool()->New( + OP_cvt, array->GetPrimType(), GetRegPrimType(prevNode->GetPrimType()), prevNode); + } + newResNode->SetOpnd(prevNode, 1); + prevNode = newResNode; + } + resNode = prevNode; + } + + BaseNode *rMul = nullptr; + // esize is the size of the array element (eg. int = 4 long = 8) + uint64 esize; + if (nestedArray) { + esize = elemSize; + } else { + esize = arrayType->GetElemType()->GetSize(); + } + Opcode opadd = OP_add; + MIRIntConst *econst = mirModule.GetMemPool()->New( + esize, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *eSize = mirModule.CurFuncCodeMemPool()->New(econst); + eSize->SetPrimType(array->GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(resNode->GetPrimType()), resNode); + } + rMul->SetPrimType(resNode->GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSize, 1); + BaseNode *baseNode = array->GetBase(); + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opadd); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + auto *newAdd = ConstantFold(mirModule).Fold(rAdd); + rAdd = (newAdd != nullptr ? 
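A quick sanity check of the address arithmetic LowerCArray assembles: for `a[D1][D2][D3]`, index (i, j, k) contributes `i*D2*D3 + j*D3 + k` elements (the per-index mpyDim products, summed), scaled once by the element size at the end. RoundUp above is the usual power-of-two alignment trick, e.g. `(13 + 8 - 1) & -8 == 16`.

    #include <cassert>
    #include <cstdint>

    int main() {
        constexpr uint64_t D2 = 3, D3 = 4, esize = sizeof(int);
        int a[2][D2][D3] = {};
        uint64_t i = 1, j = 2, k = 3;
        uint64_t elemIndex = i * (D2 * D3) + j * D3 + k;  // per-index mpyDim terms, summed
        uint64_t byteOffset = elemIndex * esize;          // single final multiply by esize
        assert(reinterpret_cast<char *>(&a[i][j][k]) ==
               reinterpret_cast<char *>(&a[0][0][0]) + byteOffset);
    }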
newAdd : rAdd); + return rAdd; +} + +IfStmtNode *MIRLower::ExpandArrayMrtIfBlock(IfStmtNode &node) +{ + if (node.GetThenPart() != nullptr) { + node.SetThenPart(ExpandArrayMrtBlock(*node.GetThenPart())); + } + if (node.GetElsePart() != nullptr) { + node.SetElsePart(ExpandArrayMrtBlock(*node.GetElsePart())); + } + return &node; +} + +WhileStmtNode *MIRLower::ExpandArrayMrtWhileBlock(WhileStmtNode &node) +{ + if (node.GetBody() != nullptr) { + node.SetBody(ExpandArrayMrtBlock(*node.GetBody())); + } + return &node; +} + +DoloopNode *MIRLower::ExpandArrayMrtDoloopBlock(DoloopNode &node) +{ + if (node.GetDoBody() != nullptr) { + node.SetDoBody(ExpandArrayMrtBlock(*node.GetDoBody())); + } + return &node; +} + +ForeachelemNode *MIRLower::ExpandArrayMrtForeachelemBlock(ForeachelemNode &node) +{ + if (node.GetLoopBody() != nullptr) { + node.SetLoopBody(ExpandArrayMrtBlock(*node.GetLoopBody())); + } + return &node; +} + +void MIRLower::AddArrayMrtMpl(BaseNode &exp, BlockNode &newBlock) +{ + MIRModule &mod = mirModule; + MIRBuilder *builder = mod.GetMIRBuilder(); + for (size_t i = 0; i < exp.NumOpnds(); ++i) { + DEBUG_ASSERT(exp.Opnd(i) != nullptr, "nullptr check"); + AddArrayMrtMpl(*exp.Opnd(i), newBlock); + } + if (exp.GetOpCode() == OP_array) { + auto &arrayNode = static_cast(exp); + if (arrayNode.GetBoundsCheck()) { + BaseNode *arrAddr = arrayNode.Opnd(0); + BaseNode *index = arrayNode.Opnd(1); + DEBUG_ASSERT(index != nullptr, "null ptr check"); + MIRType *indexType = GlobalTables::GetTypeTable().GetPrimType(index->GetPrimType()); + UnaryStmtNode *nullCheck = builder->CreateStmtUnary(OP_assertnonnull, arrAddr); + newBlock.AddStatement(nullCheck); +#if DO_LT_0_CHECK + ConstvalNode *indexZero = builder->GetConstUInt32(0); + CompareNode *lessZero = + builder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), index, indexZero); +#endif + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MapleVector arguments(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); + arguments.push_back(arrAddr); + BaseNode *arrLen = + builder->CreateExprIntrinsicop(INTRN_JAVA_ARRAY_LENGTH, OP_intrinsicop, *infoLenType, arguments); + BaseNode *cpmIndex = index; + if (arrLen->GetPrimType() != index->GetPrimType()) { + cpmIndex = builder->CreateExprTypeCvt(OP_cvt, *infoLenType, *indexType, index); + } + CompareNode *largeLen = + builder->CreateExprCompare(OP_ge, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), cpmIndex, arrLen); + // maybe should use cior +#if DO_LT_0_CHECK + BinaryNode *indexCon = + builder->CreateExprBinary(OP_lior, *GlobalTables::GetTypeTable().GetUInt1(), lessZero, largeLen); +#endif + MapleVector args(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); +#if DO_LT_0_CHECK + args.push_back(indexCon); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#else + args.push_back(largeLen); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#endif + newBlock.AddStatement(boundaryTrinsicCall); + } + } +} + +BlockNode *MIRLower::ExpandArrayMrtBlock(BlockNode &block) +{ + auto *newBlock = mirModule.CurFuncCodeMemPool()->New(); + if (block.GetFirst() == nullptr) { + return newBlock; + } + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + DEBUG_ASSERT(stmt != nullptr, "nullptr check"); + nextStmt = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case 
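AddArrayMrtMpl wraps a checked access in a null test on the base plus the combined `index < 0 || index >= length` predicate (the DO_LT_0_CHECK branch contributes the lior'ed less-than-zero term). The equivalent guard as plain C++:

    #include <cstdio>
    #include <cstdlib>

    int CheckedLoad(const int *base, int length, int index) {
        if (base == nullptr) std::abort();               // OP_assertnonnull
        if (index < 0 || index >= length) std::abort();  // lessZero || largeLen -> boundary check
        return base[index];
    }

    int main() {
        int a[3] = {10, 20, 30};
        std::printf("%d\n", CheckedLoad(a, 3, 2));  // prints 30
    }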
OP_if: + newBlock->AddStatement(ExpandArrayMrtIfBlock(static_cast(*stmt))); + break; + case OP_while: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_dowhile: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_doloop: + newBlock->AddStatement(ExpandArrayMrtDoloopBlock(static_cast(*stmt))); + break; + case OP_foreachelem: + newBlock->AddStatement(ExpandArrayMrtForeachelemBlock(static_cast(*stmt))); + break; + case OP_block: + newBlock->AddStatement(ExpandArrayMrtBlock(static_cast(*stmt))); + break; + default: + AddArrayMrtMpl(*stmt, *newBlock); + newBlock->AddStatement(stmt); + break; + } + } while (nextStmt != nullptr); + return newBlock; +} + +void MIRLower::ExpandArrayMrt(MIRFunction &func) +{ + if (ShouldOptArrayMrt(func)) { + BlockNode *origBody = func.GetBody(); + DEBUG_ASSERT(origBody != nullptr, "nullptr check"); + BlockNode *newBody = ExpandArrayMrtBlock(*origBody); + func.SetBody(newBody); + } +} + +MIRFuncType *MIRLower::FuncTypeFromFuncPtrExpr(BaseNode *x) +{ + MIRFuncType *res = nullptr; + MIRFunction *func = mirModule.CurFunction(); + switch (x->GetOpCode()) { + case OP_regread: { + RegreadNode *regread = static_cast(x); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(regread->GetRegIdx()); + // see if it is promoted from a symbol + if (preg->GetOp() == OP_dread) { + const MIRSymbol *symbol = preg->rematInfo.sym; + MIRType *mirType = symbol->GetType(); + if (preg->fieldID != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(preg->fieldID); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res != nullptr) { + break; + } + } + // check if a formal promoted to preg + for (FormalDef &formalDef : func->GetFormalDefVec()) { + if (!formalDef.formalSym->IsPreg()) { + continue; + } + if (formalDef.formalSym->GetPreg() == preg) { + MIRType *mirType = formalDef.formalSym->GetType(); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + } + break; + } + case OP_dread: { + DreadNode *dread = static_cast(x); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = symbol->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(dread->GetFieldID()); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_iread: { + IreadNode *iread = static_cast(x); + MIRPtrType *ptrType = static_cast(iread->GetType()); + MIRType *mirType = ptrType->GetPointedType(); + if (mirType->GetKind() == kTypeFunction) { + res = static_cast(mirType); + } else if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_addroffunc: { + AddroffuncNode *addrofFunc = static_cast(x); + PUIdx puIdx = addrofFunc->GetPUIdx(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + res = f->GetMIRFuncType(); + break; + } + case OP_retype: { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(x)->GetTyIdx()); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } 
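Every case in FuncTypeFromFuncPtrExpr ends in the same unwrap step: given a pointer-to-function type, recover the function type it points to. The step expressed against the standard type system rather than MIR types (illustrative only):

    #include <type_traits>

    // Unwrapping "pointer to function" to the function type itself, which is
    // the single step each case performs via MIRPtrType::GetPointedFuncType().
    using FuncPtr = void (*)(int, double);
    using Fn = std::remove_pointer_t<FuncPtr>;
    static_assert(std::is_function_v<Fn>);
    static_assert(std::is_same_v<Fn, void(int, double)>);

    int main() {}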
+ if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeFirstOpnd)); + } + break; + } + case OP_select: { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeSecondOpnd)); + if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeThirdOpnd)); + } + break; + } + default: + CHECK_FATAL(false, "LMBCLowerer::FuncTypeFromFuncPtrExpr: NYI"); + } + return res; +} + +const std::set MIRLower::kSetArrayHotFunc = {}; + +bool MIRLower::ShouldOptArrayMrt(const MIRFunction &func) +{ + return (MIRLower::kSetArrayHotFunc.find(func.GetName()) != MIRLower::kSetArrayHotFunc.end()); +} +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_module.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_module.cpp new file mode 100644 index 0000000000000000000000000000000000000000..da6fc29a40f5496c87fd013f9be093c0c33e2e9c --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_module.cpp @@ -0,0 +1,831 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mir_module.h" +#include "mir_const.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "mir_builder.h" +#include "debug_info.h" +#include "intrinsics.h" +#include "bin_mplt.h" + +namespace maple { +#if MIR_FEATURE_FULL // to avoid compilation error when MIR_FEATURE_FULL=0 +MIRModule::MIRModule(const std::string &fn) + : memPool(new ThreadShareMemPool(memPoolCtrler, "maple_ir mempool")), + pragmaMemPool(memPoolCtrler.NewMemPool("pragma mempool", false /* isLcalPool */)), + memPoolAllocator(memPool), + pragmaMemPoolAllocator(pragmaMemPool), + functionList(memPoolAllocator.Adapter()), + importedMplt(memPoolAllocator.Adapter()), + typeDefOrder(memPoolAllocator.Adapter()), + externStructTypeSet(std::less(), memPoolAllocator.Adapter()), + symbolSet(std::less(), memPoolAllocator.Adapter()), + symbolDefOrder(memPoolAllocator.Adapter()), + out(LogInfo::MapleLogger()), + fileName(fn), + fileInfo(memPoolAllocator.Adapter()), + fileInfoIsString(memPoolAllocator.Adapter()), + fileData(memPoolAllocator.Adapter()), + srcFileInfo(memPoolAllocator.Adapter()), + importFiles(memPoolAllocator.Adapter()), + importPaths(memPoolAllocator.Adapter()), + asmDecls(memPoolAllocator.Adapter()), + classList(memPoolAllocator.Adapter()), + optimizedFuncs(memPoolAllocator.Adapter()), + optimizedFuncsType(memPoolAllocator.Adapter()), + puIdxFieldInitializedMap(std::less(), memPoolAllocator.Adapter()), + inliningGlobals(memPoolAllocator.Adapter()), + partO2FuncList(memPoolAllocator.Adapter()), + safetyWarningMap(memPoolAllocator.Adapter()) +{ + GlobalTables::GetGsymTable().SetModule(this); + typeNameTab = memPool->New(memPoolAllocator); + mirBuilder = memPool->New(this); + dbgInfo = memPool->New(this); + IntrinDesc::InitMIRModule(this); +} + +MIRModule::~MIRModule() +{ + for (MIRFunction *mirFunc : functionList) { + mirFunc->ReleaseCodeMemory(); + } + ReleasePragmaMemPool(); + delete memPool; + delete binMplt; +} + +MemPool 
*MIRModule::CurFuncCodeMemPool() const +{ + if (useFuncCodeMemPoolTmp) { + return CurFunction()->GetCodeMemPoolTmp(); + } + return CurFunction()->GetCodeMemPool(); +} + +MapleAllocator *MIRModule::CurFuncCodeMemPoolAllocator() const +{ + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return &curFunc->GetCodeMempoolAllocator(); +} + +MapleAllocator &MIRModule::GetCurFuncCodeMPAllocator() const +{ + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return curFunc->GetCodeMPAllocator(); +} + +void MIRModule::AddExternStructType(TyIdx tyIdx) +{ + (void)externStructTypeSet.insert(tyIdx); +} + +void MIRModule::AddExternStructType(const MIRType *t) +{ + DEBUG_ASSERT(t != nullptr, "MIRType is null"); + (void)externStructTypeSet.insert(t->GetTypeIndex()); +} + +void MIRModule::AddSymbol(StIdx stIdx) +{ + auto it = symbolSet.find(stIdx); + if (it == symbolSet.end()) { + symbolDefOrder.push_back(stIdx); + } + (void)symbolSet.insert(stIdx); +} + +void MIRModule::AddSymbol(const MIRSymbol *s) +{ + DEBUG_ASSERT(s != nullptr, "s is null"); + AddSymbol(s->GetStIdx()); +} + +void MIRModule::DumpGlobals(bool emitStructureType) const +{ + if (flavor != kFlavorUnknown) { + LogInfo::MapleLogger() << "flavor " << flavor << '\n'; + } + if (srcLang != kSrcLangUnknown) { + LogInfo::MapleLogger() << "srclang " << srcLang << '\n'; + } + LogInfo::MapleLogger() << "id " << id << '\n'; + if (globalMemSize != 0) { + LogInfo::MapleLogger() << "globalmemsize " << globalMemSize << '\n'; + } + if (globalBlkMap != nullptr) { + LogInfo::MapleLogger() << "globalmemmap = [ "; + auto *p = reinterpret_cast(globalBlkMap); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalBlkMap + globalMemSize)) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + p++; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsTypeTagged != nullptr) { + LogInfo::MapleLogger() << "globalwordstypetagged = [ "; + auto *p = reinterpret_cast(globalWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsTypeTagged + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsRefCounted != nullptr) { + LogInfo::MapleLogger() << "globalwordsrefcounted = [ "; + auto *p = reinterpret_cast(globalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsRefCounted + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + LogInfo::MapleLogger() << "numfuncs " << numFuncs << '\n'; + if (!importFiles.empty()) { + // Output current module's mplt on top, imported ones at below + for (auto it = importFiles.rbegin(); it != importFiles.rend(); ++it) { + LogInfo::MapleLogger() << "import \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(*it) << "\"\n"; + } + } + if (!importPaths.empty()) { + size_t size = importPaths.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "importpath \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(importPaths[i]) + << "\"\n"; + } + } + if (!asmDecls.empty()) { + size_t size = asmDecls.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "asmdecl "; + EmitStr(asmDecls[i]); + } + } + if (entryFuncName.length()) { + LogInfo::MapleLogger() 
<< "entryfunc &" << entryFuncName << '\n'; + } + if (!fileInfo.empty()) { + LogInfo::MapleLogger() << "fileinfo {\n"; + size_t size = fileInfo.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileInfo[i].first) + << " "; + if (!fileInfoIsString[i]) { + LogInfo::MapleLogger() << "0x" << std::hex << fileInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" + << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + if (!srcFileInfo.empty()) { + LogInfo::MapleLogger() << "srcfileinfo {\n"; + size_t size = srcFileInfo.size(); + size_t i = 0; + for (auto infoElem : srcFileInfo) { + LogInfo::MapleLogger() << " " << infoElem.second; + LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(infoElem.first) << "\""; + if (i++ < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + } + if (!fileData.empty()) { + LogInfo::MapleLogger() << "filedata {\n"; + size_t size = fileData.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileData[i].first) + << " "; + size_t dataSize = fileData[i].second.size(); + for (size_t j = 0; j < dataSize; ++j) { + uint8 data = fileData[i].second[j]; + LogInfo::MapleLogger() << "0x" << std::hex << static_cast(data); + if (j < dataSize - 1) { + LogInfo::MapleLogger() << ' '; + } + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + if (flavor < kMmpl || flavor == kFlavorLmbc) { + for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DEBUG_ASSERT(type != nullptr, "type should not be nullptr here"); + bool isStructType = type->IsStructType(); + if (isStructType) { + auto *structType = static_cast(type); + // still emit what in extern_structtype_set_ + if (!emitStructureType && + externStructTypeSet.find(structType->GetTypeIndex()) == externStructTypeSet.end()) { + continue; + } + if (structType->IsImported()) { + continue; + } + } + + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetKind() == kTypeByName) { + LogInfo::MapleLogger() << "void"; + } else if (type->GetNameStrIdx() == *it) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + } + if (someSymbolNeedForwDecl) { + // an extra pass thru the global symbol table to print forward decl + for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + if (s->IsNeedForwDecl()) { + s->Dump(false, 0, true); + } + } + } + // dump javaclass and javainterface first + for (auto sit = symbolDefOrder.begin(); sit != symbolDefOrder.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + DEBUG_ASSERT(s != nullptr, "null ptr check"); + if (!s->IsJavaClassInterface()) { + continue; + } + // Verify: all wpofake variables should have been deleted from globaltable + if (!s->IsDeleted()) { + 
+                s->Dump(false, 0);
+            }
+        }
+        for (auto sit = symbolDefOrder.begin(); sit != symbolDefOrder.end(); ++sit) {
+            MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx());
+            CHECK_FATAL(s != nullptr, "nullptr check");
+            if (s->IsJavaClassInterface()) {
+                continue;
+            }
+            if (!s->IsDeleted() && !s->GetIsImported() && !s->GetIsImportedDecl()) {
+                s->Dump(false, 0);
+            }
+        }
+    }
+}
+
+void MIRModule::Dump(bool emitStructureType, const std::unordered_set<std::string> *dumpFuncSet) const
+{
+    DumpGlobals(emitStructureType);
+    DumpFunctionList(dumpFuncSet);
+}
+
+void MIRModule::DumpGlobalArraySymbol() const
+{
+    for (StIdx stIdx : symbolSet) {
+        MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx());
+        DEBUG_ASSERT(symbol != nullptr, "null ptr check");
+        MIRType *symbolType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol->GetTyIdx());
+        DEBUG_ASSERT(symbolType != nullptr, "null ptr check");
+        if (symbolType == nullptr || symbolType->GetKind() != kTypeArray) {
+            continue;
+        }
+        symbol->Dump(false, 0);
+    }
+}
+
+void MIRModule::Emit(const std::string &outFileName) const
+{
+    std::ofstream file;
+    // Change cout's buffer to file.
+    std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+    LogInfo::MapleLogger().rdbuf(file.rdbuf());
+    file.open(outFileName, std::ios::trunc);
+    DumpGlobals();
+    for (MIRFunction *mirFunc : functionList) {
+        mirFunc->Dump();
+    }
+    // Restore cout's buffer.
+    LogInfo::MapleLogger().rdbuf(backup);
+    file.close();
+}
+
+void MIRModule::DumpFunctionList(const std::unordered_set<std::string> *dumpFuncSet) const
+{
+    for (MIRFunction *mirFunc : functionList) {
+        if (dumpFuncSet == nullptr || dumpFuncSet->empty()) {
+            mirFunc->Dump();
+        } else {  // dump only if this func matches any name in *dumpFuncSet
+            const std::string &name = mirFunc->GetName();
+            bool matched = false;
+            for (std::string elem : *dumpFuncSet) {
+                if (name.find(elem.c_str()) != std::string::npos) {
+                    matched = true;
+                    break;
+                }
+            }
+            if (matched) {
+                mirFunc->Dump();
+            }
+        }
+    }
+}
+
+void MIRModule::OutputFunctionListAsciiMpl(const std::string &phaseName)
+{
+    std::string fileStem;
+    std::string::size_type lastDot = fileName.find_last_of('.');
+    if (lastDot == std::string::npos) {
+        fileStem = fileName.append(phaseName);
+    } else {
+        fileStem = fileName.substr(0, lastDot).append(phaseName);
+    }
+    std::string outfileName;
+    if (flavor >= kMmpl) {
+        outfileName = fileStem.append(".mmpl");
+    } else {
+        outfileName = fileStem.append(".mpl");
+    }
+    std::ofstream mplFile;
+    mplFile.open(outfileName, std::ios::app);
+    std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+    LogInfo::MapleLogger().rdbuf(mplFile.rdbuf());  // change cout's buffer to that of file
+    DumpGlobalArraySymbol();
+    DumpFunctionList(nullptr);
+    LogInfo::MapleLogger().rdbuf(backup);  // restore cout's buffer
+    mplFile.close();
+}
+
+void MIRModule::DumpToFile(const std::string &fileNameStr, bool emitStructureType) const
+{
+    std::ofstream file;
+    file.open(fileNameStr, std::ios::trunc);
+    if (!file.is_open()) {
+        ERR(kLncErr, "Cannot open %s", fileNameStr.c_str());
+        return;
+    }
+    // Change cout's buffer to file.
+    std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+    LogInfo::MapleLogger().rdbuf(file.rdbuf());
+    Dump(emitStructureType);
+    // Restore cout's buffer.
+    LogInfo::MapleLogger().rdbuf(backup);
+    file.close();
+}
+
+void MIRModule::DumpDefType()
+{
+    for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) {
+        TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it);
+        const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it);
+        MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+        DEBUG_ASSERT(type != nullptr, "type should not be nullptr here");
+        bool isStructType = type->IsStructType();
+        if (isStructType) {
+            auto *structType = static_cast<MIRStructType *>(type);
+            if (structType->IsImported()) {
+                continue;
+            }
+        }
+        LogInfo::MapleLogger() << "type $" << name << " ";
+        if (type->GetKind() == kTypeByName) {
+            LogInfo::MapleLogger() << "void";
+        } else if (type->GetNameStrIdx() == *it) {
+            type->Dump(1, true);
+        } else {
+            type->Dump(1);
+        }
+        LogInfo::MapleLogger() << '\n';
+    }
+}
+
+void MIRModule::DumpInlineCandidateToFile(const std::string &fileNameStr)
+{
+    if (optimizedFuncs.empty()) {
+        return;
+    }
+    std::ofstream file;
+    // Change cout's buffer to file.
+    std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+    LogInfo::MapleLogger().rdbuf(file.rdbuf());
+    file.open(fileNameStr, std::ios::trunc);
+    if (IsCModule()) {
+        DumpDefType();
+    }
+    // dump the global variables needed for the inlining file
+    for (auto symbolIdx : inliningGlobals) {
+        MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx);
+        DEBUG_ASSERT(s != nullptr, "null ptr check");
+        if (s->GetStorageClass() == kScFstatic) {
+            if (s->IsNeedForwDecl()) {
+                // const string, including initialization
+                s->Dump(false, 0, false);
+            }
+        }
+    }
+    for (auto symbolIdx : inliningGlobals) {
+        MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx);
+        DEBUG_ASSERT(s != nullptr, "null ptr check");
+        MIRStorageClass sc = s->GetStorageClass();
+        if (s->GetStorageClass() == kScFstatic) {
+            if (!s->IsNeedForwDecl()) {
+                // const string, including initialization
+                s->Dump(false, 0, false);
+            }
+        } else if (s->GetSKind() == kStFunc) {
+            s->GetFunction()->Dump(true);
+        } else {
+            // static fields as extern
+            s->SetStorageClass(kScExtern);
+            s->Dump(false, 0, true);
+        }
+        s->SetStorageClass(sc);
+    }
+    for (auto *func : optimizedFuncs) {
+        func->SetWithLocInfo(false);
+        func->Dump();
+    }
+    // Restore cout's buffer.
+    LogInfo::MapleLogger().rdbuf(backup);
+    file.close();
+}
+
+// This is not efficient. Only used in debug mode for now.
+const std::string &MIRModule::GetFileNameFromFileNum(uint32 fileNum) const
+{
+    GStrIdx nameIdx(0);
+    for (auto &info : srcFileInfo) {
+        if (info.second == fileNum) {
+            nameIdx = info.first;
+            break;
+        }
+    }
+    return GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx);
+}
+
+void MIRModule::DumpToHeaderFile(bool binaryMplt, const std::string &outputName)
+{
+    std::string outfileName;
+    std::string fileNameLocal = !outputName.empty() ?
+        outputName : fileName;
+    std::string::size_type lastDot = fileNameLocal.find_last_of('.');
+    if (lastDot == std::string::npos) {
+        outfileName = fileNameLocal.append(".mplt");
+    } else {
+        outfileName = fileNameLocal.substr(0, lastDot).append(".mplt");
+    }
+    if (binaryMplt) {
+        BinaryMplt binaryMpltTmp(*this);
+        binaryMpltTmp.Export(outfileName);
+    } else {
+        std::ofstream mpltFile;
+        mpltFile.open(outfileName, std::ios::trunc);
+        std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+        LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf());  // change cout's buffer to that of file
+        for (std::pair<std::u16string, MIRSymbol *> entity : GlobalTables::GetConstPool().GetConstU16StringPool()) {
+            LogInfo::MapleLogger() << "var $";
+            entity.second->DumpAsLiteralVar();
+            LogInfo::MapleLogger() << '\n';
+        }
+        for (auto it = classList.begin(); it != classList.end(); ++it) {
+            TyIdx curTyIdx(*it);
+            MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyIdx);
+            const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(type->GetNameStrIdx());
+            if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) {
+                auto *structType = static_cast<MIRStructType *>(type);
+                // skip imported class/interface and incomplete types
+                if (!structType->IsImported() && !structType->IsIncomplete()) {
+                    LogInfo::MapleLogger() << "type $" << name << " ";
+                    type->Dump(1, true);
+                    LogInfo::MapleLogger() << '\n';
+                }
+            }
+        }
+        /* restore cout */
+        LogInfo::MapleLogger().rdbuf(backup);
+        mpltFile.close();
+    }
+}
+
+/*
+   We use MIRStructType (kTypeStruct) to represent C/C++ structs
+   as well as C++ classes.
+
+   We use MIRClassType (kTypeClass) to represent Java classes, specifically.
+   MIRClassType has a parent type which encodes the Java class's parent
+   (exploiting the fact that Java classes have at most one parent class).
+ */
+void MIRModule::DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set<MIRType *> &dumpedClasses) const
+{
+    if (dumpedClasses.find(&ty) != dumpedClasses.end()) {
+        return;
+    }
+    // first, insert ty into dumpedClasses to prevent infinite recursion
+    (void)dumpedClasses.insert(&ty);
+    DEBUG_ASSERT(ty.GetKind() == kTypeClass || ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion ||
+                     ty.GetKind() == kTypeInterface,
+                 "Unexpected MIRType.");
+    /* No need to emit interfaces, because "interface variables are
+       final and static by default and methods are public and abstract"
+     */
+    if (ty.GetKind() == kTypeInterface) {
+        return;
+    }
+    // dump all of its parents
+    if (IsJavaModule()) {
+        DEBUG_ASSERT(ty.GetKind() != kTypeStruct, "type is not supposed to be struct");
+        DEBUG_ASSERT(ty.GetKind() != kTypeUnion, "type is not supposed to be union");
+        DEBUG_ASSERT(ty.GetKind() != kTypeInterface, "type is not supposed to be interface");
+    } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) {
+        DEBUG_ASSERT((ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion),
+                     "type should be either struct or union");
+    } else {
+        DEBUG_ASSERT(false, "source languages other than C/C++ are not supported yet");
+    }
+    const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(ty.GetNameStrIdx());
+    if (IsJavaModule()) {
+        // a Java class has at most one parent
+        auto &classType = static_cast<MIRClassType &>(ty);
+        MIRClassType *parentType = nullptr;
+        // find the parent and generate its type as well as those of its ancestors
+        if (classType.GetParentTyIdx() != 0u /* invalid type idx */) {
+            parentType =
+                static_cast<MIRClassType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx()));
+            CHECK_FATAL(parentType != nullptr, "nullptr check");
+            DumpTypeTreeToCxxHeaderFile(*parentType, dumpedClasses);
+        }
+        LogInfo::MapleLogger() << "struct " << name << " ";
+        if (parentType != nullptr) {
+            LogInfo::MapleLogger() << ": " << parentType->GetName() << " ";
+        }
+        if (!classType.IsIncomplete()) {
+            /* dump class type; it will dump as '{ ... }' */
+            classType.DumpAsCxx(1);
+            LogInfo::MapleLogger() << ";\n";
+        } else {
+            LogInfo::MapleLogger() << " /* incomplete type */\n";
+        }
+    } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) {
+        // how to access parent fields????
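+        // the C/C++ path stops here: only the Java branch above knows how to lay out
+        // inherited fields, so abort loudly instead of emitting a wrong header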
+ DEBUG_ASSERT(false, "not yet implemented"); + } +} + +void MIRModule::DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const +{ + std::ofstream mpltFile; + mpltFile.open(pathToOutf, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf()); // change cout's buffer to that of file + char *headerGuard = strdup(pathToOutf.c_str()); + CHECK_FATAL(headerGuard != nullptr, "strdup failed"); + for (char *p = headerGuard; *p; ++p) { + if (!isalnum(*p)) { + *p = '_'; + } else if (isalpha(*p) && islower(*p)) { + *p = toupper(*p); + } + } + // define a hash table + std::unordered_set dumpedClasses; + const char *prefix = "__SRCLANG_UNKNOWN_"; + if (IsJavaModule()) { + prefix = "__SRCLANG_JAVA_"; + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + prefix = "__SRCLANG_CXX_"; + } + LogInfo::MapleLogger() << "#ifndef " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "#define " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "/* this file is compiler-generated; do not edit */\n\n"; + LogInfo::MapleLogger() << "#include \n"; + LogInfo::MapleLogger() << "#include \n"; + for (auto &s : leafClasses) { + CHECK_FATAL(!s.empty(), "string is null"); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(s); + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(strIdx); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (ty == nullptr) { + continue; + } + DEBUG_ASSERT(ty->GetKind() == kTypeClass || ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion || + ty->GetKind() == kTypeInterface, + ""); + DumpTypeTreeToCxxHeaderFile(*ty, dumpedClasses); + } + LogInfo::MapleLogger() << "#endif /* " << prefix << headerGuard << "__ */\n"; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + free(headerGuard); + headerGuard = nullptr; + mpltFile.close(); +} + +void MIRModule::DumpClassToFile(const std::string &path) const +{ + std::string strPath(path); + strPath.append("/"); + for (auto it : typeNameTab->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + std::string outClassFile(name); + /* replace class name / with - */ + std::replace(outClassFile.begin(), outClassFile.end(), '/', '-'); + (void)outClassFile.insert(0, strPath); + outClassFile.append(".mpl"); + std::ofstream mplFile; + mplFile.open(outClassFile, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); + /* dump class type */ + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetNameStrIdx() == it.first && type->GetKind() != kTypeByName) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + mplFile.close(); + ; + } +} + +MIRFunction *MIRModule::FindEntryFunction() +{ + for (MIRFunction *currFunc : functionList) { + if (currFunc->GetName() == entryFuncName) { + entryFunc = currFunc; + return currFunc; + } + } + return nullptr; +} + +// given the phase name (including '.' 
+// at beginning), output the program in the module to the file with the given
+// file suffix, and the file stem from this->fileName appended with phaseName
+void MIRModule::OutputAsciiMpl(const char *phaseName, const char *suffix,
+                               const std::unordered_set<std::string> *dumpFuncSet, bool emitStructureType,
+                               bool binaryform)
+{
+    DEBUG_ASSERT(!(emitStructureType && binaryform), "Cannot emit type info in .bpl");
+    std::string fileStem;
+    std::string::size_type lastDot = fileName.find_last_of('.');
+    if (lastDot == std::string::npos) {
+        fileStem = fileName.append(phaseName);
+    } else {
+        fileStem = fileName.substr(0, lastDot).append(phaseName);
+    }
+    std::string outfileName;
+    outfileName = fileStem + suffix;
+    if (!binaryform) {
+        std::ofstream mplFile;
+        mplFile.open(outfileName, std::ios::trunc);
+        std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+        LogInfo::MapleLogger().rdbuf(mplFile.rdbuf());  // change LogInfo::MapleLogger()'s buffer to that of file
+        Dump(emitStructureType, dumpFuncSet);
+        LogInfo::MapleLogger().rdbuf(backup);  // restore LogInfo::MapleLogger()'s buffer
+        mplFile.close();
+    } else {
+        BinaryMplt binaryMplt(*this);
+        binaryMplt.GetBinExport().not2mplt = true;
+        binaryMplt.Export(outfileName);
+    }
+    std::ofstream mplFile;
+    mplFile.open(outfileName, std::ios::trunc);
+    std::streambuf *backup = LogInfo::MapleLogger().rdbuf();
+    LogInfo::MapleLogger().rdbuf(mplFile.rdbuf());  // change cout's buffer to that of file
+    Dump(emitStructureType);
+    if (withDbgInfo) {
+        dbgInfo->Dump(0);
+    }
+    LogInfo::MapleLogger().rdbuf(backup);  // restore cout's buffer
+    mplFile.close();
+}
+
+uint32 MIRModule::GetFileinfo(GStrIdx strIdx) const
+{
+    for (auto &infoElem : fileInfo) {
+        if (infoElem.first == strIdx) {
+            return infoElem.second;
+        }
+    }
+    DEBUG_ASSERT(false, "should not be here");
+    return 0;
+}
+
+std::string MIRModule::GetFileNameAsPostfix() const
+{
+    std::string fileNameStr = namemangler::kFileNameSplitterStr;
+    if (!fileInfo.empty()) {
+        // option 1: file name in INFO
+        uint32 fileNameIdx = GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename"));
+        fileNameStr += GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileNameIdx));
+    } else {
+        // option 2: src file name with its extension removed
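+        // e.g. a srcfileinfo entry "foo/bar.c" contributes "foo/bar" here, which
+        // the sanitizing loop below rewrites to "foo_bar"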
+        if (GetSrcFileInfo().size() != 0) {
+            GStrIdx idx = GetSrcFileInfo()[0].first;
+            const std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(idx);
+            DEBUG_ASSERT(kStr.find_last_of('.') != kStr.npos, "not found .");
+            fileNameStr += kStr.substr(0, kStr.find_last_of('.'));
+        } else {
+            DEBUG_ASSERT(0, "No fileinfo and no srcfileinfo in mpl file");
+        }
+    }
+    for (char &c : fileNameStr) {
+        if (!isalpha(c) && !isdigit(c) && c != '_' && c != '$') {
+            c = '_';
+        }
+    }
+    return fileNameStr;
+}
+
+void MIRModule::AddClass(TyIdx tyIdx)
+{
+    (void)classList.insert(tyIdx);
+}
+
+void MIRModule::RemoveClass(TyIdx tyIdx)
+{
+    (void)classList.erase(tyIdx);
+}
+
+#endif  // MIR_FEATURE_FULL
+void MIRModule::ReleaseCurFuncMemPoolTmp()
+{
+    CurFunction()->ReleaseMemory();
+}
+
+void MIRModule::SetFuncInfoPrinted() const
+{
+    CurFunction()->SetInfoPrinted();
+}
+
+void MIRModule::InitPartO2List(const std::string &list)
+{
+    if (list.empty()) {
+        return;
+    }
+    SetHasPartO2List(true);
+    std::ifstream infile(list);
+    if (!infile.is_open()) {
+        LogInfo::MapleLogger(kLlErr) << "Cannot open partO2 function list file " << list << '\n';
+        return;
+    }
+    std::string str;
+
+    while (getline(infile, str)) {
+        if (str.empty()) {
+            continue;
+        }
+        GStrIdx funcStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str);
+        partO2FuncList.insert(funcStrIdx);
+    }
+    infile.close();
+}
+
+bool MIRModule::HasNotWarned(uint32 position, uint32 stmtOriginalID)
+{
+    auto warnedOp = safetyWarningMap.find(position);
+    if (warnedOp == safetyWarningMap.end()) {
+        MapleSet<uint32> opSet(memPoolAllocator.Adapter());
+        opSet.emplace(stmtOriginalID);
+        safetyWarningMap.emplace(std::pair<uint32, MapleSet<uint32>>(position, std::move(opSet)));
+        return true;
+    }
+    if (warnedOp->second.find(stmtOriginalID) == warnedOp->second.end()) {
+        warnedOp->second.emplace(stmtOriginalID);
+        return true;
+    }
+    return false;
+}
+}  // namespace maple
diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_nodes.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_nodes.cpp
new file mode 100755
index 0000000000000000000000000000000000000000..1e74453a2a1cf9868c609ccb6ac314ca43be6264
--- /dev/null
+++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_nodes.cpp
@@ -0,0 +1,2847 @@
+/*
+ * Copyright (c) 2023 Huawei Device Co., Ltd.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mir_nodes.h"
+
+#include
+#include
+
+#include "maple_string.h"
+#include "mir_function.h"
+#include "namemangler.h"
+#include "opcode_info.h"
+#include "printing.h"
+#include "utils.h"
+#include "verification.h"
+
+namespace maple {
+MIRModule *theMIRModule = nullptr;
+std::atomic<uint32> StmtNode::stmtIDNext(1);  // 0 is reserved
+uint32 StmtNode::lastPrintedLineNum = 0;
+uint16 StmtNode::lastPrintedColumnNum = 0;
+const int32 CondGotoNode::probAll = 10000;
+
+const char *GetIntrinsicName(MIRIntrinsicID intrn)
+{
+    switch (intrn) {
+        default:
+#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) \
+    case INTRN_##STR:                                               \
+        return #STR;
+#include "intrinsics.def"
+#undef DEF_MIR_INTRINSIC
+    }
+}
+
+const char *BaseNode::GetOpName() const
+{
+    return kOpcodeInfo.GetTableItemAt(GetOpCode()).name.c_str();
+}
+
+bool BaseNode::MayThrowException()
+{
+    if (kOpcodeInfo.MayThrowException(GetOpCode())) {
+        if (GetOpCode() != OP_array) {
+            return true;
+        }
+        auto *arry = static_cast<ArrayNode *>(this);
+        if (arry->GetBoundsCheck()) {
+            return true;
+        }
+    } else if (GetOpCode() == OP_intrinsicop) {
+        auto *inNode = static_cast<IntrinsicopNode *>(this);
+        if (inNode->GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) {
+            return true;
+        }
+    }
+    for (size_t i = 0; i < NumOpnds(); ++i) {
+        if (Opnd(i)->MayThrowException()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AddrofNode::CheckNode(const MIRModule &mod) const
+{
+    const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(GetStIdx());
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    MIRType *ty = st->GetType();
+    switch (ty->GetKind()) {
+        case kTypeScalar: {
+#ifdef DYNAMICLANG
+            if (GetPrimType() == PTY_dynany) {
+                return true;
+            }
+            return IsPrimitiveScalar(GetPrimType());
+#else
+            return IsPrimitiveScalar(GetPrimType());
+#endif
+        }
+        case kTypeArray: {
+            return GetPrimType() == PTY_agg;
+        }
+        case kTypeUnion:
+        case kTypeStruct:
+        case kTypeStructIncomplete: {
+            if (GetFieldID() == 0) {
+                return GetPrimType() == PTY_agg;
+            }
+            auto *structType = static_cast<MIRStructType *>(ty);
+            TyIdx fTyIdx = structType->GetFieldTyIdx(fieldID);
+            MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fTyIdx);
+            MIRTypeKind subKind = subType->GetKind();
+            return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) ||
+                   (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) ||
+                   (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) ||
+                   (subKind == kTypeStruct && GetPrimType() == PTY_agg) || (fTyIdx != 0u && GetPrimType() == PTY_agg);
+        }
+        case kTypeClass:
+        case kTypeClassIncomplete: {
+            if (fieldID == 0) {
+                return GetPrimType() == PTY_agg;
+            }
+            auto *classType = static_cast<MIRClassType *>(ty);
+            MIRType *subType = classType->GetFieldType(fieldID);
+            MIRTypeKind subKind = subType->GetKind();
+            return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) ||
+                   (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) ||
+                   (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) ||
+                   (subKind == kTypeStruct && GetPrimType() == PTY_agg);
+        }
+        case kTypeInterface:
+        case kTypeInterfaceIncomplete: {
+            if (fieldID == 0) {
+                return GetPrimType() == PTY_agg;
+            }
+            auto *interfaceType = static_cast<MIRInterfaceType *>(ty);
+            MIRType *subType = interfaceType->GetFieldType(fieldID);
+            MIRTypeKind subKind = subType->GetKind();
+            return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) ||
+                   (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) ||
+                   (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) ||
+                   (subKind == kTypeStruct && GetPrimType() == PTY_agg);
+        }
+        case kTypePointer:
+            return IsPrimitivePoint(GetPrimType());
+        case kTypeParam:
+        case kTypeGenericInstant:
+            return true;
+        default:
+            return false;
+    }
+}
+
+MIRType *IreadNode::GetType() const
+{
+    MIRPtrType *ptrtype = static_cast<MIRPtrType *>(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx));
+    if (fieldID == 0) {
+        return ptrtype->GetPointedType();
+    }
+    return GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrtype->GetPointedTyIdxWithFieldID(fieldID));
+}
+
+bool IreadNode::IsVolatile() const
+{
+    MIRType *type =
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "null ptr check");
+    DEBUG_ASSERT(type->IsMIRPtrType(), "type of iread should be pointer type");
+    return static_cast<MIRPtrType *>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+bool AddrofNode::IsVolatile(const MIRModule &mod) const
+{
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool DreadoffNode::IsVolatile(const MIRModule &mod) const
+{
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool DassignNode::AssigningVolatile(const MIRModule &mod) const
+{
+    auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(symbol != nullptr, "null ptr check on symbol");
+    return symbol->IsVolatile();
+}
+
+bool IassignNode::AssigningVolatile() const
+{
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    DEBUG_ASSERT(type != nullptr, "null ptr check");
+    DEBUG_ASSERT(type->IsMIRPtrType(), "type of iassign should be pointer type");
+    return static_cast<MIRPtrType *>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+void BlockNode::AddStatement(StmtNode *stmt)
+{
+    DEBUG_ASSERT(stmt != nullptr, "null ptr check");
+    stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::AppendStatementsFromBlock(BlockNode &blk)
+{
+    if (blk.GetStmtNodes().empty()) {
+        return;
+    }
+    stmtNodeList.splice(stmtNodeList.end(), blk.GetStmtNodes());
+}
+
+/// Insert stmt as the first
+void BlockNode::InsertFirst(StmtNode *stmt)
+{
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmtNodeList.push_front(stmt);
+}
+
+/// Insert stmt as the last
+void BlockNode::InsertLast(StmtNode *stmt)
+{
+    DEBUG_ASSERT(stmt != nullptr, "stmt is null");
+    stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk)
+{
+    stmtNodeList.splice(&stmtNode, blk.GetStmtNodes());
+    stmtNodeList.erase(&stmtNode);
+    stmtNode.SetNext(blk.GetLast()->GetNext());
+}
+
+void BlockNode::ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2)
+{
+    if (stmtNode2 == stmtNode1) {
+        // do nothing
+    } else if (stmtNode2 == nullptr) {
+        // delete stmtNode1
+        stmtNodeList.erase(stmtNode1);
+    } else {
+        // replace stmtNode1 with stmtNode2
+        stmtNodeList.insert(stmtNode1, stmtNode2);
+        (void)stmtNodeList.erase(stmtNode1);
+    }
+}
+
+// remove stmtNode1 from the block
+void BlockNode::RemoveStmt(const StmtNode *stmtNode1)
+{
+    DEBUG_ASSERT(stmtNode1 != nullptr, "delete a null statement");
+    (void)stmtNodeList.erase(stmtNode1);
+}
+
+/// Insert stmtNode2 before stmtNode1 in current block.
+void BlockNode::InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2)
+{
+    stmtNodeList.insert(stmtNode1, stmtNode2);
+}
+
+/// Insert stmtNode2 after stmtNode1 in current block.
+void BlockNode::InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2)
+{
+    stmtNodeList.insertAfter(stmtNode1, stmtNode2);
+}
+
+// insert all the stmts in inblock to the current block after stmt1
+void BlockNode::InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1)
+{
+    DEBUG_ASSERT(stmt1 != nullptr, "null ptr check");
+    DEBUG_ASSERT(!inblock.IsEmpty(), "NYI");
+    stmtNodeList.splice(stmt1, inblock.GetStmtNodes());
+}
+
+BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map<uint32_t, uint64_t> &toFreqs,
+                                         std::unordered_map<uint32_t, uint64_t> &fromFreqs, uint64_t numer,
+                                         uint64_t denom, uint32_t updateOp)
+{
+    auto *nnode = allocator.GetMemPool()->New<BlockNode>();
+    nnode->SetStmtID(stmtIDNext++);
+    if (fromFreqs.count(GetStmtID()) > 0) {
+        uint64_t oldFreq = fromFreqs[GetStmtID()];
+        uint64_t newFreq;
+        if (updateOp & kUpdateUnrollRemainderFreq) {
+            newFreq = denom > 0 ? (oldFreq * numer % denom) : oldFreq;
+        } else {
+            newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq);
+        }
+        toFreqs[nnode->GetStmtID()] = (newFreq > 0 || (numer == 0)) ? newFreq : 1;
+        if (updateOp & kUpdateOrigFreq) {  // updateOp & 1 : update the original (from) freq
+            int64_t left = ((oldFreq - newFreq) > 0 || (oldFreq == 0)) ? (oldFreq - newFreq) : 1;
+            fromFreqs[GetStmtID()] = static_cast<uint64_t>(left);
+        }
+    }
+    for (auto &stmt : stmtNodeList) {
+        StmtNode *newStmt;
+        if (stmt.GetOpCode() == OP_block) {
+            newStmt = static_cast<StmtNode *>(
+                (static_cast<BlockNode *>(&stmt))
+                    ->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_if) {
+            newStmt = static_cast<StmtNode *>(
+                (static_cast<IfStmtNode *>(&stmt))
+                    ->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_while) {
+            newStmt = static_cast<StmtNode *>(
+                (static_cast<WhileStmtNode *>(&stmt))
+                    ->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else if (stmt.GetOpCode() == OP_doloop) {
+            newStmt = static_cast<StmtNode *>(
+                (static_cast<DoloopNode *>(&stmt))
+                    ->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp));
+        } else {
+            newStmt = static_cast<StmtNode *>(stmt.CloneTree(allocator));
+            if (fromFreqs.count(stmt.GetStmtID()) > 0) {
+                uint64_t oldFreq = fromFreqs[stmt.GetStmtID()];
+                uint64_t newFreq;
+                if (updateOp & kUpdateUnrollRemainderFreq) {
+                    newFreq = denom > 0 ? (oldFreq * numer % denom) : oldFreq;
+                } else {
+                    newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq);
+                }
+                toFreqs[newStmt->GetStmtID()] =
+                    (newFreq > 0 || oldFreq == 0 || numer == 0) ? static_cast<uint64_t>(newFreq) : 1;
+                if (updateOp & kUpdateOrigFreq) {
+                    int64_t left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ?
+                        (oldFreq - newFreq) : 1;
+                    fromFreqs[stmt.GetStmtID()] = static_cast<uint64_t>(left);
+                }
+            }
+        }
+        DEBUG_ASSERT(newStmt != nullptr, "null ptr check");
+        newStmt->SetSrcPos(stmt.GetSrcPos());
+        newStmt->SetPrev(nullptr);
+        newStmt->SetNext(nullptr);
+        nnode->AddStatement(newStmt);
+    }
+    return nnode;
+}
+
+void BaseNode::DumpBase(int32 indent) const
+{
+    PrintIndentation(indent);
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+}
+
+void CatchNode::Dump(int32 indent) const
+{
+    PrintIndentation(indent);
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " {";
+    size_t size = exceptionTyIdxVec.size();
+    for (size_t i = 0; i < size; ++i) {
+        LogInfo::MapleLogger() << " ";
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdxVec[i])->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << " }\n";
+}
+
+void CppCatchNode::Dump(int32 indent) const
+{
+    PrintIndentation(indent);
+    LogInfo::MapleLogger() << kOpcodeInfo.GetName(op);
+    if (exceptionTyIdx.GetIdx() != 0) {
+        LogInfo::MapleLogger() << " { ";
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdx)->Dump(indent + 1);
+        LogInfo::MapleLogger() << " }";
+    }
+    LogInfo::MapleLogger() << std::endl;
+}
+
+void UnaryNode::DumpOpnd(const MIRModule &, int32 indent) const
+{
+    DumpOpnd(indent);
+}
+
+void UnaryNode::DumpOpnd(int32 indent) const
+{
+    LogInfo::MapleLogger() << " (";
+    if (uOpnd != nullptr) {
+        uOpnd->Dump(indent);
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+void UnaryNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void TypeCvtNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " ";
+    LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " " << GetPrimTypeName(FromType());
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void RetypeNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " ";
+    LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " ";
+    MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    if (ty->GetKind() == kTypeScalar) {
+        LogInfo::MapleLogger() << "<";
+        ty->Dump(indent + 1);
+        LogInfo::MapleLogger() << ">";
+    } else {
+        ty->Dump(indent + 1);
+    }
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void ExtractbitsNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    if (GetOpCode() == OP_extractbits) {
+        LogInfo::MapleLogger() << " " << static_cast<int32>(bitsOffset) << " " << static_cast<int32>(bitsSize);
+    } else {
+        LogInfo::MapleLogger() << " " << static_cast<int32>(bitsSize);
+    }
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void IreadNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    LogInfo::MapleLogger() << " ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0);
+    LogInfo::MapleLogger() << " " << fieldID;
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void IreadoffNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    LogInfo::MapleLogger() << " " << offset;
+    DumpOpnd(*theMIRModule, indent);
+}
+
+void IreadFPoffNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    LogInfo::MapleLogger() << " " << offset;
+}
+
+void BinaryNode::Dump(int32 indent) const
+{
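+    // opcode and result type print first; BinaryOpnds::Dump below prints the two
+    // operands, inline when both are leaves and one per line otherwise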
+    BaseNode::DumpBase(0);
+    BinaryOpnds::Dump(indent);
+}
+
+void BinaryOpnds::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << " (";
+    if (bOpnd[0]->IsLeaf() && bOpnd[1]->IsLeaf()) {
+        bOpnd[0]->Dump(0);
+        LogInfo::MapleLogger() << ", ";
+        bOpnd[1]->Dump(0);
+    } else {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        bOpnd[0]->Dump(indent + 1);
+        LogInfo::MapleLogger() << ",\n";
+        PrintIndentation(indent + 1);
+        bOpnd[1]->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+void ResolveFuncNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    LogInfo::MapleLogger() << " &" << func->GetName();
+    BinaryOpnds::Dump(indent);
+}
+
+void CompareNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    LogInfo::MapleLogger() << " " << GetPrimTypeName(opndType);
+    BinaryOpnds::Dump(indent);
+}
+
+void DepositbitsNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    LogInfo::MapleLogger() << " " << static_cast<int32>(bitsOffset) << " " << static_cast<int32>(bitsSize) << " (";
+    if (GetBOpnd(0)->IsLeaf() && GetBOpnd(1)->IsLeaf()) {
+        GetBOpnd(0)->Dump(0);
+        LogInfo::MapleLogger() << ", ";
+        GetBOpnd(1)->Dump(0);
+    } else {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        GetBOpnd(0)->Dump(indent + 1);
+        LogInfo::MapleLogger() << ",\n";
+        PrintIndentation(indent + 1);
+        GetBOpnd(1)->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+void TernaryNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    LogInfo::MapleLogger() << " (";
+    if (topnd[kFirstOpnd]->IsLeaf() && topnd[kSecondOpnd]->IsLeaf() && topnd[kThirdOpnd]->IsLeaf()) {
+        topnd[kFirstOpnd]->Dump(0);
+        LogInfo::MapleLogger() << ", ";
+        topnd[kSecondOpnd]->Dump(0);
+        LogInfo::MapleLogger() << ", ";
+        topnd[kThirdOpnd]->Dump(0);
+    } else {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        topnd[kFirstOpnd]->Dump(indent + 1);
+        LogInfo::MapleLogger() << ",\n";
+        PrintIndentation(indent + 1);
+        topnd[kSecondOpnd]->Dump(indent + 1);
+        LogInfo::MapleLogger() << ",\n";
+        PrintIndentation(indent + 1);
+        topnd[kThirdOpnd]->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+void NaryOpnds::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << " (";
+    if (GetNopndSize() == 0) {
+        LogInfo::MapleLogger() << ")";
+        return;
+    }
+    if (GetNopndSize() == 1) {
+        GetNopndAt(0)->Dump(indent);
+    } else {
+        bool allisLeaf = true;
+        for (size_t i = 0; i < GetNopndSize(); ++i) {
+            if (!GetNopndAt(i)->IsLeaf()) {
+                allisLeaf = false;
+                break;
+            }
+        }
+        if (allisLeaf) {
+            GetNopndAt(0)->Dump(0);
+            for (size_t i = 1; i < GetNopndSize(); ++i) {
+                LogInfo::MapleLogger() << ", ";
+                GetNopndAt(i)->Dump(0);
+            }
+        } else {
+            LogInfo::MapleLogger() << '\n';
+            PrintIndentation(indent + 1);
+            GetNopndAt(0)->Dump(indent + 1);
+            for (size_t i = 1; i < GetNopndSize(); ++i) {
+                LogInfo::MapleLogger() << ",\n";
+                PrintIndentation(indent + 1);
+                GetNopndAt(i)->Dump(indent + 1);
+            }
+        }
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+void DeoptBundleInfo::Dump(int32 indent) const
+{
+    size_t deoptBundleSize = deoptBundleInfo.size();
+    if (deoptBundleSize == 0) {
+        return;
+    }
+    LogInfo::MapleLogger() << " deopt: (";
+    bool isFirstItem = true;
+    for (const auto &elem : deoptBundleInfo) {
+        if (!isFirstItem) {
+            LogInfo::MapleLogger() << ", ";
+        } else {
+            isFirstItem = false;
+        }
+        LogInfo::MapleLogger() << elem.first << ": ";
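+        // a deopt bundle value is either a pseudo register or a constant; only
+        // integer constants are supported (anything else trips the CHECK_FATAL below)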
+        auto valueKind = elem.second.GetMapleValueKind();
+        if (valueKind == MapleValue::kPregKind) {
+            LogInfo::MapleLogger() << "%" << elem.second.GetPregIdx() << " ";
+        } else if (valueKind == MapleValue::kConstKind) {
+            if (elem.second.GetConstValue().GetKind() != kConstInt) {
+                CHECK_FATAL(false, "not supported currently");
+            }
+            LogInfo::MapleLogger() << static_cast<const MIRIntConst &>(elem.second.GetConstValue()).GetValue() << " ";
+        }
+    }
+    LogInfo::MapleLogger() << ")";
+}
+
+bool NaryOpnds::VerifyOpnds() const
+{
+    bool nOpndsVerify = true;
+    for (size_t i = 0; i < GetNopndSize(); ++i) {
+        if (!GetNopndAt(i)->Verify()) {
+            nOpndsVerify = false;
+            break;
+        }
+    }
+    return nOpndsVerify;
+}
+
+void NaryNode::Dump(int32 indent) const
+{
+    BaseNode::DumpBase(0);
+    NaryOpnds::Dump(indent);
+}
+
+const MIRType *ArrayNode::GetArrayType(const TypeTable &tt) const
+{
+    const MIRType *type = tt.GetTypeFromTyIdx(tyIdx);
+    CHECK_FATAL(type->GetKind() == kTypePointer, "expect array type pointer");
+    const auto *pointType = static_cast<const MIRPtrType *>(type);
+    return tt.GetTypeFromTyIdx(pointType->GetPointedTyIdx());
+}
+MIRType *ArrayNode::GetArrayType(const TypeTable &tt)
+{
+    return const_cast<MIRType *>(const_cast<const ArrayNode *>(this)->GetArrayType(tt));
+}
+
+const BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i) const
+{
+    const auto *arrayType = static_cast<const MIRArrayType *>(GetArrayType(tt));
+    auto *mirConst =
+        GlobalTables::GetIntConstTable().GetOrCreateIntConst(i, *tt.GetTypeFromTyIdx(arrayType->GetElemTyIdx()));
+    return mod.CurFuncCodeMemPool()->New<ConstvalNode>(mirConst);
+}
+BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i)
+{
+    return const_cast<BaseNode *>(const_cast<const ArrayNode *>(this)->GetDim(mod, tt, i));
+}
+
+void ArrayNode::Dump(int32 indent) const
+{
+    PrintIndentation(0);
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " ";
+    if (boundsCheck) {
+        LogInfo::MapleLogger() << "1 ";
+    } else {
+        LogInfo::MapleLogger() << "0 ";
+    }
+    LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType());
+    LogInfo::MapleLogger() << " ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0);
+    NaryOpnds::Dump(indent);
+}
+
+bool ArrayNode::IsSameBase(ArrayNode *arry)
+{
+    DEBUG_ASSERT(arry != nullptr, "null ptr check");
+    if (arry == this) {
+        return true;
+    }
+    BaseNode *curBase = this->GetBase();
+    BaseNode *otherBase = arry->GetBase();
+    if (curBase->GetOpCode() != OP_addrof || otherBase->GetOpCode() != OP_addrof) {
+        return false;
+    }
+    return static_cast<AddrofNode *>(curBase)->GetStIdx() == static_cast<AddrofNode *>(otherBase)->GetStIdx();
+}
+
+void IntrinsicopNode::Dump(int32 indent) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    if (GetOpCode() == OP_intrinsicopwithtype) {
+        LogInfo::MapleLogger() << " ";
+        GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << " " << GetIntrinsicName(GetIntrinsic());
+    NaryOpnds::Dump(indent);
+}
+
+void ConstvalNode::Dump(int32) const
+{
+    if (GetConstVal()->GetType().GetKind() != kTypePointer) {
+        BaseNode::DumpBase(0);
+        LogInfo::MapleLogger() << " ";
+    }
+    GetConstVal()->Dump();
+}
+
+void ConststrNode::Dump(int32) const
+{
+    BaseNode::DumpBase(0);
+    const std::string kStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(UStrIdx(strIdx));
+    PrintString(kStr);
+}
+
+void Conststr16Node::Dump(int32) const
+{
+    BaseNode::DumpBase(0);
+    const std::u16string kStr16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(U16StrIdx(strIdx));
+    // UTF-16 strings are dumped as UTF-8 strings in mpl to
+    // keep the printable chars in ASCII form
+    std::string str;
+    (void)namemangler::UTF16ToUTF8(str, kStr16);
+    PrintString(str);
+}
+
+void SizeoftypeNode::Dump(int32) const
+{
+    BaseNode::DumpBase(0);
+    LogInfo::MapleLogger() << " ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0);
+}
+
+void FieldsDistNode::Dump(int32) const
+{
+    BaseNode::DumpBase(0);
+    LogInfo::MapleLogger() << " ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0);
+    LogInfo::MapleLogger() << " " << fieldID1 << " " << fieldID2;
+}
+
+void AddrofNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx());
+    LogInfo::MapleLogger() << (GetStIdx().Islocal() ? " %" : " $");
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    LogInfo::MapleLogger() << st->GetName();
+    if (fieldID != 0) {
+        LogInfo::MapleLogger() << " " << fieldID;
+    }
+}
+
+void DreadoffNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $");
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    LogInfo::MapleLogger() << st->GetName();
+    LogInfo::MapleLogger() << " " << offset;
+}
+
+void RegreadNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    if (regIdx >= 0) {
+        LogInfo::MapleLogger() << " %"
+                               << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo();
+        return;
+    }
+    LogInfo::MapleLogger() << " %%";
+    switch (regIdx) {
+        case -kSregSp:
+            LogInfo::MapleLogger() << "SP";
+            break;
+        case -kSregFp:
+            LogInfo::MapleLogger() << "FP";
+            break;
+        case -kSregGp:
+            LogInfo::MapleLogger() << "GP";
+            break;
+        case -kSregThrownval:
+            LogInfo::MapleLogger() << "thrownval";
+            break;
+        case -kSregMethodhdl:
+            LogInfo::MapleLogger() << "methodhdl";
+            break;
+        default:
+            int32 retValIdx = (-regIdx) - kSregRetval0;
+            LogInfo::MapleLogger() << "retval" << retValIdx;
+            break;
+    }
+}
+
+void AddroffuncNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx);
+    LogInfo::MapleLogger() << " &"
+                           << GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx())->GetName();
+}
+
+void AddroflabelNode::Dump(int32) const
+{
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType());
+    LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast<LabelIdx>(offset));
+}
+
+void StmtNode::DumpBase(int32 indent) const
+{
+    srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum);
+    // dump stmtFreqs
+    if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData() &&
+        theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) >= 0) {
+        LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq "
+                               << theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n";
+    }
+    PrintIndentation(indent);
+    LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name;
+}
+
+void StmtNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << '\n';
+}
+
+// Get the next stmt, skipping comment stmts.
+StmtNode *StmtNode::GetRealNext() const
+{
+    StmtNode *stmt = this->GetNext();
+    while (stmt != nullptr) {
+        if (stmt->GetOpCode() != OP_comment) {
+            break;
+        }
+        stmt = stmt->GetNext();
+    }
+    return stmt;
+}
+
+// insert this before pos
+void StmtNode::InsertAfterThis(StmtNode &pos)
+{
+    this->SetNext(&pos);
+    if (pos.GetPrev()) {
+        this->SetPrev(pos.GetPrev());
+        pos.GetPrev()->SetNext(this);
+    }
+    pos.SetPrev(this);
+}
+
+// insert this stmt node after pos
+void StmtNode::InsertBeforeThis(StmtNode &pos)
+{
+    this->SetPrev(&pos);
+    if (pos.GetNext()) {
+        this->SetNext(pos.GetNext());
+        pos.GetNext()->SetPrev(this);
+    }
+    pos.SetNext(this);
+}
+
+void DassignNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $");
+    LogInfo::MapleLogger() << st->GetName() << " " << fieldID;
+    LogInfo::MapleLogger() << " (";
+    if (GetRHS() != nullptr) {
+        GetRHS()->Dump(indent + 1);
+    } else {
+        LogInfo::MapleLogger() << "/*empty-rhs*/";
+    }
+    LogInfo::MapleLogger() << ")\n";
+}
+
+void DassignoffNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType());
+    const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $");
+    LogInfo::MapleLogger() << st->GetName() << " " << offset;
+    LogInfo::MapleLogger() << " (";
+    if (GetRHS() != nullptr) {
+        GetRHS()->Dump(indent + 1);
+    } else {
+        LogInfo::MapleLogger() << "/*empty-rhs*/";
+    }
+    LogInfo::MapleLogger() << ")\n";
+}
+
+void RegassignNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType());
+    if (regIdx >= 0) {
+        LogInfo::MapleLogger() << " %"
+                               << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo();
+    } else {
+        LogInfo::MapleLogger() << " %%";
+        switch (regIdx) {
+            case -kSregSp:
+                LogInfo::MapleLogger() << "SP";
+                break;
+            case -kSregFp:
+                LogInfo::MapleLogger() << "FP";
+                break;
+            case -kSregGp:
+                LogInfo::MapleLogger() << "GP";
+                break;
+            case -kSregThrownval:
+                LogInfo::MapleLogger() << "thrownval";
+                break;
+            case -kSregMethodhdl:
+                LogInfo::MapleLogger() << "methodhdl";
+                break;
+            case -kSregRetval0:
+                LogInfo::MapleLogger() << "retval0";
+                break;
+            // no default
+            default:
+                break;
+        }
+    }
+    LogInfo::MapleLogger() << " (";
+    UnaryStmtNode::Opnd(0)->Dump(indent + 1);
+    LogInfo::MapleLogger() << ")\n";
+}
+
+void IassignNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " ";
+    GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0);
+    LogInfo::MapleLogger() << " " << fieldID;
+    LogInfo::MapleLogger() << " (";
+    if (addrExpr->IsLeaf() && rhs->IsLeaf()) {
+        addrExpr->Dump(0);
+        LogInfo::MapleLogger() << ", ";
+        rhs->Dump(0);
+    } else {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        addrExpr->Dump(indent + 1);
+        LogInfo::MapleLogger() << ", \n";
+        PrintIndentation(indent + 1);
+        rhs->Dump(indent + 1);
+    }
+    LogInfo::MapleLogger() << ")\n";
+}
+
+void IassignoffNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset;
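+    // the two operands dumped below are the base address and the value being stored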
+    BinaryOpnds::Dump(indent);
+    LogInfo::MapleLogger() << '\n';
+}
+
+void IassignFPoffNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset;
+    DumpOpnd(*theMIRModule, indent);
+    LogInfo::MapleLogger() << '\n';
+}
+
+void BlkassignoffNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " " << offset << " " << GetAlign() << " " << blockSize;
+    BinaryOpnds::Dump(indent);
+    LogInfo::MapleLogger() << '\n';
+}
+
+void GotoNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    if (offset == 0) {
+        LogInfo::MapleLogger() << '\n';
+    } else {
+        LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast<LabelIdx>(offset))
+                               << '\n';
+    }
+}
+
+void JsTryNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    if (catchOffset == 0) {
+        LogInfo::MapleLogger() << " 0";
+    } else {
+        LogInfo::MapleLogger() << " @"
+                               << theMIRModule->CurFunction()->GetLabelName(static_cast<LabelIdx>(catchOffset));
+    }
+    if (finallyOffset == 0) {
+        LogInfo::MapleLogger() << " 0\n";
+    } else {
+        LogInfo::MapleLogger() << " @"
+                               << theMIRModule->CurFunction()->GetLabelName(static_cast<LabelIdx>(finallyOffset))
+                               << '\n';
+    }
+}
+
+void TryNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " {";
+    for (size_t i = 0; i < offsets.size(); ++i) {
+        uint32 offset = offsets[i];
+        LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName((LabelIdx)offset);
+    }
+    LogInfo::MapleLogger() << " }\n";
+}
+
+void CondGotoNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast<LabelIdx>(offset));
+    LogInfo::MapleLogger() << " (";
+    Opnd(0)->Dump(indent);
+    LogInfo::MapleLogger() << ")\n";
+}
+
+void SwitchNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " (";
+    switchOpnd->Dump(indent);
+    if (defaultLabel == 0) {
+        LogInfo::MapleLogger() << ") 0 {";
+    } else {
+        LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {";
+    }
+    for (auto it = switchTable.begin(); it != switchTable.end(); it++) {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec;
+        LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second);
+    }
+    LogInfo::MapleLogger() << " }\n";
+}
+
+void RangeGotoNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " (";
+    Opnd(0)->Dump(indent);
+    LogInfo::MapleLogger() << ") " << tagOffset << " {";
+    for (auto it = rangegotoTable.begin(); it != rangegotoTable.end(); it++) {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent + 1);
+        LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec;
+        LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second);
+    }
+    LogInfo::MapleLogger() << " }\n";
+}
+
+void MultiwayNode::Dump(int32 indent) const
+{
+    StmtNode::DumpBase(indent);
+    LogInfo::MapleLogger() << " (";
+    multiWayOpnd->Dump(indent);
+    if (defaultLabel == 0) {
+        LogInfo::MapleLogger() << ") 0 {";
+    } else {
+        LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {";
+    }
+    for (auto it = multiWayTable.begin(); it != multiWayTable.end(); it++) {
+        LogInfo::MapleLogger() << '\n';
+        PrintIndentation(indent);
LogInfo::MapleLogger() << " ("; + it->first->Dump(indent + 1); + LogInfo::MapleLogger() << "): goto @" << theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void UnaryStmtNode::DumpOpnd(const MIRModule &, int32 indent) const +{ + DumpOpnd(indent); +} + +void UnaryStmtNode::DumpOpnd(int32 indent) const +{ + LogInfo::MapleLogger() << " ("; + if (uOpnd != nullptr) { + uOpnd->Dump(indent); + } + LogInfo::MapleLogger() << ")\n"; +} + +void UnaryStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + DumpOpnd(indent); +} + +void GCMallocNode::Dump(int32) const +{ + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); +} + +void JarrayMallocNode::Dump(int32 indent) const +{ + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0, false); + DumpOpnd(*theMIRModule, indent); +} + +void IfStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + Opnd()->Dump(indent); + LogInfo::MapleLogger() << ")"; + thenPart->Dump(indent); + if (elsePart) { + PrintIndentation(indent); + LogInfo::MapleLogger() << "else {\n"; + for (auto &stmt : elsePart->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; + } +} + +void WhileStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + if (GetOpCode() == OP_while) { + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")"; + body->Dump(indent); + } else { // OP_dowhile + LogInfo::MapleLogger() << " {\n"; + for (auto &stmt : body->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "} ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")\n"; + } +} + +void DoloopNode::DumpDoVar(const MIRModule &mod) const +{ + if (isPreg) { + LogInfo::MapleLogger() << " %" + << mod.CurFunction()->GetPregTab()->PregFromPregIdx(doVarStIdx.FullIdx())->GetPregNo() + << " (\n"; + } else { + const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(doVarStIdx); + LogInfo::MapleLogger() << " %" << st->GetName() << " (\n"; + } +} + +void DoloopNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + DumpDoVar(*theMIRModule); + PrintIndentation(indent + 1); + startExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + condExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + incrExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ")"; + doBody->Dump(indent + 1); +} + +void ForeachelemNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(elemStIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << " %" << st->GetName(); + st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(arrayStIdx); + DEBUG_ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (arrayStIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName(); + loopBody->Dump(indent + 1); +} + +void BinaryStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void NaryStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertNonnullStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertNonnullStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + if (theMIRModule->IsCModule()) { + SafetyCheckStmtNode::Dump(); + } + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertBoundaryStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + SafetyCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertBoundaryStmtNode::Dump(int32 indent) const +{ + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent) +{ + const MIRFunction *mirFunc = mod.CurFunction(); + if (nrets.empty()) { + LogInfo::MapleLogger() << " {}\n"; + return; + } else if (nrets.size() == 1) { + StIdx stIdx = nrets.begin()->first; + RegFieldPair regFieldPair = nrets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << " { dassign "; + LogInfo::MapleLogger() << (stIdx.Islocal() ? "%" : "$"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << " }\n"; + return; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << " { regassign"; + LogInfo::MapleLogger() << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << "}\n"; + return; + } + } + LogInfo::MapleLogger() << " {\n"; + for (auto it = nrets.begin(); it != nrets.end(); it++) { + PrintIndentation(indent + 2); + StIdx stIdx = (it)->first; + RegFieldPair regFieldPair = it->second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << '\n'; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << '\n'; + } + } + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "}\n"; +} + +// iread expr has sideeffect, may cause derefference error +bool HasIreadExpr(const BaseNode *expr) +{ + if (expr->GetOpCode() == OP_iread) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasIreadExpr(expr->Opnd(i))) { + return true; + } + } + return false; +} + +// layer to leaf node +size_t MaxDepth(const BaseNode *expr) +{ + if (expr->IsLeaf()) { + return 1; + } + size_t maxSubDepth = 0; + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + size_t depth = MaxDepth(expr->Opnd(i)); + maxSubDepth = (depth > maxSubDepth) ? depth : maxSubDepth; + } + return maxSubDepth + 1; // expr itself +} + +MIRType *CallNode::GetCallReturnType() +{ + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + DEBUG_ASSERT(GlobalTables::GetFunctionTable().GetFuncTable().empty() == false, "container check"); + MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + return mirFunc->GetReturnType(); +} + +const MIRSymbol *CallNode::GetCallReturnSymbol(const MIRModule &mod) const +{ + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void CallNode::Dump(int32 indent, bool newline) const +{ + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + CHECK(puIdx < GlobalTables::GetFunctionTable().GetFuncTable().size(), "index out of range in CallNode::Dump"); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + DumpCallConvInfo(); + LogInfo::MapleLogger() << " &" << func->GetName(); + NaryOpnds::Dump(indent); + DeoptBundleInfo::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IcallNode::GetCallReturnType() +{ + if (op == OP_icall || op == OP_icallassigned) { + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx); + } + // icallproto or icallprotoassigned + MIRFuncType *funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} + +const MIRSymbol *IcallNode::GetCallReturnSymbol(const MIRModule &mod) const +{ + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = 
mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void IcallNode::Dump(int32 indent, bool newline) const +{ + StmtNode::DumpBase(indent); + DumpCallConvInfo(); + if (op == OP_icallproto || op == OP_icallprotoassigned) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)->Dump(indent + 1); + } + NaryOpnds::Dump(indent); + DeoptBundleInfo::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->returnValues, indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IntrinsiccallNode::GetCallReturnType() +{ + CHECK_FATAL(intrinsic < INTRN_LAST, "Index out of bound in IntrinsiccallNode::GetCallReturnType"); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrinsic]; + return intrinDesc->GetReturnType(); +} + +void IntrinsiccallNode::Dump(int32 indent, bool newline) const +{ + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + if (GetOpCode() == OP_intrinsiccall || GetOpCode() == OP_intrinsiccallassigned || + GetOpCode() == OP_intrinsiccallwithtype || GetOpCode() == OP_intrinsiccallwithtypeassigned) { + LogInfo::MapleLogger() << " " << GetIntrinsicName(intrinsic); + } else { + LogInfo::MapleLogger() << " " << intrinsic; + } + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void CallinstantNode::Dump(int32 indent, bool newline) const +{ + StmtNode::DumpBase(indent); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(GetPUIdx()); + LogInfo::MapleLogger() << " &" << func->GetName(); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(instVecTyIdx); + LogInfo::MapleLogger() << "<"; + auto *instVecType = static_cast(ty); + instVecType->Dump(indent); + LogInfo::MapleLogger() << ">"; + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void BlockNode::Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, bool withInfo, + bool isFuncbody, MIRFlavor flavor) const +{ + if (!withInfo) { + LogInfo::MapleLogger() << " {\n"; + } + // output puid for debugging purpose + if (isFuncbody) { + theMIRModule->CurFunction()->DumpFuncBody(indent); + if (theSymTab != nullptr || thePregTab != nullptr) { + // print the locally declared type names + if (theMIRModule->CurFunction()->HaveTypeNameTab()) { + for (auto it : theMIRModule->CurFunction()->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "type %" << name << " "; + if (type->GetKind() != kTypeByName) { + type->Dump(indent + 2, true); + } else { + type->Dump(indent + 2); + } + LogInfo::MapleLogger() << '\n'; + } + } + // print the locally declared variables + theSymTab->Dump(true, indent + 1, false, flavor); /* first:isLocal, third:printDeleted */ + if (thePregTab != nullptr) { + thePregTab->DumpPregsWithTypes(indent + 1); + } + } + LogInfo::MapleLogger() << '\n'; + if (theMIRModule->CurFunction()->NeedEmitAliasInfo()) { + 
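+ // Note added for clarity (not in the original source): DumpScope() is assumed to re-emit the function's lexical scope tree here so that alias information survives a dump/parse round trip.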
theMIRModule->CurFunction()->DumpScope(); + } + } + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " + << theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n"; + } + for (auto &stmt : GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; +} + +void LabelNode::Dump(int32) const +{ + if (theMIRModule->CurFunction()->WithLocInfo()) { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + } + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " + << theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID()) << "\n"; + } + LogInfo::MapleLogger() << "@" << theMIRModule->CurFunction()->GetLabelName(labelIdx) << "\n"; +} + +void CommentNode::Dump(int32 indent) const +{ + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << "#" << comment << '\n'; +} + +void EmitStr(const MapleString &mplStr) +{ + const char *str = mplStr.c_str(); + size_t len = mplStr.length(); + LogInfo::MapleLogger() << "\""; + + // don't expand special character; convert all \s to \\s in string + for (size_t i = 0; i < len; ++i) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + LogInfo::MapleLogger() << buf; + } else if (*str == '\b') { + LogInfo::MapleLogger() << "\\b"; + } else if (*str == '\n') { + LogInfo::MapleLogger() << "\\n"; + } else if (*str == '\r') { + LogInfo::MapleLogger() << "\\r"; + } else if (*str == '\t') { + LogInfo::MapleLogger() << "\\t"; + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + LogInfo::MapleLogger() << buf; + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), kBufSize - 1, "\\%03o", (*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + LogInfo::MapleLogger() << buf; + } + str++; + } + + LogInfo::MapleLogger() << "\"\n"; +} + +AsmNode *AsmNode::CloneTree(MapleAllocator &allocator) const +{ + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < inputConstraints.size(); ++i) { + node->inputConstraints.push_back(inputConstraints[i]); + } + for (size_t i = 0; i < asmOutputs.size(); ++i) { + node->asmOutputs.push_back(asmOutputs[i]); + } + for (size_t i = 0; i < outputConstraints.size(); ++i) { + node->outputConstraints.push_back(outputConstraints[i]); + } + for (size_t i = 0; i < clobberList.size(); ++i) { + node->clobberList.push_back(clobberList[i]); + } + for (size_t i = 0; i < gotoLabels.size(); ++i) { + node->gotoLabels.push_back(gotoLabels[i]); + } + node->SetNumOpnds(static_cast(GetNopndSize())); + return node; +} + +void AsmNode::DumpOutputs(int32 indent, std::string &uStr) const +{ + 
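+ // Illustrative sketch (comment added, not in the original source): for a GCC-style inline asm with one output, e.g. asm("mov %0, #1" : "=r"(x)), this prints the constraint string "=r" followed by the dassign/regassign that receives the output value.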
PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + size_t numOutputs = asmOutputs.size(); + + const MIRFunction *mirFunc = theMIRModule->CurFunction(); + if (numOutputs == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOutputs; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(outputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " "; + StIdx stIdx = asmOutputs[i].first; + RegFieldPair regFieldPair = asmOutputs[i].second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + DEBUG_ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo(); + } + if (i != numOutputs - 1) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +void AsmNode::DumpInputOperands(int32 indent, std::string &uStr) const +{ + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + if (numOpnds == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOpnds; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(inputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " ("; + GetNopndAt(i)->Dump(indent + 4); // Increase the indent by 4 bytes. 
+ LogInfo::MapleLogger() << ")"; + if (i != static_cast(static_cast(numOpnds - 1))) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << "\n"; + } + } +} + +void AsmNode::Dump(int32 indent) const +{ + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetName(op); + if (GetQualifier(kASMvolatile)) { + LogInfo::MapleLogger() << " volatile"; + } + if (GetQualifier(kASMinline)) { + LogInfo::MapleLogger() << " inline"; + } + if (GetQualifier(kASMgoto)) { + LogInfo::MapleLogger() << " goto"; + } + LogInfo::MapleLogger() << " { "; + EmitStr(asmString); + // print outputs + std::string uStr; + DumpOutputs(indent, uStr); + // print input operands + DumpInputOperands(indent, uStr); + // print clobber list + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < clobberList.size(); i++) { + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(clobberList[i]); + PrintString(uStr); + if (i != clobberList.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << '\n'; + // print labels + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < gotoLabels.size(); i++) { + LabelIdx offset = gotoLabels[i]; + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(offset); + if (i != gotoLabels.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << " }\n"; +} + +inline bool IntTypeVerify(PrimType pTyp) +{ + return pTyp == PTY_i32 || pTyp == PTY_u32 || pTyp == PTY_i64 || pTyp == PTY_u64; +} + +inline bool UnaryTypeVerify0(PrimType pTyp) +{ + bool verifyResult = IntTypeVerify(pTyp); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of bnot,extractbits,sext,zext must be in [i32,u32,i64,u64]\n"; + } + return verifyResult; +} + +bool ArithResTypeVerify(PrimType pTyp) +{ + switch (pTyp) { + case PTY_i32: + case PTY_u32: + case PTY_i64: + case PTY_u64: + case PTY_f32: + case PTY_f64: + return true; + case PTY_a32: + case PTY_a64: + case PTY_ptr: + return theMIRModule->IsCModule(); + default: + break; + } + + // Arithmetic operations on all vector types are allowed + PrimitiveType pt(pTyp); + if (pt.IsVector()) + return true; + + return false; +} + +inline bool UnaryTypeVerify1(PrimType pType) +{ + bool verifyResult = ArithResTypeVerify(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of abs,neg must be in [i32,u32,i64,u64,f32,f64]\n"; + } + return verifyResult; +} + +inline bool UnaryTypeVerify2(PrimType pType) +{ + bool verifyResult = IsPrimitiveFloat(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result-type of recip,sqrt must be in [f32,f64]\n"; + } + return verifyResult; +} + +inline bool BinaryTypeVerify(PrimType pType) +{ + return ArithResTypeVerify(pType) || IsPrimitiveDynType(pType); +} + +inline bool BinaryGenericVerify(const BaseNode &bOpnd0, const BaseNode &bOpnd1) +{ + return bOpnd0.Verify() && bOpnd1.Verify(); +} + +inline bool CompareTypeVerify(PrimType pType) +{ + bool verifyResult = IsPrimitiveInteger(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of eq,ge,gt,le,lt,ne must be primitive integer\n"; + } + return verifyResult; +} + +enum PTYGroup { + kPTYGi32u32a32, + kPTYGi32u32a32PtrRef, + kPTYGi64u64a64, + kPTYGPtrRef, + kPTYGDynall, + kPTYGu1, + kPTYGSimpleObj, + kPTYGSimpleStr, + kPTYGOthers +}; + +uint8 GetPTYGroup(PrimType primType) +{ + switch 
(primType) { + case PTY_i32: + case PTY_u32: + case PTY_a32: + return kPTYGi32u32a32; + case PTY_i64: + case PTY_u64: + case PTY_a64: + return kPTYGi64u64a64; + case PTY_ref: + case PTY_ptr: + return kPTYGPtrRef; + case PTY_dynany: + case PTY_dyni32: + case PTY_dynf64: + case PTY_dynstr: + case PTY_dynobj: + case PTY_dynundef: + case PTY_dynbool: + case PTY_dynf32: + case PTY_dynnone: + case PTY_dynnull: + return kPTYGDynall; + case PTY_u1: + return kPTYGu1; + case PTY_simpleobj: + return kPTYGSimpleObj; + case PTY_simplestr: + return kPTYGSimpleStr; + default: + return kPTYGOthers; + } +} + +uint8 GetCompGroupID(const BaseNode &opnd) +{ + return GetPTYGroup(opnd.GetPrimType()); +} + +/* + Refer to C11 Language Specification. + $ 6.3.1.8 Usual arithmetic conversions + */ +bool CompatibleTypeVerify(const BaseNode &opnd1, const BaseNode &opnd2) +{ + uint8 groupID1 = GetCompGroupID(opnd1); + uint8 groupID2 = GetCompGroupID(opnd2); + Opcode opCode2 = opnd2.GetOpCode(); + bool verifyResult = (groupID1 == groupID2); + if (opCode2 == OP_gcmallocjarray || opCode2 == OP_gcpermallocjarray) { + verifyResult = (groupID1 == kPTYGi32u32a32); + } + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:incompatible operand types :\n"; + opnd1.Dump(); + opnd2.Dump(); + } + return verifyResult; +} + +bool FloatIntCvtTypeVerify(PrimType resPType, PrimType opndPType) +{ + bool resTypeVerf = resPType == PTY_i32 || resPType == PTY_u32 || resPType == PTY_i64 || resPType == PTY_u64; + if (!resTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of ceil,floor,round,trunc must be in [i32,u32,i64,u64]\n"; + } + bool opndTypeVerf = opndPType == PTY_f32 || opndPType == PTY_f64; + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:operand-type of ceil,floor,round,trunc must be in [f32,f64]\n"; + } + return resTypeVerf && opndTypeVerf; +} + +inline MIRTypeKind GetTypeKind(StIdx stIdx) +{ + const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + DEBUG_ASSERT(var != nullptr, "null ptr check"); + MIRType *type = var->GetType(); + DEBUG_ASSERT(type != nullptr, "null ptr check"); + return type->GetKind(); +} + +inline MIRTypeKind GetTypeKind(TyIdx tyIdx) +{ + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DEBUG_ASSERT(type != nullptr, "null ptr check"); + return type->GetKind(); +} + +inline MIRType *GetPointedMIRType(TyIdx tyIdx) +{ + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypePointer, "TyIdx: %d is not pointer type", static_cast<uint32>(tyIdx)); + auto *ptrType = static_cast<MIRPtrType *>(type); + return ptrType->GetPointedType(); +} + +inline MIRTypeKind GetPointedTypeKind(TyIdx tyIdx) +{ + MIRType *pointedType = GetPointedMIRType(tyIdx); + DEBUG_ASSERT(pointedType != nullptr, "null ptr check"); + return pointedType->GetKind(); +} + +MIRTypeKind GetFieldTypeKind(MIRStructType *structType, FieldID fieldId) +{ + TyIdx fieldTyIdx; + if (fieldId > 0) { + MIRType *mirType = structType->GetFieldType(fieldId); + fieldTyIdx = mirType->GetTypeIndex(); + } else { + DEBUG_ASSERT(static_cast<size_t>(-fieldId) < structType->GetParentFieldsSize() + 1, + "array index out of range"); + fieldTyIdx = structType->GetParentFieldsElemt(-fieldId - 1).second.first; + } + return GetTypeKind(fieldTyIdx); +} + +inline bool IsStructureTypeKind(MIRTypeKind kind) +{ + return kind == kTypeStruct || kind == kTypeStructIncomplete || kind == kTypeUnion || kind == kTypeClass || + kind == kTypeClassIncomplete || kind == kTypeInterface || kind ==
kTypeInterfaceIncomplete; +} + +inline bool IsStructureVerify(FieldID fieldID, StIdx stIdx) +{ + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(stIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +inline bool IsStructureVerify(FieldID fieldID, TyIdx tyIdx) +{ + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(tyIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +bool IsSignedType(const BaseNode *opnd) +{ + switch (opnd->GetPrimType()) { + case PTY_i32: + case PTY_i64: + case PTY_f32: + case PTY_f64: + case PTY_dyni32: + case PTY_dynf32: + case PTY_dynf64: + return true; + default: + break; + } + return false; +} + +inline bool BinaryStrictSignVerify0(const BaseNode *bOpnd0, const BaseNode *bOpnd1) +{ + DEBUG_ASSERT(bOpnd0 != nullptr, "bOpnd0 is null"); + DEBUG_ASSERT(bOpnd1 != nullptr, "bOpnd1 is null"); + bool isDynany = (bOpnd0->GetPrimType() == PTY_dynany || bOpnd1->GetPrimType() == PTY_dynany); + return isDynany || (IsSignedType(bOpnd0) && IsSignedType(bOpnd1)) || + (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1)); +} + +bool BinaryStrictSignVerify1(const BaseNode *bOpnd0, const BaseNode *bOpnd1, const BaseNode *res) +{ + if (GetCompGroupID(*res) == kPTYGDynall) { + return BinaryStrictSignVerify0(bOpnd0, res) && BinaryStrictSignVerify0(bOpnd1, res) && + BinaryStrictSignVerify0(bOpnd0, bOpnd1); + } + return (IsSignedType(bOpnd0) && IsSignedType(bOpnd1) && IsSignedType(res)) || + (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1) && !IsSignedType(res)); +} + +bool UnaryNode::Verify() const +{ + bool resTypeVerf = true; + if (GetOpCode() == OP_bnot) { + resTypeVerf = UnaryTypeVerify0(GetPrimType()); + } else if (GetOpCode() == OP_lnot) { + if (!IsPrimitiveInteger(GetPrimType())) { + resTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:result-type of lnot must be primitive integer\n"; + } + } else if (GetOpCode() == OP_abs || GetOpCode() == OP_neg) { + resTypeVerf = UnaryTypeVerify1(GetPrimType()); + } else if (GetOpCode() == OP_recip || GetOpCode() == OP_sqrt) { + resTypeVerf = UnaryTypeVerify2(GetPrimType()); + } + + // When an opcode only specifies one type, check for compatibility + // between the operands and the result-type. 
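+ // Illustrative example (added, not in the original source): neg i32 (dread i64 %x) fails the check below because i64 and i32 fall in different primitive-type groups (kPTYGi64u64a64 vs kPTYGi32u32a32), while neg i64 (dread i64 %x) passes.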
+ bool compVerf = true; + // op_alloca : return type is not compatible with operand, skip + if (GetOpCode() != OP_alloca) { + compVerf = CompatibleTypeVerify(*uOpnd, *this); + } + bool opndExprVerf = uOpnd->Verify(); + return resTypeVerf && compVerf && opndExprVerf; +} + +bool TypeCvtNode::Verify() const +{ + bool opndTypeVerf = true; + bool opndSizeVerf = true; + if (GetOpCode() == OP_ceil || GetOpCode() == OP_floor || GetOpCode() == OP_round || GetOpCode() == OP_trunc) { + opndTypeVerf = FloatIntCvtTypeVerify(GetPrimType(), Opnd(0)->GetPrimType()); + } else if (GetOpCode() == OP_retype) { + if (GetPrimTypeSize(GetPrimType()) != GetPrimTypeSize(Opnd(0)->GetPrimType())) { + opndSizeVerf = false; + LogInfo::MapleLogger() << "\n#Error:The size of opnd0 and prim-type must be the same\n"; + } + } + bool opndExprVerf = Opnd(0)->Verify(); + return opndTypeVerf && opndSizeVerf && opndExprVerf; +} + +void AddRuntimeVerifyError(std::string errMsg, VerifyResult &verifyResult) +{ + LogInfo::MapleLogger() << "\n#Error: " << errMsg << '\n'; + // Throw Verify Error + verifyResult.AddPragmaVerifyError(verifyResult.GetCurrentClassName(), std::move(errMsg)); +} + +bool RetypeNode::VerifyPrimTypesAndOpnd() const +{ + PrimType toPrimType = GetPrimType(); + PrimType fromPrimType = Opnd(0)->GetPrimType(); + if (GetPrimTypeSize(toPrimType) != GetPrimTypeSize(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: The size of opnd0 and prim-type must be the same\n"; + return false; + } + + if (!IsPrimitivePoint(toPrimType) || !IsPrimitivePoint(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: Wrong prim-type in retype node, should be ref or ptr\n"; + return false; + } + return Opnd(0)->Verify(); +} + +bool RetypeNode::CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const +{ + // Array types are subtypes of Object. + // The intent is also that array types are subtypes of Cloneable and java.io.Serializable. + if (IsInterfaceOrClass(to)) { + Klass &toKlass = utils::ToRef(verifyResult.GetKlassHierarchy().GetKlassFromStrIdx(to.GetNameStrIdx())); + const std::string &toKlassName = toKlass.GetKlassName(); + const std::string &javaLangObject = namemangler::kJavaLangObjectStr; + const std::string javaLangCloneable = "Ljava_2Flang_2FCloneable_3B"; + const std::string javaIoSerializable = "Ljava_2Fio_2FSerializable_3B"; + if (toKlassName == javaLangObject || toKlassName == javaIoSerializable || toKlassName == javaLangCloneable) { + return true; + } + } + + AddRuntimeVerifyError("Java array " + from.GetName() + " is not assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const +{ + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- compound(X), compound(Y), isJavaAssignable(X, Y). + // arrayOf(X), arrayOf(Y) should already be X, Y here + if (from.IsMIRJarrayType()) { + return CheckFromJarray(from, to, verifyResult); + } + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- atom(X), atom(Y), X = Y. 
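+ // (reading of the rule above, added for clarity: arrays of scalar element types are assignable only when the element types match exactly, e.g. i32[] to i32[] but not i32[] to i64[])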
+ // This rule is not applicable to Maple IR + if (from.IsScalarType() && to.IsScalarType()) { + return true; + } + + if (IsInterfaceOrClass(from) && IsInterfaceOrClass(to)) { + const KlassHierarchy &klassHierarchy = verifyResult.GetKlassHierarchy(); + const std::string javaLangObject = namemangler::kJavaLangObjectStr; + Klass &fromKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(from.GetNameStrIdx())); + Klass &toKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(to.GetNameStrIdx())); + // We can cast everything to java.lang.Object, but interface isn't subclass of that, so we need this branch + if (toKlass.GetKlassName() == javaLangObject) { + return true; + } + // isJavaAssignable(class(_, _), class(To, L)) :- loadedClass(To, L, ToClass), classIsInterface(ToClass). + // isJavaAssignable(From, To) :- isJavaSubclassOf(From, To). + bool isAssignableKlass = klassHierarchy.IsSuperKlass(&toKlass, &fromKlass) || + klassHierarchy.IsSuperKlassForInterface(&toKlass, &fromKlass) || + klassHierarchy.IsInterfaceImplemented(&toKlass, &fromKlass); + if (isAssignableKlass) { + return true; + } + AddRuntimeVerifyError( + "Java type " + fromKlass.GetKlassName() + " is NOT assignable to " + toKlass.GetKlassName(), verifyResult); + return false; + } + AddRuntimeVerifyError(from.GetName() + " is NOT assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const +{ + if (from.IsScalarType() && to.IsScalarType() && !isJavaRefType) { + if (GetPTYGroup(from.GetPrimType()) == GetPTYGroup(to.GetPrimType())) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: retype scalar type failed\n"; + return false; + } + if (!verifyResult.GetMIRModule().IsJavaModule()) { + return true; + } + isJavaRefType |= IsJavaRefType(from) && IsJavaRefType(to); + if (isJavaRefType) { + return IsJavaAssignable(from, to, verifyResult); + } + + if (from.GetKind() != to.GetKind()) { + if (from.GetPrimType() == PTY_void || to.GetPrimType() == PTY_void) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: Retype different kind: from " << from.GetKind() << " to " << to.GetKind() + << "\n"; + return false; + } + return true; +} + +bool RetypeNode::VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, + VerifyResult &verifyResult) const +{ + int fromDim = const_cast(from).GetDim(); + int toDim = const_cast(to).GetDim(); + if (fromDim == toDim) { + return true; + } else if (fromDim > toDim) { + const MIRType *toElemType = to.GetElemType(); + while (toElemType != nullptr && (toElemType->IsMIRJarrayType() || toElemType->IsMIRPtrType())) { + toElemType = toElemType->IsMIRJarrayType() ? 
static_cast<const MIRJarrayType *>(toElemType)->GetElemType() + : static_cast<const MIRPtrType *>(toElemType)->GetPointedType(); + } + if (toElemType != nullptr && CheckFromJarray(from, *toElemType, verifyResult)) { + return true; + } + } + Dump(0); + std::string errorMsg = + "Arrays have different dimensions: from " + std::to_string(fromDim) + " to " + std::to_string(toDim); + AddRuntimeVerifyError(std::move(errorMsg), verifyResult); + return false; +} + +bool RetypeNode::Verify(VerifyResult &verifyResult) const +{ + // If RetypeNode::Verify returns false, dump this node to show the wrong IR + if (!VerifyPrimTypesAndOpnd()) { + Dump(0); + LogInfo::MapleLogger() << "\n#Error: Verify PrimTypes and Opnd failed in retype node\n"; + return false; + } + bool isJavaRefType = false; + const MIRType *fromMIRType = verifyResult.GetCurrentFunction()->GetNodeType(*Opnd(0)); + const MIRType *toMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + while (fromMIRType != nullptr && toMIRType != nullptr && BothPointerOrJarray(*fromMIRType, *toMIRType)) { + if (fromMIRType->IsMIRJarrayType()) { + isJavaRefType = true; + if (!VerifyJarrayDimention(static_cast<const MIRJarrayType &>(*fromMIRType), + static_cast<const MIRJarrayType &>(*toMIRType), verifyResult)) { + return false; + } + fromMIRType = static_cast<const MIRJarrayType *>(fromMIRType)->GetElemType(); + toMIRType = static_cast<const MIRJarrayType *>(toMIRType)->GetElemType(); + } else { + fromMIRType = static_cast<const MIRPtrType *>(fromMIRType)->GetPointedType(); + toMIRType = static_cast<const MIRPtrType *>(toMIRType)->GetPointedType(); + } + } + if (fromMIRType == nullptr || toMIRType == nullptr) { + Dump(0); + LogInfo::MapleLogger() << "\n#Error: MIRType is nullptr in retype node\n"; + return false; + } + + if (fromMIRType->IsIncomplete() || toMIRType->IsIncomplete()) { + // Add Deferred Check + const std::string &currentClassName = verifyResult.GetCurrentClassName(); + LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << fromMIRType->GetName() << " to " + << toMIRType->GetName() << " in class " << currentClassName << '\n'; + verifyResult.AddPragmaAssignableCheck(currentClassName, fromMIRType->GetName(), toMIRType->GetName()); + // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime + return true; + } + + if (VerifyCompleteMIRType(*fromMIRType, *toMIRType, isJavaRefType, verifyResult)) { + return true; + } + Dump(0); + LogInfo::MapleLogger() << "\n#Error: Verify Complete MIRType failed in retype node\n"; + return false; +} + +bool UnaryStmtNode::VerifyThrowable(VerifyResult &verifyResult) const +{ + const BaseNode *rhs = GetRHS(); + if (rhs == nullptr) { + return true; + } + + const MIRType *mirType = verifyResult.GetCurrentFunction()->GetNodeType(*rhs); + if (mirType != nullptr && mirType->IsMIRPtrType()) { + mirType = static_cast<const MIRPtrType *>(mirType)->GetPointedType(); + } + if (mirType != nullptr) { + if (mirType->GetPrimType() == PTY_void) { + return true; + } + if (mirType->IsIncomplete()) { + // Add Deferred Check + const std::string &currentClassName = verifyResult.GetCurrentClassName(); + std::string throwableName = "Ljava_2Flang_2FThrowable_3B"; + LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << mirType->GetName() << " to " << throwableName + << " in class " << currentClassName << '\n'; + verifyResult.AddPragmaAssignableCheck(currentClassName, mirType->GetName(), std::move(throwableName)); + // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime + return true; + } + if (mirType->IsMIRClassType() && static_cast<const MIRClassType *>(mirType)->IsExceptionType()) { + return true; + } + } + Dump(0); + std::string errMsg =
(mirType == nullptr ? "nullptr" : mirType->GetName()); + errMsg += " is NOT throwable."; + AddRuntimeVerifyError(std::move(errMsg), verifyResult); + return false; +} + +bool IntrinsicopNode::Verify(VerifyResult &verifyResult) const +{ + if (GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH && !VerifyJArrayLength(verifyResult)) { + return false; + } + return VerifyOpnds(); +} + +bool IntrinsicopNode::VerifyJArrayLength(VerifyResult &verifyResult) const +{ + BaseNode &val = utils::ToRef(Opnd(0)); + const MIRType *valType = verifyResult.GetCurrentFunction()->GetNodeType(val); + if (valType != nullptr && valType->IsMIRPtrType()) { + valType = static_cast(valType)->GetPointedType(); + if (valType != nullptr && !valType->IsMIRJarrayType()) { + Dump(0); + AddRuntimeVerifyError("Operand of array length is not array", verifyResult); + return false; + } + } + return true; +} + +bool IreadNode::Verify() const +{ + bool addrExprVerf = Opnd(0)->Verify(); + bool pTypeVerf = true; + bool structVerf = true; + if (GetTypeKind(tyIdx) != kTypePointer) { + LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n"; + return false; + } + if (GetOpCode() == OP_iaddrof) { + pTypeVerf = IsAddress(GetPrimType()); + if (!pTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:prim-type must be either ptr, ref, a32 or a64\n"; + } + } else { + if (fieldID == 0 && IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() + << "\n#Error:If the content dereferenced is a structure, then should specify agg\n"; + } + } + } + if (fieldID != 0) { + if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + structVerf = false; + LogInfo::MapleLogger() << "\n#Error:If field-id is not 0, then type must specify pointer to a structure\n"; + } else { + MIRType *type = GetPointedMIRType(tyIdx); + auto *stTy = static_cast(type); + if (GetOpCode() == OP_iread && stTy->GetFieldsSize() != 0) { + if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() + << "\n#Error:If the field itself is a structure, prim-type should specify agg\n"; + } + } + } + } + } + return addrExprVerf && pTypeVerf && structVerf; +} + +bool RegreadNode::Verify() const +{ + return true; +} + +bool IreadoffNode::Verify() const +{ + return true; +} + +bool IreadFPoffNode::Verify() const +{ + return true; +} + +bool ExtractbitsNode::Verify() const +{ + bool opndExprVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*Opnd(0), *this); + bool resTypeVerf = UnaryTypeVerify0(GetPrimType()); + constexpr int numBitsInByte = 8; + bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(Opnd(0)->GetPrimType()) >= bitsSize); + if (!opnd0SizeVerf) { + LogInfo::MapleLogger() + << "\n#Error: The operand of extractbits must be large enough to contain the specified bitfield\n"; + } + return opndExprVerf && compVerf && resTypeVerf && opnd0SizeVerf; +} + +bool BinaryNode::Verify() const +{ + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = BinaryTypeVerify(GetPrimType()); + if (!resTypeVerf && theMIRModule->IsCModule()) { + if ((IsAddress(GetBOpnd(0)->GetPrimType()) && !IsAddress(GetBOpnd(1)->GetPrimType())) || + (!IsAddress(GetBOpnd(0)->GetPrimType()) && IsAddress(GetBOpnd(1)->GetPrimType()))) { + resTypeVerf = true; // don't print the same kind of error message twice + if (GetOpCode() != OP_add && GetOpCode() != OP_sub && GetOpCode() != OP_CG_array_elem_add) { + LogInfo::MapleLogger() << 
"\n#Error: Only add and sub are allowed for pointer arithemetic\n"; + this->Dump(); + } else if (!IsAddress(GetPrimType())) { + LogInfo::MapleLogger() << "\n#Error: Adding an offset to a pointer or subtracting one from a pointer " + "should result in a pointer " + "value\n"; + this->Dump(); + } + } + } + if (!resTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result type of [add,div,sub,mul,max,min] and " + "[ashr,band,bior,bxor,land,lior,lshr,shl,rem] must " + "be in [i32,u32,i64,u64,f32,f64,dynamic-type]\n"; + this->Dump(); + } + bool comp0Verf = CompatibleTypeVerify(*GetBOpnd(0), *this); + bool comp1Verf = true; + // Shift operations do not require same-type operands + if (GetOpCode() < OP_ashr || GetOpCode() > OP_shl) { + comp1Verf = CompatibleTypeVerify(*GetBOpnd(1), *this); + } + bool signVerf = true; + bool typeVerf = resTypeVerf && comp0Verf && comp1Verf; + if (typeVerf) { + if (GetOpCode() == OP_div || GetOpCode() == OP_mul || GetOpCode() == OP_rem || GetOpCode() == OP_max || + GetOpCode() == OP_min) { + signVerf = BinaryStrictSignVerify1(GetBOpnd(0), GetBOpnd(1), this); + if (!signVerf) { + LogInfo::MapleLogger() + << "\n#Error:the result and operands of [div,mul,rem,max,min] must be of the same sign\n"; + } + } + } + return opndsVerf && typeVerf && signVerf; +} + +bool CompareNode::Verify() const +{ + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool compVerf = CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = CompareTypeVerify(GetPrimType()); + if (!resTypeVerf) { + this->Dump(); + } + bool signVerf = true; + bool typeVerf = compVerf && resTypeVerf; + if (typeVerf && GetOpCode() != OP_eq && GetOpCode() != OP_ne) { + signVerf = BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1)); + if (!signVerf) { + LogInfo::MapleLogger() << "\n#Error:the operands of [ge,gt,le,lt] must be of the same sign\n"; + } + } + return opndsVerf && typeVerf && signVerf; +} + +bool DepositbitsNode::Verify() const +{ + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = IntTypeVerify(GetPrimType()); + constexpr int numBitsInByte = 8; + bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(GetBOpnd(0)->GetPrimType()) >= bitsSize); + if (!opnd0SizeVerf) { + LogInfo::MapleLogger() + << "\n#Error:opnd0 of depositbits must be large enough to contain the specified bitfield\n"; + } + return opndsVerf && resTypeVerf && opnd0SizeVerf; +} + +bool IntrinsicopNode::Verify() const +{ + return VerifyOpnds(); +} + +bool TernaryNode::Verify() const +{ + bool comp1Verf = CompatibleTypeVerify(*topnd[kSecondOpnd], *this); + bool comp2Verf = CompatibleTypeVerify(*topnd[kThirdOpnd], *this); + bool opnd0TypeVerf = IsPrimitiveInteger(topnd[kFirstOpnd]->GetPrimType()); + if (!opnd0TypeVerf) { + LogInfo::MapleLogger() << "\n#Error:select-opnd0 must be of integer type\n"; + } + return comp1Verf && comp2Verf && opnd0TypeVerf; +} + +bool SizeoftypeNode::Verify() const +{ + return true; +} + +bool ArrayNode::Verify() const +{ + bool opndsVerf = VerifyOpnds(); + bool resTypeVerf = IsAddress(GetPrimType()); + bool opndsTypeVerf = true; + if (!resTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of array must be in [ptr,ref,a32,a64]\n"; + } + bool opnd0TypeVerf = IsAddress(GetNopndAt(0)->GetPrimType()); + if (!opnd0TypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of array-opnd0 must be in [ptr,ref,a32,a64]\n"; + } + for (size_t i = 1; i < NumOpnds(); ++i) { + if (!IntTypeVerify(GetNopndAt(i)->GetPrimType())) { + opndsTypeVerf = false; 
+ LogInfo::MapleLogger() << "\n#Error:result of the array index operands must be in [i32,u32,i64,u64]\n"; + } + } + return opndsVerf && resTypeVerf && opnd0TypeVerf && opndsTypeVerf; +} + +bool DassignNode::Verify() const +{ + bool structVerf = IsStructureVerify(fieldID, stIdx); + bool rhsVerf = GetRHS()->Verify(); + return structVerf && rhsVerf; +} + +bool AddrofNode::Verify() const +{ + bool pTypeVerf = true; + bool structVerf = IsStructureVerify(fieldID, GetStIdx()); + if (GetOpCode() == OP_dread) { + if (fieldID == 0 && IsStructureTypeKind(GetTypeKind(GetStIdx()))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:if variable is a structure, prim-type should specify agg\n"; + } + } + if (fieldID != 0 && structVerf) { + const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + DEBUG_ASSERT(var != nullptr, "null ptr check"); + MIRType *type = var->GetType(); + auto *stTy = static_cast(type); + if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() + << "\n#Error:if the field itself is a structure, prim-type should specify agg\n"; + } + } + } + } else { + pTypeVerf = IsAddress(GetPrimType()); + if (!pTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + } + return pTypeVerf && structVerf; +} + +bool AddroffuncNode::Verify() const +{ + bool addrTypeVerf = IsAddress(GetPrimType()); + if (!addrTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + return addrTypeVerf; +} + +bool AddroflabelNode::Verify() const +{ + bool addrTypeVerf = IsAddress(GetPrimType()); + if (!addrTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + return addrTypeVerf; +} + +bool IassignNode::Verify() const +{ + bool addrExpVerf = addrExpr->Verify(); + bool rhsVerf = rhs->Verify(); + bool structVerf = true; + if (GetTypeKind(tyIdx) != kTypePointer) { + LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n"; + return false; + } + if (fieldID != 0) { + if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + structVerf = false; + LogInfo::MapleLogger() + << "\n#Error:If field-id is not 0, the computed address must correspond to a structure\n"; + } + } + return addrExpVerf && rhsVerf && structVerf; +} + +bool IassignoffNode::Verify() const +{ + bool addrVerf = GetBOpnd(0)->Verify(); + bool rhsVerf = GetBOpnd(1)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *GetBOpnd(1)); + return addrVerf && rhsVerf && compVerf; +} + +bool IassignFPoffNode::Verify() const +{ + bool rhsVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *Opnd(0)); + return rhsVerf && compVerf; +} + +bool RegassignNode::Verify() const +{ + bool rhsVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *Opnd(0)); + return rhsVerf && compVerf; +} + +bool CondGotoNode::Verify() const +{ + bool opndExprVerf = UnaryStmtNode::Opnd(0)->Verify(); + bool opndTypeVerf = true; + if (!IsPrimitiveInteger(UnaryStmtNode::Opnd(0)->GetPrimType())) { + opndTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:the operand of brfalse and trfalse must be primitive integer\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool SwitchNode::Verify() const +{ + bool opndExprVerf = 
switchOpnd->Verify(); + bool opndTypeVerf = IntTypeVerify(switchOpnd->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of switch must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BinaryStmtNode::Verify() const +{ + return GetBOpnd(0)->Verify() && GetBOpnd(1)->Verify() && CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1)) && + BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1)); +} + +bool RangeGotoNode::Verify() const +{ + bool opndExprVerf = Opnd(0)->Verify(); + bool opndTypeVerf = IntTypeVerify(Opnd(0)->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of rangegoto must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BlockNode::Verify() const +{ + for (auto &stmt : GetStmtNodes()) { + if (!stmt.Verify()) { + return false; + } + } + return true; +} + +bool BlockNode::Verify(VerifyResult &verifyResult) const +{ + auto &nodes = GetStmtNodes(); + return !std::any_of(nodes.begin(), nodes.end(), [&verifyResult](auto &stmt) { return !stmt.Verify(verifyResult); }); +} + +bool DoloopNode::Verify() const +{ + bool startVerf = startExpr->Verify(); + bool contVerf = condExpr->Verify(); + bool incrVerf = incrExpr->Verify(); + bool doBodyVerf = true; + if (doBody) { + doBodyVerf = doBody->Verify(); + } + return startVerf && contVerf && incrVerf && doBodyVerf; +} + +bool IfStmtNode::Verify() const +{ + bool condVerf = Opnd()->Verify(); + bool thenVerf = true; + bool elseVerf = true; + if (thenPart != nullptr) { + thenVerf = thenPart->Verify(); + } + if (elsePart != nullptr) { + elseVerf = elsePart->Verify(); + } + return condVerf && thenVerf && elseVerf; +} + +bool WhileStmtNode::Verify() const +{ + bool condVerf = Opnd(0)->Verify(); + bool bodyVerf = true; + if (body != nullptr) { + bodyVerf = body->Verify(); + } + return condVerf && bodyVerf; +} + +bool NaryStmtNode::Verify() const +{ + return VerifyOpnds(); +} + +bool CallNode::Verify() const +{ + return VerifyOpnds(); +} + +bool IcallNode::Verify() const +{ + bool nOpndsVerf = true; + for (size_t i = 0; i < NumOpnds(); ++i) { + if (!GetNopndAt(i)->Verify()) { + nOpndsVerf = false; + break; + } + } + return nOpndsVerf; +} + +bool IntrinsiccallNode::Verify() const +{ + return VerifyOpnds(); +} + +std::string SafetyCallCheckStmtNode::GetFuncName() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(callFuncNameIdx); +} + +std::string SafetyCallCheckStmtNode::GetStmtFuncName() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(stmtFuncNameIdx); +} + +std::string SafetyCheckStmtNode::GetFuncName() const +{ + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcNameIdx); +} + +bool UnaryNode::IsSameContent(const BaseNode *node) const +{ + auto *unaryNode = dynamic_cast(node); + if ((this == unaryNode) || (unaryNode != nullptr && (GetOpCode() == unaryNode->GetOpCode()) && + (GetPrimType() == unaryNode->GetPrimType()) && + (uOpnd && unaryNode->Opnd(0) && uOpnd->IsSameContent(unaryNode->Opnd(0))))) { + return true; + } else { + return false; + } +} + +bool TypeCvtNode::IsSameContent(const BaseNode *node) const +{ + auto *tyCvtNode = dynamic_cast(node); + if ((this == tyCvtNode) || + (tyCvtNode != nullptr && (fromPrimType == tyCvtNode->FromType()) && UnaryNode::IsSameContent(tyCvtNode))) { + return true; + } else { + return false; + } +} + +bool IreadNode::IsSameContent(const BaseNode *node) const +{ + auto *ireadNode = dynamic_cast(node); + if ((this == ireadNode) || (ireadNode 
!= nullptr && (tyIdx == ireadNode->GetTyIdx()) && + (fieldID == ireadNode->GetFieldID()) && UnaryNode::IsSameContent(ireadNode))) { + return true; + } else { + return false; + } +} + +bool IreadoffNode::IsSameContent(const BaseNode *node) const +{ + auto *ireadoffNode = dynamic_cast(node); + if ((this == ireadoffNode) || (ireadoffNode != nullptr && (GetOffset() == ireadoffNode->GetOffset()) && + UnaryNode::IsSameContent(ireadoffNode))) { + return true; + } else { + return false; + } +} + +bool IreadFPoffNode::IsSameContent(const BaseNode *node) const +{ + auto *ireadFPoffNode = dynamic_cast(node); + if ((this == ireadFPoffNode) || + (ireadFPoffNode != nullptr && (GetOpCode() == ireadFPoffNode->GetOpCode()) && + (GetPrimType() == ireadFPoffNode->GetPrimType()) && (GetOffset() == ireadFPoffNode->GetOffset()))) { + return true; + } else { + return false; + } +} + +bool BinaryOpnds::IsSameContent(const BaseNode *node) const +{ + auto *binaryOpnds = dynamic_cast(node); + if ((this == binaryOpnds) || (binaryOpnds != nullptr && GetBOpnd(0)->IsSameContent(binaryOpnds->GetBOpnd(0)) && + GetBOpnd(1)->IsSameContent(binaryOpnds->GetBOpnd(1)))) { + return true; + } else { + return false; + } +} + +bool BinaryNode::IsSameContent(const BaseNode *node) const +{ + auto *binaryNode = dynamic_cast(node); + if ((this == binaryNode) || + (binaryNode != nullptr && (GetOpCode() == binaryNode->GetOpCode()) && + (GetPrimType() == binaryNode->GetPrimType()) && BinaryOpnds::IsSameContent(binaryNode))) { + return true; + } else { + return false; + } +} + +bool ConstvalNode::IsSameContent(const BaseNode *node) const +{ + auto *constvalNode = dynamic_cast(node); + if (this == constvalNode) { + return true; + } + if (constvalNode == nullptr) { + return false; + } + const MIRConst *mirConst = constvalNode->GetConstVal(); + if (constVal == mirConst) { + return true; + } + if (constVal->GetKind() != mirConst->GetKind()) { + return false; + } + if (constVal->GetKind() == kConstInt) { + // integer may differ in primtype, and they may be different MIRIntConst Node + return static_cast(constVal)->GetValue() == + static_cast(mirConst)->GetValue(); + } else { + return false; + } +} + +bool ConststrNode::IsSameContent(const BaseNode *node) const +{ + if (node->GetOpCode() != OP_conststr) { + return false; + } + auto *cstrNode = static_cast(node); + return strIdx == cstrNode->strIdx; +} + +bool Conststr16Node::IsSameContent(const BaseNode *node) const +{ + if (node->GetOpCode() != OP_conststr16) { + return false; + } + auto *cstr16Node = static_cast(node); + return strIdx == cstr16Node->strIdx; +} + +bool AddrofNode::IsSameContent(const BaseNode *node) const +{ + auto *addrofNode = dynamic_cast(node); + if ((this == addrofNode) || + (addrofNode != nullptr && (GetOpCode() == addrofNode->GetOpCode()) && + (GetPrimType() == addrofNode->GetPrimType()) && (GetNumOpnds() == addrofNode->GetNumOpnds()) && + (stIdx.FullIdx() == addrofNode->GetStIdx().FullIdx()) && (fieldID == addrofNode->GetFieldID()))) { + return true; + } else { + return false; + } +} + +bool DreadoffNode::IsSameContent(const BaseNode *node) const +{ + auto *dreaddoffNode = dynamic_cast(node); + if ((this == dreaddoffNode) || (dreaddoffNode != nullptr && (GetOpCode() == dreaddoffNode->GetOpCode()) && + (GetPrimType() == dreaddoffNode->GetPrimType()) && + (stIdx == dreaddoffNode->stIdx) && (offset == dreaddoffNode->offset))) { + return true; + } else { + return false; + } +} + +bool RegreadNode::IsSameContent(const BaseNode *node) const +{ + auto *regreadNode = 
dynamic_cast(node); + if ((this == regreadNode) || + (regreadNode != nullptr && (GetOpCode() == regreadNode->GetOpCode()) && + (GetPrimType() == regreadNode->GetPrimType()) && (regIdx == regreadNode->GetRegIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroffuncNode::IsSameContent(const BaseNode *node) const +{ + auto *addroffuncNode = dynamic_cast(node); + if ((this == addroffuncNode) || + (addroffuncNode != nullptr && (GetOpCode() == addroffuncNode->GetOpCode()) && + (GetPrimType() == addroffuncNode->GetPrimType()) && (puIdx == addroffuncNode->GetPUIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroflabelNode::IsSameContent(const BaseNode *node) const +{ + auto *addroflabelNode = dynamic_cast(node); + if ((this == addroflabelNode) || + (addroflabelNode != nullptr && (GetOpCode() == addroflabelNode->GetOpCode()) && + (GetPrimType() == addroflabelNode->GetPrimType()) && (offset == addroflabelNode->GetOffset()))) { + return true; + } else { + return false; + } +} +} // namespace maple diff --git a/ecmascript/compiler/codegen/maple/maple_ir/src/mir_parser.cpp b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_parser.cpp new file mode 100755 index 0000000000000000000000000000000000000000..8ecd39672563020cc786c2004962ba1f63a246eb --- /dev/null +++ b/ecmascript/compiler/codegen/maple/maple_ir/src/mir_parser.cpp @@ -0,0 +1,3632 @@ +/* + * Copyright (c) 2023 Huawei Device Co., Ltd. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mir_parser.h" +#include "mir_function.h" +#include "opcode_info.h" + +namespace maple { +std::map MIRParser::funcPtrMapForParseExpr = + MIRParser::InitFuncPtrMapForParseExpr(); +std::map MIRParser::funcPtrMapForParseStmt = + MIRParser::InitFuncPtrMapForParseStmt(); +std::map MIRParser::funcPtrMapForParseStmtBlock = + MIRParser::InitFuncPtrMapForParseStmtBlock(); + +bool MIRParser::ParseStmtDassign(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_dassign) { + Error("expect dassign but get "); + return false; + } + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + DEBUG_ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + auto *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetStIdx(stidx); + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { // may be a field id + assignStmt->SetFieldID(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtDassignoff(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_dassignoff) { + Error("expect dassignoff but get "); + return false; + } + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + PrimType primType = GetPrimitiveType(lexer.GetTokenKind()); + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + DEBUG_ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + DassignoffNode *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetPrimType(primType); + assignStmt->stIdx = stidx; + TokenKind nextToken = lexer.NextToken(); + // parse offset + if (nextToken == TK_intconst) { + assignStmt->offset = static_cast(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } else { + Error("expect integer offset but get "); + return false; + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRegassign(StmtNodePtr &stmt) +{ + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + auto *regAssign = mod.CurFuncCodeMemPool()->New(); + regAssign->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_specialreg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParseSpecialReg(tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParsePseudoReg(regAssign->GetPrimType(), tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + 
return false; + } + } else { + Error("expect special or pseudo register but get "); + return false; + } + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + regAssign->SetOpnd(expr, 0); + if (regAssign->GetRegIdx() > 0) { // check type consistency for the preg + MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regAssign->GetRegIdx()); + if (preg->GetPrimType() == kPtyInvalid) { + preg->SetPrimType(expr->GetPrimType()); + } else if (preg->GetPrimType() == PTY_dynany) { + if (!IsPrimitiveDynType(expr->GetPrimType())) { + Error("inconsistent preg primitive dynamic type at "); + return false; + } + } + } + stmt = regAssign; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIassign(StmtNodePtr &stmt) +{ + // iAssign <* [10] int> () + if (lexer.GetTokenKind() != TK_iassign) { + Error("expect iassign but get "); + return false; + } + // expect <> derived type + lexer.NextToken(); + TyIdx tyIdx(0); + if (!ParseDerivedType(tyIdx)) { + Error("ParseStmtIassign failed when parsing derived type"); + return false; + } + auto *iAssign = mod.CurFuncCodeMemPool()->New<IassignNode>(); + iAssign->SetTyIdx(tyIdx); + if (lexer.GetTokenKind() == TK_intconst) { + iAssign->SetFieldID(lexer.theIntVal); + lexer.NextToken(); + } + BaseNode *addr = nullptr; + BaseNode *rhs = nullptr; + // parse the two operands: the first is the address, the second the stored value + if (!ParseExprTwoOperand(addr, rhs)) { + return false; + } + iAssign->SetOpnd(addr, 0); + iAssign->SetRHS(rhs); + lexer.NextToken(); + stmt = iAssign; + return true; +} + +bool MIRParser::ParseStmtIassignoff(StmtNodePtr &stmt) +{ + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + // iassign ( , ) + auto *iAssignOff = mod.CurFuncCodeMemPool()->New<IassignoffNode>(); + iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iAssignOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *addr = nullptr; + BaseNode *rhs = nullptr; + if (!ParseExprTwoOperand(addr, rhs)) { + return false; + } + iAssignOff->SetBOpnd(addr, 0); + iAssignOff->SetBOpnd(rhs, 1); + lexer.NextToken(); + stmt = iAssignOff; + return true; +} + +bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) +{ + Opcode op = lexer.GetTokenKind() == TK_iassignfpoff ?
OP_iassignfpoff : OP_iassignspoff; + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + // iassignfpoff ( ) + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(op); + iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iAssignOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + iAssignOff->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = iAssignOff; + return true; +} + +bool MIRParser::ParseStmtBlkassignoff(StmtNodePtr &stmt) +{ + // blkassignoff (, ) + BlkassignoffNode *bassignoff = mod.CurFuncCodeMemPool()->New(); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + bassignoff->offset = static_cast(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_intconst) { + Error("expect align but get "); + return false; + } + bassignoff->SetAlign(static_cast(lexer.GetTheIntVal())); + if (lexer.NextToken() != TK_intconst) { + Error("expect size but get "); + return false; + } + bassignoff->blockSize = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *destAddr = nullptr; + BaseNode *srcAddr = nullptr; + // parse 2 operands, the dest address followed by src address + if (!ParseExprTwoOperand(destAddr, srcAddr)) { + return false; + } + bassignoff->SetOpnd(destAddr, 0); + bassignoff->SetOpnd(srcAddr, 1); + lexer.NextToken(); + stmt = bassignoff; + return true; +} + +bool MIRParser::ParseStmtDoloop(StmtNodePtr &stmt) +{ + // syntax: doloop (, , ) { + // } + auto *doLoopNode = mod.CurFuncCodeMemPool()->New(); + stmt = doLoopNode; + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_preg) { + uint32 pregNo = static_cast(lexer.GetTheIntVal()); + MIRFunction *mirFunc = mod.CurFunction(); + PregIdx pregIdx = mirFunc->GetPregTab()->EnterPregNo(pregNo, kPtyInvalid); + doLoopNode->SetIsPreg(true); + doLoopNode->SetDoVarStFullIdx(pregIdx); + // let other appearances handle the preg primitive type + } else { + StIdx stIdx; + if (!ParseDeclaredSt(stIdx)) { + return false; + } + if (stIdx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDoloop"); + return false; + } + if (stIdx.IsGlobal()) { + Error("expect local variable for doloop var but get "); + return false; + } + doLoopNode->SetDoVarStIdx(stIdx); + } + // parse ( + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + // parse start expression + lexer.NextToken(); + BaseNode *start = nullptr; + if (!ParseExpression(start)) { + Error("ParseStmtDoloop when parsing start expression"); + return false; + } + if (doLoopNode->IsPreg()) { + auto regIdx = static_cast(doLoopNode->GetDoVarStIdx().FullIdx()); + MIRPreg *mpReg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); + if (mpReg->GetPrimType() == kPtyInvalid) { + CHECK_FATAL(start != nullptr, "null ptr check"); + mpReg->SetPrimType(start->GetPrimType()); + } + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after start expression but get "); + return false; + } + doLoopNode->SetStartExpr(start); + // parse end expression + lexer.NextToken(); + BaseNode *end = nullptr; + if (!ParseExpression(end)) { // here should be a compare expression + Error("ParseStmtDoloop when parsing end expression"); + return false; + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after condition expression but get "); + 
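// for reference, a hypothetical well-formed doloop (illustrative only, not from this change): + // doloop %i (constval i32 0, lt u1 i32 (dread i32 %i, constval i32 10), constval i32 1) { ... } +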
return false; + } + doLoopNode->SetContExpr(end); + // parse renew induction expression + lexer.NextToken(); + BaseNode *induction = nullptr; + if (!ParseExpression(induction)) { + Error("ParseStmtDoloop when parsing induction"); + return false; + } + // parse ) + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing doloop but get "); + return false; + } + doLoopNode->SetIncrExpr(induction); + // parse body of the loop + lexer.NextToken(); + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("ParseStmtDoloop when parsing body of the loop"); + return false; + } + doLoopNode->SetDoBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtForeachelem(StmtNodePtr &stmt) +{ + // syntax: foreachelem <elem-var> <array/collection-var> { + // } + auto *forNode = mod.CurFuncCodeMemPool()->New<ForeachelemNode>(); + stmt = forNode; + lexer.NextToken(); // skip foreachelem token + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing element variable of foreachelem in "); + return false; + } + if (stidx.IsGlobal()) { + Error("illegal global scope for element variable for foreachelem in "); + return false; + } + forNode->SetElemStIdx(stidx); + lexer.NextToken(); + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing array/collection variable of foreachelem in "); + return false; + } + forNode->SetArrayStIdx(stidx); + lexer.NextToken(); + // parse body of the loop + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("error when parsing body of foreachelem loop in "); + return false; + } + forNode->SetLoopBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtIf(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_if) { + Error("expect if but get "); + return false; + } + auto *ifStmt = mod.CurFuncCodeMemPool()->New<IfStmtNode>(); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + ifStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *thenBlock = nullptr; + if (!ParseStmtBlock(thenBlock)) { + Error("ParseStmtIf failed when parsing then block"); + return false; + } + ifStmt->SetThenPart(thenBlock); + BlockNode *elseBlock = nullptr; + if (lexer.GetTokenKind() == TK_else) { + // has else part + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin else body but get "); + return false; + } + if (!ParseStmtBlock(elseBlock)) { + Error("ParseStmtIf failed when parsing else block"); + return false; + } + ifStmt->SetElsePart(elseBlock); + } + stmt = ifStmt; + return true; +} + +bool MIRParser::ParseStmtWhile(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_while) { + Error("expect while but get "); + return false; + } + auto *whileStmt = mod.CurFuncCodeMemPool()->New<WhileStmtNode>(OP_while); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin while body but get "); + return false; + } + BlockNode *whileBody = nullptr; + if (!ParseStmtBlock(whileBody)) { + Error("ParseStmtWhile failed when parsing while body"); + return false; + } + whileStmt->SetBody(whileBody); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtDowhile(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_dowhile) { + Error("expect dowhile but get "); + return false; + } + auto *whileStmt = mod.CurFuncCodeMemPool()->New<WhileStmtNode>(OP_dowhile); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin dowhile body but get "); + return false; + } + BlockNode *doWhileBody = nullptr; + if (!ParseStmtBlock(doWhileBody)) { + Error("ParseStmtDowhile failed when parsing dowhile body"); + return false; + } + whileStmt->SetBody(doWhileBody); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtLabel(StmtNodePtr &stmt) +{ + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } else { + if (definedLabels.size() > labIdx && definedLabels[labIdx]) { + Error("label multiply declared "); + return false; + } + } + if (definedLabels.size() <= labIdx) { + definedLabels.resize(labIdx + 1); + } + definedLabels[labIdx] = true; + auto *labNode = mod.CurFuncCodeMemPool()->New<LabelNode>(); + labNode->SetLabelIdx(labIdx); + stmt = labNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtGoto(StmtNodePtr &stmt) +{ + if (lexer.GetTokenKind() != TK_goto) { + Error("expect goto but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in goto but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *gotoNode = mod.CurFuncCodeMemPool()->New<GotoNode>(OP_goto); + gotoNode->SetOffset(labIdx); + stmt = gotoNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBr(StmtNodePtr &stmt) +{ + TokenKind tk = lexer.GetTokenKind(); + if (tk != TK_brtrue && tk != TK_brfalse) { + Error("expect brtrue/brfalse but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in brtrue/brfalse but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *condGoto = mod.CurFuncCodeMemPool()->New<CondGotoNode>(tk == TK_brtrue ?
OP_brtrue : OP_brfalse); + condGoto->SetOffset(labIdx); + lexer.NextToken(); + // parse () + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + condGoto->SetOpnd(expr, 0); + stmt = condGoto; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseSwitchCase(int64 &constVal, LabelIdx &lblIdx) +{ + // syntax : goto + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect intconst in switch but get "); + return false; + } + constVal = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_colon) { + Error("expect : in switch but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in switch case but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in switch but get "); + return false; + } + lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSwitch(StmtNodePtr &stmt) +{ + auto *switchNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = switchNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + switchNode->SetSwitchOpnd(expr); + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + if (lexer.NextToken() == TK_label) { + switchNode->SetDefaultLabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else { + Error("expect label in switch but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... + // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + switchNode->InsertCasePair(CasePair(constVal, lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRangegoto(StmtNodePtr &stmt) +{ + auto *rangeGotoNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = rangeGotoNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + rangeGotoNode->SetOpnd(expr, 0); + if (lexer.NextToken() == TK_intconst) { + rangeGotoNode->SetTagOffset(static_cast(lexer.GetTheIntVal())); + } else { + Error("expect tag offset in rangegoto but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... 
+ // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + int32 minIdx = UINT16_MAX; + int32 maxIdx = 0; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (constVal > UINT16_MAX || constVal < 0) { + Error("rangegoto case tag not within unsigned 16 bits range "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + if (constVal < minIdx) { + minIdx = static_cast(constVal); + } + if (constVal > maxIdx) { + maxIdx = static_cast(constVal); + } + rangeGotoNode->AddRangeGoto(static_cast(constVal), static_cast(lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + DEBUG_ASSERT(rangeGotoNode->GetNumOpnds() == 1, "Rangegoto is a UnaryOpnd; numOpnds must be 1"); + // check there is no gap + if (static_cast(static_cast(maxIdx - minIdx) + 1) != casesSet.size()) { + Error("gap not allowed in rangegoto case tags "); + return false; + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtMultiway(StmtNodePtr &stmt) +{ + auto *multiwayNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = multiwayNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + multiwayNode->SetMultiWayOpnd(expr); + if (lexer.NextToken() == TK_label) { + multiwayNode->SetDefaultlabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else { + Error("expect label in multiway but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // (): goto + // (): goto + // ... + // (): goto + TokenKind tk = lexer.NextToken(); + while (tk != TK_rbrace) { + BaseNode *x = nullptr; + if (!ParseExprOneOperand(x)) { + return false; + } + if (lexer.NextToken() != TK_colon) { + Error("expect : parsing multiway case tag specification but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in multiway case expression but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect goto label after multiway case expression but get "); + return false; + } + LabelIdx lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + multiwayNode->AppendElemToMultiWayTable(MCasePair(static_cast(x), lblIdx)); + tk = lexer.GetTokenKind(); + } + const MapleVector &multiWayTable = multiwayNode->GetMultiWayTable(); + multiwayNode->SetNumOpnds(multiWayTable.size()); + lexer.NextToken(); + return true; +} + +// used only when parsing mmpl +PUIdx MIRParser::EnterUndeclaredFunction(bool isMcount) +{ + std::string funcName; + if (isMcount) { + funcName = "_mcount"; + } else { + funcName = lexer.GetName(); + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + auto *fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = mod.GetMemPool()->New(); + fn->SetMIRFuncType(funcType); + if (isMcount) { + MIRType *retType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_void)); + fn->SetReturnTyIdx(retType->GetTypeIndex()); + } + return fn->GetPuidx(); +} + +bool MIRParser::ParseStmtCallMcount(StmtNodePtr &stmt) +{ + // syntax: call (, ..., ) + Opcode o = OP_call; + PUIdx pIdx = EnterUndeclaredFunction(true); + auto *callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetPUIdx(pIdx); + MapleVector opndsvec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + callStmt->SetNOpnd(opndsvec); + callStmt->SetNumOpnds(opndsvec.size()); + stmt = callStmt; + return true; +} + +bool MIRParser::ParseStmtCall(StmtNodePtr &stmt) +{ + // syntax: call (, ..., ) + TokenKind tk = lexer.GetTokenKind(); + Opcode o = GetOpFromToken(tk); + DEBUG_ASSERT(kOpcodeInfo.IsCall(o), "ParseStmtCall: not a call opcode"); + bool hasAssigned = kOpcodeInfo.IsCallAssigned(o); + bool hasInstant = false; + bool withType = false; + switch (tk) { + case TK_polymorphiccall: + case TK_polymorphiccallassigned: + withType = true; + break; + case TK_callinstant: + case TK_virtualcallinstant: + case TK_superclasscallinstant: + case TK_interfacecallinstant: + case TK_callinstantassigned: + case TK_virtualcallinstantassigned: + case TK_superclasscallinstantassigned: + case TK_interfacecallinstantassigned: + hasInstant = true; + break; + default: + break; + } + TyIdx polymophicTyidx(0); + if (o == OP_polymorphiccallassigned || o == OP_polymorphiccall) { + TokenKind nextTk = lexer.NextToken(); + if (nextTk == TK_langle) { + nextTk = lexer.NextToken(); + if (nextTk == TK_func) { + lexer.NextToken(); + if (!ParseFuncType(polymophicTyidx)) { + Error("error parsing functype in ParseStmtCall for polymorphiccallassigned at "); + return false; + } + } else { + Error("expect func in functype but get "); + return false; + } + } else { + Error("expect < in functype but get "); + return false; + } + } + TokenKind funcTk = lexer.NextToken(); + if (funcTk != TK_fname) { + Error("expect func name in call but get "); + return false; + } + PUIdx pIdx; + if (!ParseDeclaredFunc(pIdx)) { + if (mod.GetFlavor() < kMmpl) { + Error("expect .mmpl"); + return false; + } + pIdx = EnterUndeclaredFunction(); + } + lexer.NextToken(); + CallNode *callStmt = nullptr; + CallinstantNode *callInstantStmt = nullptr; + if (withType) { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetTyIdx(polymophicTyidx); + } else if (hasInstant) { + TokenKind langleTk = lexer.GetTokenKind(); + if (langleTk != TK_langle) { + Error("missing < in generic method instantiation at "); + return false; + } + TokenKind lbraceTk = lexer.NextToken(); + if (lbraceTk != TK_lbrace) { + Error("missing { in generic method instantiation at "); + return false; + } + MIRInstantVectorType instVecTy; + if (!ParseGenericInstantVector(instVecTy)) { + Error("error parsing generic method instantiation at "); + return false; + } + TokenKind rangleTk = lexer.GetTokenKind(); + if (rangleTk != TK_rangle) { + Error("missing > in generic method instantiation at "); + return false; + } + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&instVecTy); + callInstantStmt = mod.CurFuncCodeMemPool()->New(mod, o, tyIdx); + callStmt = callInstantStmt; + lexer.NextToken(); // skip the > + } else { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + } + callStmt->SetPUIdx(pIdx); + + MIRFunction *callee = GlobalTables::GetFunctionTable().GetFuncTable()[pIdx]; + callee->GetFuncSymbol()->SetAppearsInCode(true); + if (callee->GetName() == "setjmp") { + mod.CurFunction()->SetHasSetjmp(); + } + + MapleVector 
opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + callStmt->SetNOpnd(opndsVec); + callStmt->SetNumOpnds(opndsVec.size()); + if (hasAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + if (!hasInstant) { + DEBUG_ASSERT(callStmt != nullptr, "callstmt is null in MIRParser::ParseStmtCall"); + callStmt->SetReturnVec(retsVec); + } else { + DEBUG_ASSERT(callInstantStmt != nullptr, "callinstantstmt is null in MIRParser::ParseStmtCall"); + callInstantStmt->SetReturnVec(retsVec); + } + } + lexer.NextToken(); + stmt = callStmt; + return true; +} + +bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt, Opcode op) +{ + // syntax: icall (, , ..., ) + // icallassigned (, ..., ) { + // dassign + // dassign + // . . . + // dassign } + // icallproto (, , ..., ) + // icallprotoassigned (, , ..., ) { + // dassign + // dassign + // . . . + // dassign } + IcallNode *iCallStmt = mod.CurFuncCodeMemPool()->New(mod, op); + lexer.NextToken(); + if (op == OP_icallproto || op == OP_icallprotoassigned) { + TyIdx tyIdx(0); + if (!ParseDerivedType(tyIdx)) { + Error("error parsing type in ParseStmtIcall for icallproto at "); + return false; + } + iCallStmt->SetRetTyIdx(tyIdx); + } + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + iCallStmt->SetNOpnd(opndsVec); + iCallStmt->SetNumOpnds(opndsVec.size()); + if (op == OP_icallassigned || op == OP_icallprotoassigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + iCallStmt->SetReturnVec(retsVec); + } + lexer.NextToken(); + stmt = iCallStmt; + return true; +} + +bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt) +{ + return ParseStmtIcall(stmt, OP_icall); +} + +bool MIRParser::ParseStmtIcallassigned(StmtNodePtr &stmt) +{ + return ParseStmtIcall(stmt, OP_icallassigned); +} + +bool MIRParser::ParseStmtIcallproto(StmtNodePtr &stmt) +{ + return ParseStmtIcall(stmt, OP_icallproto); +} + +bool MIRParser::ParseStmtIcallprotoassigned(StmtNodePtr &stmt) +{ + return ParseStmtIcall(stmt, OP_icallprotoassigned); +} + +bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned) +{ + Opcode o = !isAssigned ? (lexer.GetTokenKind() == TK_intrinsiccall ? OP_intrinsiccall : OP_xintrinsiccall) + : (lexer.GetTokenKind() == TK_intrinsiccallassigned ? OP_intrinsiccallassigned + : OP_xintrinsiccallassigned); + auto *intrnCallNode = mod.CurFuncCodeMemPool()->New(mod, o); + lexer.NextToken(); + if (o == !isAssigned ? 
OP_intrinsiccall : OP_intrinsiccallassigned) { + intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind())); + } else { + intrnCallNode->SetIntrinsic(static_cast(lexer.GetTheIntVal())); + } + lexer.NextToken(); + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + intrnCallNode->SetNOpnd(opndsVec); + intrnCallNode->SetNumOpnds(opndsVec.size()); + if (isAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + // store return type of IntrinsiccallNode + if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) { + MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx()); + CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + intrnCallNode->SetPrimType(retType->GetPrimType()); + } + intrnCallNode->SetReturnVec(retsVec); + } + stmt = intrnCallNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt) +{ + return ParseStmtIntrinsiccall(stmt, false); +} + +bool MIRParser::ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt) +{ + return ParseStmtIntrinsiccall(stmt, true); +} + +bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned) +{ + Opcode o = (!isAssigned) ? OP_intrinsiccallwithtype : OP_intrinsiccallwithtypeassigned; + IntrinsiccallNode *intrnCallNode = mod.CurFuncCodeMemPool()->New(mod, o); + TokenKind tk = lexer.NextToken(); + TyIdx tyIdx(0); + if (IsPrimitiveType(tk)) { + if (!ParsePrimType(tyIdx)) { + Error("expect primitive type in ParseStmtIntrinsiccallwithtype but get "); + return false; + } + } else if (!ParseDerivedType(tyIdx)) { + Error("error parsing type in ParseStmtIntrinsiccallwithtype at "); + return false; + } + intrnCallNode->SetTyIdx(tyIdx); + intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind())); + lexer.NextToken(); + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + intrnCallNode->SetNOpnd(opndsVec); + intrnCallNode->SetNumOpnds(opndsVec.size()); + if (isAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + // store return type of IntrinsiccallNode + if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) { + MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx()); + CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallwithtypeAssigned"); + intrnCallNode->SetPrimType(retType->GetPrimType()); + } + intrnCallNode->SetReturnVec(retsVec); + } + stmt = intrnCallNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt) +{ + return ParseStmtIntrinsiccallwithtype(stmt, false); +} + +bool MIRParser::ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt) +{ + return ParseStmtIntrinsiccallwithtype(stmt, true); +} + +bool MIRParser::ParseCallReturnPair(CallReturnPair &retpair) +{ + bool isst = (lexer.GetTokenKind() == TK_dassign); + if (isst) { + // parse %i + lexer.NextToken(); + StIdx stidx; + // How to use islocal?? 
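+ // hypothetical illustration of the two return-assignment forms handled here: + // callassigned &foo () { dassign %retval 0 } stores the result into a symbol, + // callassigned &foo () { regassign i32 %1 } stores it into a pseudo register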
+ if (!ParseDeclaredSt(stidx)) { + return false; + } + if (lexer.GetTokenKind() == TK_lname) { + MIRSymbolTable *lSymTab = mod.CurFunction()->GetSymTab(); + MIRSymbol *lSym = lSymTab->GetSymbolFromStIdx(stidx.Idx(), 0); + DEBUG_ASSERT(lSym != nullptr, "lsym MIRSymbol is null"); + if (lSym->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lSym->GetTyIdx()); + auto *ptrTy = static_cast(ty->CopyMIRTypeNode()); + DEBUG_ASSERT(ptrTy != nullptr, "null ptr check"); + ptrTy->SetPrimType(GetExactPtrPrimType()); + TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(ptrTy); + delete ptrTy; + lSym->SetTyIdx(newTyidx); + } + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing call return assignment but get"); + return false; + } + uint16 fieldId = 0; + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { + fieldId = lexer.GetTheIntVal(); + lexer.NextToken(); + } + RegFieldPair regFieldPair; + regFieldPair.SetFieldID(fieldId); + retpair = CallReturnPair(stidx, regFieldPair); + } else { + // parse type + lexer.NextToken(); + TyIdx tyidx(0); + // RegreadNode regreadexpr; + bool ret = ParsePrimType(tyidx); + if (ret != true) { + Error("call ParsePrimType failed in ParseCallReturns"); + return false; + } + if (tyidx == 0u) { + Error("expect primitive type but get "); + return false; + } + PrimType ptype = GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx); + PregIdx pregIdx; + if (lexer.GetTokenKind() == TK_specialreg) { + if (!ParseSpecialReg(pregIdx)) { + Error("expect specialreg parsing callassign CallReturnVector"); + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + if (!ParsePseudoReg(ptype, pregIdx)) { + Error("expect pseudoreg parsing callassign CallReturnVector"); + return false; + } + } else { + Error("expect special or pseudo register but get "); + return false; + } + DEBUG_ASSERT(pregIdx > 0, "register number is zero"); + DEBUG_ASSERT(pregIdx <= 0xffff, "register number is over 16 bits"); + RegFieldPair regFieldPair; + regFieldPair.SetPregIdx(pregIdx); + retpair = CallReturnPair(StIdx(), regFieldPair); + } + return true; +} + +bool MIRParser::ParseCallReturns(CallReturnVector &retsvec) +{ + // { + // dassign + // dassign + // . . . + // dassign } + // OR + // { + // regassign + // regassign + // regassign + // } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { parsing call return values. "); + return false; + } + TokenKind tk = lexer.NextToken(); + CallReturnPair retpair; + while (tk != TK_rbrace) { + if (lexer.GetTokenKind() != TK_dassign && lexer.GetTokenKind() != TK_regassign) { + Error("expect dassign/regassign but get "); + return false; + } + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. 
"); + return false; + } + retsvec.push_back(retpair); + tk = lexer.GetTokenKind(); + } + return true; +} + +bool MIRParser::ParseStmtAsm(StmtNodePtr &stmt) +{ + AsmNode *asmNode = mod.CurFuncCodeMemPool()->New(&mod.GetCurFuncCodeMPAllocator()); + mod.CurFunction()->SetHasAsm(); + lexer.NextToken(); + // parse qualifiers + while (lexer.GetTokenKind() == TK_volatile || lexer.GetTokenKind() == TK_inline || + lexer.GetTokenKind() == TK_goto) { + AsmQualifierKind qual; + switch (lexer.GetTokenKind()) { + case TK_volatile: { + qual = kASMvolatile; + break; + } + case TK_inline: { + qual = kASMinline; + break; + } + case TK_goto: + default: { + qual = kASMgoto; + break; + } + } + asmNode->SetQualifier(qual); + lexer.NextToken(); + } + // parse open brace + if (lexer.GetTokenKind() != TK_lbrace) { + Error("Open brace not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse asm string + if (lexer.GetTokenKind() != TK_string) { + Error("asm string not found parsing asm statement."); + return false; + } + asmNode->asmString = lexer.GetName(); + lexer.NextToken(); + // parse first colon + if (lexer.GetTokenKind() != TK_colon) { + Error("first colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse outputs + UStrIdx uStrIdx; + CallReturnPair retpair; + while (lexer.GetTokenKind() == TK_string) { + // parse an output constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + lexer.NextToken(); + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. "); + return false; + } + asmNode->outputConstraints.push_back(uStrIdx); + asmNode->asmOutputs.push_back(retpair); + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + } + } + // parse second colon + if (lexer.GetTokenKind() != TK_colon) { + Error("second colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse inputs + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + if (lexer.GetName()[0] == '+') { + asmNode->SetHasWriteInputs(); + } + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseExpression failed"); + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) but get "); + return false; + } + asmNode->inputConstraints.push_back(uStrIdx); + asmNode->GetNopnd().push_back(expr); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + asmNode->SetNumOpnds(static_cast(asmNode->GetNopndSize())); + // parse third colon + if (lexer.GetTokenKind() != TK_colon) { + Error("third colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse clobber list + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + asmNode->clobberList.push_back(uStrIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse fourth colon + if (lexer.GetTokenKind() != TK_colon) { + Error("fourth colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse labels + while (lexer.GetTokenKind() == TK_label) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = 
mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + asmNode->gotoLabels.push_back(labIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse closing brace + if (lexer.GetTokenKind() != TK_rbrace) { + Error("Closing brace not found parsing asm statement."); + return false; + } + stmt = asmNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSafeRegion(StmtNodePtr &stmt) +{ + switch (lexer.GetTokenKind()) { + case TK_safe: + safeRegionFlag.push(true); + break; + case TK_unsafe: + safeRegionFlag.push(false); + break; + case TK_endsafe: + case TK_endunsafe: + safeRegionFlag.pop(); + break; + default: + Error("Only support safe/unsafe/endsafe/endunsafe."); + return false; + } + (void)stmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtJsTry(StmtNodePtr &stmt) +{ + auto *tryNode = mod.CurFuncCodeMemPool()->New(); + lexer.NextToken(); + // parse handler label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetCatchOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetCatchOffset(labidx); + } + lexer.NextToken(); + // parse finally label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetFinallyOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect finally label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetFinallyOffset(labidx); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtTry(StmtNodePtr &stmt) +{ + auto *tryNode = mod.CurFuncCodeMemPool()->New(mod); + lexer.NextToken(); + DEBUG_ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in try but get "); + lexer.NextToken(); + // parse handler label + while (lexer.GetTokenKind() != TK_rbrace) { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->AddOffset(labidx); + lexer.NextToken(); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool 
MIRParser::ParseStmtCatch(StmtNodePtr &stmt) +{ + auto *catchNode = mod.CurFuncCodeMemPool()->New<CatchNode>(mod); + lexer.NextToken(); + DEBUG_ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in catch but get "); + lexer.NextToken(); + while (lexer.GetTokenKind() != TK_rbrace) { + TyIdx tyidx(0); + if (!ParseType(tyidx)) { + Error("expect type parsing java catch statement"); + return false; + } + catchNode->PushBack(tyidx); + } + catchNode->SetNumOpnds(0); + stmt = catchNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmt(Opcode op, StmtNodePtr &stmt) +{ + lexer.NextToken(); + auto *throwStmt = mod.CurFuncCodeMemPool()->New<UnaryStmtNode>(op); + stmt = throwStmt; + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + throwStmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmtThrow(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_throw, stmt); +} + +bool MIRParser::ParseUnaryStmtDecRef(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_decref, stmt); +} + +bool MIRParser::ParseUnaryStmtIncRef(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_incref, stmt); +} + +bool MIRParser::ParseUnaryStmtDecRefReset(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_decrefreset, stmt); +} + +bool MIRParser::ParseUnaryStmtIGoto(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_igoto, stmt); +} + +bool MIRParser::ParseUnaryStmtEval(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_eval, stmt); +} + +bool MIRParser::ParseUnaryStmtFree(StmtNodePtr &stmt) +{ + return ParseUnaryStmt(OP_free, stmt); +} + +bool MIRParser::ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt) +{ + std::string funcName; + std::string stmtFuncName; + int index = 0; + if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) { + Error("ParseCallAssertInfo failed"); + return false; + } + lexer.NextToken(); + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName); + stmt = mod.CurFuncCodeMemPool()->New<CallAssertNonnullStmtNode>(OP_callassertnonnull, stridx, index, stmtstridx); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + stmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseAssertInfo(std::string &funcName) +{ + if (lexer.NextToken() != TK_langle) { + Error("expect < parsing safety assert check "); + return false; + } + if (lexer.NextToken() != TK_fname) { + Error("expect &funcname parsing safety assert check "); + return false; + } + funcName = lexer.GetName(); + if (lexer.NextToken() != TK_rangle) { + Error("expect > parsing safety assert check "); + return false; + } + return true; +} + +bool MIRParser::ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt) +{ + std::string funcName; + if (!ParseAssertInfo(funcName)) { + Error("ParseAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + lexer.NextToken(); + stmt = mod.CurFuncCodeMemPool()->New<AssertNonnullStmtNode>(op, stridx); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + stmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt) +{ + if (mod.IsCModule()) { + return ParseUnaryStmtAssertNonNullCheck(OP_assertnonnull, stmt); + } else { + return ParseUnaryStmt(OP_assertnonnull, stmt); + } +} + +bool MIRParser::ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt) +{ + return ParseUnaryStmtAssertNonNullCheck(OP_assignassertnonnull, stmt); +} + +bool MIRParser::ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt) +{ + return ParseUnaryStmtAssertNonNullCheck(OP_returnassertnonnull, stmt); +} + +bool MIRParser::ParseStmtMarker(StmtNodePtr &stmt) +{ + Opcode op; + switch (paramTokenKindForStmt) { + case TK_jscatch: + op = OP_jscatch; + break; + case TK_finally: + op = OP_finally; + break; + case TK_cleanuptry: + op = OP_cleanuptry; + break; + case TK_endtry: + op = OP_endtry; + break; + case TK_retsub: + op = OP_retsub; + break; + case TK_membaracquire: + op = OP_membaracquire; + break; + case TK_membarrelease: + op = OP_membarrelease; + break; + case TK_membarstoreload: + op = OP_membarstoreload; + break; + case TK_membarstorestore: + op = OP_membarstorestore; + break; + default: + return false; + } + auto *stmtNode = mod.CurFuncCodeMemPool()->New<StmtNode>(op); + stmt = stmtNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtGosub(StmtNodePtr &stmt) +{ + if (lexer.NextToken() != TK_label) { + Error("expect finally label in gosub but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + auto *goSubNode = mod.CurFuncCodeMemPool()->New<GotoNode>(OP_gosub); + goSubNode->SetOffset(labidx); + stmt = goSubNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseBinaryStmt(StmtNodePtr &stmt, Opcode op) +{ + auto *assStmt = mod.CurFuncCodeMemPool()->New<BinaryStmtNode>(op); + lexer.NextToken(); + BaseNode *opnd0 = nullptr; + BaseNode *opnd1 = nullptr; + if (!ParseExprTwoOperand(opnd0, opnd1)) { + return false; + } + assStmt->SetBOpnd(opnd0, 0); + assStmt->SetBOpnd(opnd1, 1); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op) +{ + std::string funcName; + if (!ParseAssertInfo(funcName)) { + Error("ParseAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + auto *assStmt = mod.CurFuncCodeMemPool()->New<AssertBoundaryStmtNode>(mod, op, stridx); + if (!ParseNaryExpr(*assStmt)) { + Error("ParseNaryStmtAssert failed"); + return false; + } + assStmt->SetNumOpnds(static_cast<uint8>(assStmt->GetNopndSize())); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtAssertGE(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_assertge); +} + +bool MIRParser::ParseNaryStmtAssertLT(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_assertlt); +} + +bool MIRParser::ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_returnassertle); +} + +bool MIRParser::ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_assignassertle); +} + +bool MIRParser::ParseNaryStmtCalcassertGE(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_calcassertge); +} + +bool MIRParser::ParseNaryStmtCalcassertLT(StmtNodePtr &stmt) +{ + return ParseNaryStmtAssert(stmt, OP_calcassertlt); +} + +bool MIRParser::ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName) +{ + if (lexer.NextToken() != TK_langle) { + Error("expect < parsing safety call check "); + return false; + } + if (lexer.NextToken() != TK_fname) { + Error("expect &funcname parsing safety call check "); + return false; + } + funcName = lexer.GetName(); + if (lexer.NextToken() != TK_coma) { + Error("expect , parsing safety call check "); + return false; + } + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst parsing safety call check "); + return false; + } + *paramIndex = static_cast<int>(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_coma) { + Error("expect , parsing safety call check "); + return false; + } + if (lexer.NextToken() != TK_fname) { + Error("expect &stmtfuncname parsing safety call check "); + return false; + } + stmtFuncName = lexer.GetName(); + if (lexer.NextToken() != TK_rangle) { + Error("expect > parsing safety call check "); + return false; + } + return true; +} + +bool MIRParser::ParseNaryStmtCallAssertLE(StmtNodePtr &stmt) +{ + std::string funcName; + std::string stmtFuncName; + int index = 0; + if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) { + Error("ParseCallAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName); + auto *assStmt = + mod.CurFuncCodeMemPool()->New<CallAssertBoundaryStmtNode>(mod, OP_callassertle, stridx, index, stmtstridx); + if (!ParseNaryExpr(*assStmt)) { + Error("ParseNaryExpr failed"); + return false; + } + assStmt->SetNumOpnds(static_cast<uint8>(assStmt->GetNopndSize())); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryExpr(NaryStmtNode &stmtNode) +{ + if (lexer.NextToken() != TK_lparen) { + Error("expect ( parsing NaryExpr "); + return false; + } + (void)lexer.NextToken(); // skip TK_lparen + while (lexer.GetTokenKind() != TK_rparen) { + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtNode.GetNopnd().push_back(expr); + if (lexer.GetTokenKind() != TK_coma && lexer.GetTokenKind() != TK_rparen) { + Error("expect , or ) parsing NaryStmt"); + return false; + } + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + } + } + return true; +} + +bool MIRParser::ParseNaryStmt(StmtNodePtr &stmt, Opcode op) +{ + auto *stmtReturn = mod.CurFuncCodeMemPool()->New<NaryStmtNode>(mod, op); + if (op == OP_syncenter) { // old code reconstruct later + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtReturn->GetNopnd().push_back(expr); + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + BaseNode *exprSync = nullptr; + if (!ParseExpression(exprSync)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtReturn->GetNopnd().push_back(exprSync); + } else { + MIRType *intType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast<TyIdx>(PTY_i32)); + // default 2 for __sync_enter_fast() + MIRIntConst *intConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(2, *intType); + ConstvalNode *exprConst = mod.GetMemPool()->New<ConstvalNode>(); + exprConst->SetPrimType(PTY_i32); + exprConst->SetConstVal(intConst); + stmtReturn->GetNopnd().push_back(exprConst); + stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize()); + } + + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing NaryStmt"); + return false; + } + } else if (!ParseNaryExpr(*stmtReturn)) { + Error("ParseNaryExpr failed"); +
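// the generic operand list could not be parsed; ParseNaryExpr above already reported the detail +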
return false; + } + stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize()); + stmt = stmtReturn; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtReturn(StmtNodePtr &stmt) +{ + return ParseNaryStmt(stmt, OP_return); +} + +bool MIRParser::ParseNaryStmtSyncEnter(StmtNodePtr &stmt) +{ + return ParseNaryStmt(stmt, OP_syncenter); +} + +bool MIRParser::ParseNaryStmtSyncExit(StmtNodePtr &stmt) +{ + return ParseNaryStmt(stmt, OP_syncexit); +} + +bool MIRParser::ParseLoc() +{ + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst in LOC but get "); + return false; + } + lastFileNum = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst in LOC but get "); + return false; + } + lastLineNum = lexer.GetTheIntVal(); + if (firstLineNum == 0) { + firstLineNum = lastLineNum; + } + if (lexer.NextToken() == TK_intconst) { // optional column number + lastColumnNum = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + } + return true; +} + +bool MIRParser::ParseLocStmt(StmtNodePtr &) +{ + return ParseLoc(); +} + +bool MIRParser::ParseStatement(StmtNodePtr &stmt) +{ + paramTokenKindForStmt = lexer.GetTokenKind(); + uint32 mplNum = lexer.GetLineNum(); + uint32 lnum = lastLineNum; + uint32 fnum = lastFileNum; + uint16 cnum = lastColumnNum; + std::map::iterator itFuncPtr = funcPtrMapForParseStmt.find(paramTokenKindForStmt); + if (itFuncPtr != funcPtrMapForParseStmt.end()) { + if (!(this->*(itFuncPtr->second))(stmt)) { + return false; + } + } else { + return false; + } + if (stmt && stmt->GetSrcPos().MplLineNum() == 0) { + stmt->GetSrcPos().SetFileNum(fnum); + stmt->GetSrcPos().SetLineNum(lnum); + stmt->GetSrcPos().SetColumn(cnum); + stmt->GetSrcPos().SetMplLineNum(mplNum); + if (safeRegionFlag.top()) { + stmt->SetInSafeRegion(); + } + } + return true; +} + +/* parse the statements enclosed by { and } + */ +bool MIRParser::ParseStmtBlock(BlockNodePtr &blk) +{ + if (lexer.GetTokenKind() != TK_lbrace) { + Error("expect { for func body but get "); + return false; + } + blk = mod.CurFuncCodeMemPool()->New(); + MIRFunction *fn = mod.CurFunction(); + paramCurrFuncForParseStmtBlock = fn; + lexer.NextToken(); + // Insert _mcount for PI. 
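+ // illustrative: with profile info enabled, each parsed body effectively begins with "call &_mcount ()"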
+ if (mod.GetWithProfileInfo()) { + StmtNode *stmtt = nullptr; + if (!ParseStmtCallMcount(stmtt)) { + return false; + } + blk->AddStatement(stmtt); + } + while (true) { + TokenKind stmtTk = lexer.GetTokenKind(); + // calculate the mpl file line number mplNum here to get accurate result + uint32 mplNum = lexer.GetLineNum(); + if (IsStatement(stmtTk)) { + ParseStmtBlockForSeenComment(blk, mplNum); + StmtNode *stmt = nullptr; + if (!ParseStatement(stmt)) { + Error("ParseStmtBlock failed when parsing a statement"); + return false; + } + if (stmt != nullptr) { // stmt is nullptr if it is a LOC + blk->AddStatement(stmt); + } + } else { + std::map::iterator itFuncPtr = funcPtrMapForParseStmtBlock.find(stmtTk); + if (itFuncPtr == funcPtrMapForParseStmtBlock.end()) { + if (stmtTk == TK_rbrace) { + ParseStmtBlockForSeenComment(blk, mplNum); + lexer.NextToken(); + return true; + } else { + Error("expect } or var or statement for func body but get "); + return false; + } + } else { + if (!(this->*(itFuncPtr->second))()) { + return false; + } + } + } + } +} + +void MIRParser::ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum) +{ + if (Options::noComment) { + lexer.seenComments.clear(); + return; + } + // collect accumulated comments into comment statement nodes + if (!lexer.seenComments.empty()) { + for (size_t i = 0; i < lexer.seenComments.size(); ++i) { + auto *cmnt = mod.CurFuncCodeMemPool()->New(mod); + cmnt->SetComment(lexer.seenComments[i]); + SetSrcPos(cmnt->GetSrcPos(), mplNum); + blk->AddStatement(cmnt); + } + lexer.seenComments.clear(); + } +} + +bool MIRParser::ParseStmtBlockForVar(TokenKind stmtTK) +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + MIRSymbol *st = fn->GetSymTab()->CreateSymbol(kScopeLocal); + st->SetStorageClass(kScAuto); + st->SetSKind(kStVar); + SetSrcPos(st->GetSrcPosition(), lexer.GetLineNum()); + if (stmtTK == TK_tempvar) { + st->SetIsTmp(true); + } + if (!ParseDeclareVar(*st)) { + return false; + } + if (!fn->GetSymTab()->AddToStringSymbolMap(*st)) { + Error("duplicate declare symbol parse function "); + return false; + } + if (!ParseDeclareVarInitValue(*st)) { + return false; + } + return true; +} + +bool MIRParser::ParseStmtBlockForVar() +{ + return ParseStmtBlockForVar(TK_var); +} + +bool MIRParser::ParseStmtBlockForTempVar() +{ + return ParseStmtBlockForVar(TK_tempvar); +} + +bool MIRParser::ParseStmtBlockForReg() +{ + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_preg) { + Error("expect %%preg after reg"); + return false; + } + PregIdx pregIdx; + if (!ParsePseudoReg(PTY_ref, pregIdx)) { + return false; + } + MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx); + TyIdx tyidx(0); + if (!ParseType(tyidx)) { + Error("ParseDeclareVar failed when parsing the type"); + return false; + } + DEBUG_ASSERT(tyidx > 0, "parse declare var failed "); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); + preg->SetMIRType(mirType); + if (lexer.GetTokenKind() == TK_intconst) { + int64 theIntVal = lexer.GetTheIntVal(); + if (theIntVal != 0 && theIntVal != 1) { + Error("parseDeclareReg failed"); + return false; + } + preg->SetNeedRC(theIntVal == 0 ? 
false : true); + } else { + Error("parseDeclareReg failed"); + return false; + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForType() +{ + paramParseLocalType = true; + if (!ParseTypedef()) { + return false; + } + return true; +} + +bool MIRParser::ParseStmtBlockForFrameSize() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after frameSize but get "); + return false; + } + fn->SetFrameSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForUpformalSize() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after upFormalSize but get "); + return false; + } + fn->SetUpFormalSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForModuleID() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after moduleid but get "); + return false; + } + fn->SetModuleID(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncSize() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after funcSize but get "); + return false; + } + fn->SetFuncSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncID() +{ + // funcid is for debugging purpose + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after funcid but get "); + return false; + } + fn->SetPuidxOrigin(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFormalWordsTypeTagged() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize()); + if (addr == nullptr) { + Error("parser error for formalwordstypetagged"); + return false; + } + fn->SetFormalWordsTypeTagged(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForLocalWordsTypeTagged() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetFrameSize()); + if (addr == nullptr) { + Error("parser error for localWordsTypeTagged"); + return false; + } + fn->SetLocalWordsTypeTagged(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForFormalWordsRefCounted() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize()); + if (addr == nullptr) { + Error("parser error for formalwordsrefcounted"); + return false; + } + fn->SetFormalWordsRefCounted(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForLocalWordsRefCounted() +{ + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetFrameSize()); + if (addr == nullptr) { + Error("parser error for localwordsrefcounted"); + return false; + } + fn->SetLocalWordsRefCounted(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncInfo() +{ + lexer.NextToken(); + if (!ParseFuncInfo()) { + return false; + } + return true; +} + +/* exprparser */ +static Opcode GetUnaryOp(TokenKind tk) +{ + switch (tk) { +#define UNARYOP(P) \ + case TK_##P: \ + return OP_##P; +#include "unary_op.def" +#undef UNARYOP + default: + return OP_undef; + } +} + +static Opcode 
GetBinaryOp(TokenKind tk) +{ + switch (tk) { +#define BINARYOP(P) \ + case TK_##P: \ + return OP_##P; +#include "binary_op.def" +#undef BINARYOP + default: + return OP_undef; + } +} + +static Opcode GetConvertOp(TokenKind tk) +{ + switch (tk) { + case TK_ceil: + return OP_ceil; + case TK_cvt: + return OP_cvt; + case TK_floor: + return OP_floor; + case TK_round: + return OP_round; + case TK_trunc: + return OP_trunc; + default: + return OP_undef; + } +} + +bool MIRParser::ParseExprOneOperand(BaseNodePtr &expr) +{ + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing unary "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(expr)) { + Error("expect expression as openrand of unary expression "); + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing operand parsing unary "); + return false; + } + return true; +} + +bool MIRParser::ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1) +{ + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing unary "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(opnd0)) { + return false; + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , between two operands but get "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(opnd1)) { + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing operand parsing unary "); + return false; + } + return true; +} + +bool MIRParser::ParseExprNaryOperand(MapleVector &opndVec) +{ + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing nary operands "); + return false; + } + TokenKind tk = lexer.NextToken(); + while (tk != TK_rparen) { + BaseNode *opnd = nullptr; + if (!ParseExpression(opnd)) { + Error("expect expression parsing nary operands "); + return false; + } + opndVec.push_back(opnd); + tk = lexer.GetTokenKind(); + if (tk == TK_coma) { + tk = lexer.NextToken(); + } + } + return true; +} + +bool MIRParser::ParseDeclaredSt(StIdx &stidx) +{ + TokenKind varTk = lexer.GetTokenKind(); + stidx.SetFullIdx(0); + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + if (varTk == TK_gname) { + stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx); + if (stidx.FullIdx() == 0) { + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(stridx); + st->SetSKind(kStVar); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st); + stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx); + return true; + } + } else if (varTk == TK_lname) { + stidx = mod.CurFunction()->GetSymTab()->GetStIdxFromStrIdx(stridx); + if (stidx.FullIdx() == 0) { + Error("local symbol not declared "); + return false; + } + } else { + Error("expect global/local name but get "); + return false; + } + return true; +} + +void MIRParser::CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx) +{ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + funcSt->SetNeedForwDecl(); + auto *fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + puidx = static_cast(GlobalTables::GetFunctionTable().GetFuncTable().size()); + fn->SetPuidx(puidx); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + if (options & kParseInlineFuncBody) { + 
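// when only inline function bodies are being imported, mark the fresh symbol as temporarily unused +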
+
+bool MIRParser::ParseDeclaredFunc(PUIdx &puidx)
+{
+    GStrIdx stridx = GlobalTables::GetStrTable().GetStrIdxFromName(lexer.GetName());
+    if (stridx == 0u) {
+        stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName());
+    }
+    StIdx stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx);
+    if (stidx.FullIdx() == 0) {
+        CreateFuncMIRSymbol(puidx, stridx);
+        return true;
+    }
+    MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+    DEBUG_ASSERT(st != nullptr, "null ptr check");
+    if (st->GetSKind() != kStFunc) {
+        Error("function name not declared as function");
+        return false;
+    }
+    MIRFunction *func = st->GetFunction();
+    puidx = func->GetPuidx();
+    st->SetAppearsInCode(true);
+    return true;
+}
+
+bool MIRParser::ParseExprDread(BaseNodePtr &expr)
+{
+    if (lexer.GetTokenKind() != TK_dread) {
+        Error("expect dread but get ");
+        return false;
+    }
+    AddrofNode *dexpr = mod.CurFuncCodeMemPool()->New<AddrofNode>(OP_dread);
+    expr = dexpr;
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    bool parseRet = ParsePrimType(tyidx);
+    if (tyidx == 0u || !parseRet) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    StIdx stidx;
+    if (!ParseDeclaredSt(stidx)) {
+        return false;
+    }
+    if (stidx.FullIdx() == 0) {
+        Error("expect a symbol ParseExprDread failed");
+        return false;
+    }
+    dexpr->SetStIdx(stidx);
+    TokenKind endtk = lexer.NextToken();
+    if (endtk == TK_intconst) {
+        dexpr->SetFieldID(lexer.GetTheIntVal());
+        lexer.NextToken();
+    } else if (!IsDelimitationTK(endtk)) {
+        Error("expect , or ) delimitation token but get ");
+        return false;
+    } else {
+        dexpr->SetFieldID(0);
+    }
+    if (!dexpr->CheckNode(mod)) {
+        Error("dread is not legal");
+        return false;
+    }
+    return true;
+}
+
+bool MIRParser::ParseExprDreadoff(BaseNodePtr &expr)
+{
+    if (lexer.GetTokenKind() != TK_dreadoff) {
+        Error("expect dreadoff but get ");
+        return false;
+    }
+    DreadoffNode *dexpr = mod.CurFuncCodeMemPool()->New<DreadoffNode>(OP_dreadoff);
+    expr = dexpr;
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    bool parseRet = ParsePrimType(tyidx);
+    if (tyidx == 0u || !parseRet) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    StIdx stidx;
+    if (!ParseDeclaredSt(stidx)) {
+        return false;
+    }
+    if (stidx.FullIdx() == 0) {
+        Error("expect a symbol ParseExprDreadoff failed");
+        return false;
+    }
+    dexpr->stIdx = stidx;
+    TokenKind endtk = lexer.NextToken();
+    if (endtk == TK_intconst) {
+        dexpr->offset = static_cast<int32>(lexer.GetTheIntVal());
+        lexer.NextToken();
+    } else {
+        Error("expect integer offset but get ");
+        return false;
+    }
+    return true;
+}
+
+bool MIRParser::ParseExprRegread(BaseNodePtr &expr)
+{
+    auto *regRead = mod.CurFuncCodeMemPool()->New<RegreadNode>();
+    expr = regRead;
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        return false;
+    }
+    if (tyidx == 0u) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    if (lexer.GetTokenKind() == TK_specialreg) {
+        PregIdx tempPregIdx = regRead->GetRegIdx();
+        bool isSuccess = ParseSpecialReg(tempPregIdx);
+        regRead->SetRegIdx(tempPregIdx);
+        return isSuccess;
+    }
+    if (lexer.GetTokenKind() == TK_preg) {
+        PregIdx tempPregIdx = regRead->GetRegIdx();
+        bool isSuccess = ParsePseudoReg(regRead->GetPrimType(), tempPregIdx);
+        regRead->SetRegIdx(tempPregIdx);
+        return isSuccess;
+    }
+    Error("expect special or pseudo register but get ");
+    return false;
+}
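+
+// Text forms accepted by the three readers above (sketch, standard MIR
+// spelling assumed; $-names are globals, %-names are locals):
+//     dread i32 $g 2            // read field 2 of aggregate $g
+//     dreadoff i32 $g 8         // read at byte offset 8 within $g
+//     regread i64 %1            // read pseudo-register %1
+//     regread ptr %%thrownval   // read a special register (TK_specialreg)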
+
+bool MIRParser::ParseExprConstval(BaseNodePtr &expr)
+{
+    auto *exprConst = mod.CurFuncCodeMemPool()->New<ConstvalNode>();
+    TokenKind typeTk = lexer.NextToken();
+    if (!IsPrimitiveType(typeTk)) {
+        Error("expect type for constval but get ");
+        return false;
+    }
+    exprConst->SetPrimType(GetPrimitiveType(typeTk));
+    lexer.NextToken();
+    MIRConst *constVal = nullptr;
+    if (!ParseScalarValue(constVal, *GlobalTables::GetTypeTable().GetPrimType(exprConst->GetPrimType()))) {
+        Error("expect scalar type but get ");
+        return false;
+    }
+    exprConst->SetConstVal(constVal);
+    expr = exprConst;
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprConststr(BaseNodePtr &expr)
+{
+    auto *strConst = mod.CurFuncCodeMemPool()->New<ConststrNode>();
+    TokenKind tk = lexer.NextToken();
+    if (!IsPrimitiveType(tk)) {
+        Error("expect primitive type for conststr but get ");
+        return false;
+    }
+    strConst->SetPrimType(GetPrimitiveType(tk));
+    if (!IsAddress(strConst->GetPrimType())) {
+        Error("expect address type for conststr but get ");
+        return false;
+    }
+    tk = lexer.NextToken();
+    if (tk != TK_string) {
+        Error("expect string literal for conststr but get ");
+        return false;
+    }
+    strConst->SetStrIdx(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()));
+    expr = strConst;
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprConststr16(BaseNodePtr &expr)
+{
+    auto *str16Const = mod.CurFuncCodeMemPool()->New<Conststr16Node>();
+    TokenKind tk = lexer.NextToken();
+    if (!IsPrimitiveType(tk)) {
+        Error("expect primitive type for conststr16 but get ");
+        return false;
+    }
+    str16Const->SetPrimType(GetPrimitiveType(tk));
+    if (!IsAddress(str16Const->GetPrimType())) {
+        Error("expect address type for conststr16 but get ");
+        return false;
+    }
+    tk = lexer.NextToken();
+    if (tk != TK_string) {
+        Error("expect string literal for conststr16 but get ");
+        return false;
+    }
+    // UTF-16 strings in mpl files are presented as UTF-8 strings
+    // to keep the printable chars in ascii form,
+    // so we need to do a UTF8ToUTF16 conversion
+    std::string str = lexer.GetName();
+    std::u16string str16;
+    (void)namemangler::UTF8ToUTF16(str16, str);
+    str16Const->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16));
+    expr = str16Const;
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprSizeoftype(BaseNodePtr &expr)
+{
+    auto *exprSizeOfType = mod.CurFuncCodeMemPool()->New<SizeoftypeNode>();
+    TokenKind typeTk = lexer.NextToken();
+    if (!IsPrimitiveType(typeTk)) {
+        Error("expect type for sizeoftype but get ");
+        return false;
+    }
+    exprSizeOfType->SetPrimType(GetPrimitiveType(typeTk));
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    if (!ParseType(tyidx)) {
+        Error("expect type parsing sizeoftype but get ");
+        return false;
+    }
+    exprSizeOfType->SetTyIdx(tyidx);
+    expr = exprSizeOfType;
+    return true;
+}
+
+bool MIRParser::ParseExprFieldsDist(BaseNodePtr &expr)
+{
+    TokenKind typeTk = lexer.NextToken();
+    if (!IsPrimitiveType(typeTk)) {
+        Error("expect type for fieldsdist but get ");
+        return false;
+    }
+    auto *node = mod.CurFuncCodeMemPool()->New<FieldsDistNode>();
+    node->SetPrimType(GetPrimitiveType(typeTk));
+    lexer.NextToken();
+    TyIdx tyIdx(0);
+    if (!ParseType(tyIdx)) {
+        Error("expect type parsing fieldsdist but get ");
+        return false;
+    }
+    node->SetTyIdx(tyIdx);
+    TokenKind tk = lexer.GetTokenKind();
+    if (tk != TK_intconst) {
+        Error("expect int const but get ");
+        return false;
+    }
+    node->SetFiledID1(lexer.GetTheIntVal());
+    tk = lexer.NextToken();
+    if (tk != TK_intconst) {
+        Error("expect int const but get ");
+        return false;
+    }
+    node->SetFiledID2(lexer.GetTheIntVal());
+    lexer.NextToken();
+    expr = node;
+    return true;
+}
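+
+// Leaf constants in expression position (sketch of the accepted text forms):
+//     constval i32 42           // scalar constant
+//     conststr ptr "hello"      // UTF-8 literal, interned in the string table
+//     conststr16 ptr "wide"     // stored as UTF-8 in the .mpl file and
+//                               // re-encoded to UTF-16 by the parser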
+
+bool MIRParser::ParseExprBinary(BaseNodePtr &expr)
+{
+    Opcode opcode = GetBinaryOp(lexer.GetTokenKind());
+    if (opcode == OP_undef) {
+        Error("expect binary operator but get ");
+        return false;
+    }
+    auto *addExpr = mod.CurFuncCodeMemPool()->New<BinaryNode>(opcode);
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect type parsing binary operator but get ");
+        return false;
+    }
+    addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind()));
+    lexer.NextToken();
+    BaseNode *opnd0 = nullptr;
+    BaseNode *opnd1 = nullptr;
+    if (!ParseExprTwoOperand(opnd0, opnd1)) {
+        return false;
+    }
+    addExpr->SetBOpnd(opnd0, 0);
+    addExpr->SetBOpnd(opnd1, 1);
+    expr = addExpr;
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprCompare(BaseNodePtr &expr)
+{
+    Opcode opcode = GetBinaryOp(lexer.GetTokenKind());
+    auto *addExpr = mod.CurFuncCodeMemPool()->New<CompareNode>(opcode);
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect type parsing compare operator but get ");
+        return false;
+    }
+    addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind()));
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect operand type parsing compare operator but get ");
+        return false;
+    }
+    addExpr->SetOpndType(GetPrimitiveType(lexer.GetTokenKind()));
+    lexer.NextToken();
+    BaseNode *opnd0 = nullptr;
+    BaseNode *opnd1 = nullptr;
+    if (!ParseExprTwoOperand(opnd0, opnd1)) {
+        return false;
+    }
+    addExpr->SetBOpnd(opnd0, 0);
+    addExpr->SetBOpnd(opnd1, 1);
+    expr = addExpr;
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprDepositbits(BaseNodePtr &expr)
+{
+    // syntax: depositbits <int-type> <bOffset> <bSize> (<opnd0>, <opnd1>)
+    if (lexer.GetTokenKind() != TK_depositbits) {
+        Error("expect depositbits but get ");
+        return false;
+    }
+    auto *dpsbNode = mod.CurFuncCodeMemPool()->New<DepositbitsNode>();
+    expr = dpsbNode;
+    PrimType ptyp = GetPrimitiveType(lexer.NextToken());
+    if (!IsPrimitiveInteger(ptyp)) {
+        Error("expect primitive integer type but get ");
+        return false;
+    }
+    dpsbNode->SetPrimType(ptyp);
+    if (lexer.NextToken() != TK_intconst) {
+        Error("expect bOffset but get ");
+        return false;
+    }
+    dpsbNode->SetBitsOffset(lexer.GetTheIntVal());
+    if (lexer.NextToken() != TK_intconst) {
+        Error("expect bSize but get ");
+        return false;
+    }
+    dpsbNode->SetBitsSize(lexer.GetTheIntVal());
+    lexer.NextToken();
+    BaseNode *opnd0 = nullptr;
+    BaseNode *opnd1 = nullptr;
+    if (!ParseExprTwoOperand(opnd0, opnd1)) {
+        Error("ParseExprDepositbits failed when parsing two operands");
+        return false;
+    }
+    dpsbNode->SetBOpnd(opnd0, 0);
+    dpsbNode->SetBOpnd(opnd1, 1);
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprIreadIaddrof(IreadNode &expr)
+{
+    // syntax : iread/iaddrof <prim-type> <type> <field-id> (<addr-expr>)
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        return false;
+    }
+    expr.SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    tyidx = TyIdx(0);
+    if (!ParseDerivedType(tyidx)) {
+        Error("ParseExprIreadIaddrof failed when parsing derived type");
+        return false;
+    }
+    expr.SetTyIdx(tyidx);
+    if (lexer.GetTokenKind() == TK_intconst) {
+        expr.SetFieldID(lexer.GetTheIntVal());
+        lexer.NextToken();
+    }
+    BaseNode *opnd0 = nullptr;
+    if (!ParseExprOneOperand(opnd0)) {
+        return false;
+    }
+    expr.SetOpnd(opnd0, 0);
+    lexer.NextToken();
+    return true;
+}
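+
+// depositbits writes the low bSize bits of opnd1 into the bit-field starting
+// at bOffset of opnd0's value; e.g. (sketch)
+//     depositbits i32 8 4 (dread i32 $x, constval i32 5)
+// yields $x's value with bits 8..11 replaced by 0b0101.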
+
+bool MIRParser::ParseExprIread(BaseNodePtr &expr)
+{
+    // syntax : iread <prim-type> <type> <field-id> (<addr-expr>)
+    auto *iExpr = mod.CurFuncCodeMemPool()->New<IreadNode>(OP_iread);
+    if (!ParseExprIreadIaddrof(*iExpr)) {
+        Error("ParseExprIread failed when trying to parse iread");
+        return false;
+    }
+    expr = iExpr;
+    return true;
+}
+
+bool MIRParser::ParseExprIaddrof(BaseNodePtr &expr)
+{
+    // syntax : iaddrof <prim-type> <type> <field-id> (<addr-expr>)
+    auto *iExpr = mod.CurFuncCodeMemPool()->New<IreadNode>(OP_iaddrof);
+    if (!ParseExprIreadIaddrof(*iExpr)) {
+        Error("ParseExprIaddrof failed when trying to parse iaddrof");
+        return false;
+    }
+    expr = iExpr;
+    return true;
+}
+
+bool MIRParser::ParseExprIreadoff(BaseNodePtr &expr)
+{
+    // syntax : ireadoff <prim-type> <offset> (<addr-expr>)
+    auto *iReadOff = mod.CurFuncCodeMemPool()->New<IreadoffNode>();
+    expr = iReadOff;
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        return false;
+    }
+    iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    if (!IsPrimitiveScalar(iReadOff->GetPrimType())) {
+        Error("only scalar types allowed for ireadoff");
+        return false;
+    }
+    if (lexer.GetTokenKind() != TK_intconst) {
+        Error("expect offset but get ");
+        return false;
+    }
+    iReadOff->SetOffset(lexer.GetTheIntVal());
+    lexer.NextToken();
+    BaseNode *opnd = nullptr;
+    if (!ParseExprOneOperand(opnd)) {
+        Error("ParseExprIreadoff failed when parsing one operand");
+        return false;
+    }
+    iReadOff->SetOpnd(opnd, 0);
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprIreadFPoff(BaseNodePtr &expr)
+{
+    // syntax : ireadfpoff <prim-type> <offset>
+    auto *iReadOff = mod.CurFuncCodeMemPool()->New<IreadFPoffNode>();
+    expr = iReadOff;
+    if (!IsPrimitiveType(lexer.NextToken())) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        return false;
+    }
+    iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    if (lexer.GetTokenKind() != TK_intconst) {
+        Error("expect offset but get ");
+        return false;
+    }
+    iReadOff->SetOffset(lexer.GetTheIntVal());
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprAddrof(BaseNodePtr &expr)
+{
+    // syntax: addrof <prim-type> <var-name> <field-id>
+    auto *addrofNode = mod.CurFuncCodeMemPool()->New<AddrofNode>(OP_addrof);
+    expr = addrofNode;
+    if (lexer.GetTokenKind() != TK_addrof) {
+        Error("expect addrof but get ");
+        return false;
+    }
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    addrofNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    StIdx stidx;
+    if (!ParseDeclaredSt(stidx)) {
+        return false;
+    }
+    if (stidx.FullIdx() == 0) {
+        Error("expect symbol ParseExprAddrof");
+        return false;
+    }
+    if (stidx.IsGlobal()) {
+        MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+        DEBUG_ASSERT(sym != nullptr, "null ptr check");
+        sym->SetHasPotentialAssignment();
+    }
+    addrofNode->SetStIdx(stidx);
+    TokenKind tk = lexer.NextToken();
+    if (IsDelimitationTK(tk)) {
+        addrofNode->SetFieldID(0);
+    } else if (tk == TK_intconst) {
+        addrofNode->SetFieldID(lexer.GetTheIntVal());
+        lexer.NextToken();
+    } else {
+        addrofNode->SetFieldID(0);
+    }
+    return true;
+}
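+
+// Taking a global's address (addrof, and addrofoff below) calls
+// SetHasPotentialAssignment on the symbol: once its address escapes, the
+// optimizer can no longer assume the global is only modified by direct
+// assignments. A sketch of the text forms:
+//     addrof ptr $g 2           // address of field 2 of $g
+//     addrofoff ptr $g 16       // address of $g plus 16 bytes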
+
+bool MIRParser::ParseExprAddrofoff(BaseNodePtr &expr)
+{
+    // syntax: addrofoff <prim-type> <var-name> <offset>
+    AddrofoffNode *addrofoffNode = mod.CurFuncCodeMemPool()->New<AddrofoffNode>(OP_addrofoff);
+    expr = addrofoffNode;
+    if (lexer.GetTokenKind() != TK_addrofoff) {
+        Error("expect addrofoff but get ");
+        return false;
+    }
+    lexer.NextToken();
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        Error("expect primitive type but get ");
+        return false;
+    }
+    addrofoffNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    StIdx stidx;
+    if (!ParseDeclaredSt(stidx)) {
+        return false;
+    }
+    if (stidx.FullIdx() == 0) {
+        Error("expect symbol ParseExprAddrofoff");
+        return false;
+    }
+    if (stidx.IsGlobal()) {
+        MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx());
+        DEBUG_ASSERT(sym != nullptr, "null ptr check");
+        sym->SetHasPotentialAssignment();
+    }
+    addrofoffNode->stIdx = stidx;
+    TokenKind tk = lexer.NextToken();
+    if (tk == TK_intconst) {
+        addrofoffNode->offset = static_cast<int32>(lexer.GetTheIntVal());
+        lexer.NextToken();
+    } else {
+        Error("expect integer offset but get ");
+        return false;
+    }
+    return true;
+}
+
+bool MIRParser::ParseExprAddroffunc(BaseNodePtr &expr)
+{
+    auto *addrOfFuncNode = mod.CurFuncCodeMemPool()->New<AddroffuncNode>();
+    expr = addrOfFuncNode;
+    TokenKind tk = lexer.NextToken();
+    if (tk != TK_a32 && tk != TK_a64 && tk != TK_ptr) {
+        Error("expect address primitive type but get ");
+        return false;
+    }
+    TyIdx tyidx(0);
+    if (!ParsePrimType(tyidx)) {
+        Error("ParseExprAddroffunc failed when parsing primitive type");
+        return false;
+    }
+    addrOfFuncNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx));
+    if (lexer.GetTokenKind() != TK_fname) {
+        Error("expect function name but get ");
+        return false;
+    }
+    PUIdx pidx;
+    if (!ParseDeclaredFunc(pidx)) {
+        if (mod.GetFlavor() < kMmpl) {
+            Error("expect .mmpl file");
+            return false;
+        }
+        pidx = EnterUndeclaredFunction();
+    }
+    addrOfFuncNode->SetPUIdx(pidx);
+    lexer.NextToken();
+    return true;
+}
+
+bool MIRParser::ParseExprAddroflabel(BaseNodePtr &expr)
+{
+    // syntax: addroflabel